text stringlengths 4 1.02M | meta dict |
|---|---|
import variable
import string
# Holds the string form of the most recently evaluated expression result.
statement_ = ""
# NOTE(review): not read or written anywhere else in this module's visible
# code — presumably reserved for type tracking; confirm before removing.
type_ = ""
# True while statement_ holds a printable value; Define() sets it to False.
is_value = True
def is_whitespace(character):
    """Return True if *character* is a single whitespace character.

    The length check preserves the original loop's behavior for the empty
    string and multi-character inputs (both False), which a bare
    ``character in string.whitespace`` test would not.
    """
    return len(character) == 1 and character in string.whitespace
def get_third_parameter(temptext1):
    """Return everything from the third whitespace-separated word onward.

    Internal whitespace after the third word is preserved, matching the
    original slice-based implementation.

    BUG FIX: the original left ``temptext`` unbound when the input had
    fewer than three words, raising UnboundLocalError; now returns "".
    """
    text = temptext1.strip()
    parts = text.split(None, 2)
    if len(parts) < 3:
        return ""
    return parts[2]
def get_words_array(temptext):
    """Split *temptext* into a list of whitespace-separated words.

    Equivalent to the original hand-rolled scanner: str.split() with no
    arguments collapses runs of whitespace and returns [] for empty or
    whitespace-only input.
    """
    return temptext.split()
def get_words_number(temptext):
    """Return the number of whitespace-separated words in *temptext*.

    The original counted whitespace gaps manually; len(str.split()) gives
    the identical count, including 0 for empty/whitespace-only input.
    """
    return len(temptext.split())
def is_variable(text):
    """Return True if *text* is a valid identifier for this language:
    non-empty and consisting only of letters and underscores.

    Note: digits are rejected, so numeric literals never look like names.
    """
    return bool(text) and all(c.isalpha() or c == '_' for c in text)
def DoProcess(argumentArray, text):
    """Dispatch on the operator token argumentArray[0] to its handler.

    Prints an error message (pinyin, as in the original UI) for an empty
    argument list or an unrecognized operator.  Only 'define' needs the
    raw *text*; all other handlers take the token array alone.
    """
    if len(argumentArray) == 0:
        print("cuo wu kong guo cheng")
        return
    # Operator -> handler dispatch table replaces the original if/elif chain.
    handlers = {
        '+': Add,
        '-': Subtract,
        '*': Multiply,
        '/': Divide,
        '=': Equal,
        '<': Less,
        '>': Greater,
        '<=': LessOrEqual,
        '>=': GreaterOrEqual,
        'and': And,
        'or': Or,
        'not': Not,
    }
    op = argumentArray[0]
    if op == 'define':
        Define(argumentArray, text)
    elif op in handlers:
        handlers[op](argumentArray)
    else:
        print("fei fa guo cheng")
def _eval_operator_chain(arr, op):
    """Join the operands arr[1:] with *op*, substituting stored values for
    identifiers, evaluate the expression, and record the result.

    Sets the module globals ``statement_`` (string form of the result) and
    ``is_value`` (True).  Raises Exception when no operand is given.

    NOTE(review): eval() on user-supplied operands executes arbitrary
    Python; acceptable only because this is a trusted toy interpreter.
    """
    global statement_, is_value
    if len(arr) < 2:
        raise Exception
    operands = []
    for token in arr[1:]:
        if is_variable(token):
            # Identifier: substitute its stored value.
            operands.append(str(variable.variable_[token]))
        else:
            # Literal: use the token text verbatim.
            operands.append(token)
    is_value = True
    # str.join replaces the original += / trailing-slice concatenation.
    statement_ = str(eval(op.join(operands)))


def Add(arr):
    """Evaluate '+' over the operands in arr[1:]."""
    _eval_operator_chain(arr, "+")


def Subtract(arr):
    """Evaluate '-' over the operands in arr[1:]."""
    _eval_operator_chain(arr, "-")


def Multiply(arr):
    """Evaluate '*' over the operands in arr[1:]."""
    _eval_operator_chain(arr, "*")


def Divide(arr):
    """Evaluate '/' over the operands in arr[1:]."""
    _eval_operator_chain(arr, "/")
def _eval_comparison(arr, op):
    """Compare arr[1] and arr[2] with the operator *op* and record the result.

    Identifiers are replaced by their stored values; literals are used
    verbatim.  Sets the module globals ``statement_`` (string form of the
    boolean result) and ``is_value`` (True).  Raises Exception unless
    exactly two operands are given.

    NOTE(review): eval() on user-supplied operands executes arbitrary
    Python; acceptable only because this is a trusted toy interpreter.
    """
    global statement_, is_value
    if len(arr) != 3:
        raise Exception

    def resolve(token):
        # Substitute the stored value for identifiers, pass literals through.
        if is_variable(token):
            return str(variable.variable_[token])
        return token

    is_value = True
    statement_ = str(eval(resolve(arr[1]) + op + resolve(arr[2])))


def Equal(arr):
    """Evaluate arr[1] == arr[2]."""
    _eval_comparison(arr, "==")


def Less(arr):
    """Evaluate arr[1] < arr[2]."""
    _eval_comparison(arr, "<")


def Greater(arr):
    """Evaluate arr[1] > arr[2]."""
    _eval_comparison(arr, ">")


def LessOrEqual(arr):
    """Evaluate arr[1] <= arr[2]."""
    _eval_comparison(arr, "<=")


def GreaterOrEqual(arr):
    """Evaluate arr[1] >= arr[2]."""
    _eval_comparison(arr, ">=")
def _eval_boolean_chain(arr, op):
    """Join the operands arr[1:] with the boolean operator *op* (including
    its surrounding spaces), evaluate, and record the result.

    Sets the module globals ``statement_`` and ``is_value``.  Raises
    Exception unless at least two operands are given.

    NOTE(review): eval() on user-supplied operands executes arbitrary
    Python; acceptable only because this is a trusted toy interpreter.
    """
    global statement_, is_value
    if len(arr) < 3:
        raise Exception
    operands = [
        str(variable.variable_[token]) if is_variable(token) else token
        for token in arr[1:]
    ]
    is_value = True
    # join replaces the original append-then-slice-off-the-tail dance.
    statement_ = str(eval(op.join(operands)))


def And(arr):
    """Evaluate 'and' over the operands in arr[1:]."""
    _eval_boolean_chain(arr, " and ")


def Or(arr):
    """Evaluate 'or' over the operands in arr[1:]."""
    _eval_boolean_chain(arr, " or ")
def Not(arr):
    """Evaluate boolean negation of arr[1] and record the result in the
    module globals ``statement_`` / ``is_value``.

    Raises Exception unless exactly one operand is given.
    """
    global statement_, is_value
    if len(arr) != 2:
        raise Exception
    # Identifiers are replaced by their stored values; literals pass through.
    operand = str(variable.variable_[arr[1]]) if is_variable(arr[1]) else arr[1]
    is_value = True
    statement_ = str(eval("not " + operand))
def Define(arr, text):
    """Bind the variable named arr[1] to the value of the expression in the
    third field of *text*.

    Tries eval() first; on failure, falls back to copying another stored
    variable's value; prints an error when the name is undefined either way.
    Always clears ``is_value`` since a definition produces no printable
    result.  Raises Exception for too few arguments or an invalid name.

    BUG FIX: the original used bare ``except:`` clauses, which also swallow
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception:``.
    NOTE(review): eval() on user input executes arbitrary Python code.
    """
    global is_value
    if len(arr) < 3:
        raise Exception
    if not is_variable(arr[1]):
        raise Exception
    third = get_third_parameter(text)
    try:
        variable.variable_[arr[1]] = eval(third)
    except Exception:
        # Not a valid expression — maybe it names another stored variable.
        try:
            variable.variable_[arr[1]] = variable.variable_[third]
        except Exception:
            print("error: %s wei ding yi" % (third))
    is_value = False
| {
"content_hash": "b5d8fa739d09100665e9a67d738a0182",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 66,
"avg_line_length": 22.314917127071823,
"alnum_prop": 0.5446892795246349,
"repo_name": "wangxiaozhi123/wuniLang",
"id": "63086f916d9652921c55ae81546f74620907c1b6",
"size": "8078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12167"
}
],
"symlink_target": ""
} |
"""
byceps.services.board.topic_command_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from typing import Tuple
from ...database import db
from ...events.board import (
BoardTopicCreated,
BoardTopicHidden,
BoardTopicLocked,
BoardTopicMoved,
BoardTopicPinned,
BoardTopicUnhidden,
BoardTopicUnlocked,
BoardTopicUnpinned,
BoardTopicUpdated,
)
from ...typing import UserID
from .aggregation_service import aggregate_category, aggregate_topic
from .models.category import Category as DbCategory
from .models.posting import InitialTopicPostingAssociation, Posting as DbPosting
from .models.topic import Topic as DbTopic
from .posting_command_service import update_posting
from . import topic_query_service
from .transfer.models import CategoryID, TopicID
def create_topic(
    category_id: CategoryID, creator_id: UserID, title: str, body: str
) -> Tuple[DbTopic, BoardTopicCreated]:
    """Create a topic with an initial posting in that category."""
    new_topic = DbTopic(category_id, creator_id, title)
    first_posting = DbPosting(new_topic, creator_id, body)
    association = InitialTopicPostingAssociation(new_topic, first_posting)

    # Persist topic, posting and their association in one transaction.
    for entity in (new_topic, first_posting, association):
        db.session.add(entity)
    db.session.commit()

    aggregate_topic(new_topic)

    creation_event = BoardTopicCreated(
        occurred_at=new_topic.created_at, topic_id=new_topic.id, url=None
    )

    return new_topic, creation_event
def update_topic(
    topic_id: TopicID, editor_id: UserID, title: str, body: str
) -> BoardTopicUpdated:
    """Update the topic (and its initial posting).

    The title is stripped of surrounding whitespace; the body change is
    delegated to the posting command service with commit=False so both
    changes land in one transaction.
    """
    topic = _get_topic(topic_id)

    topic.title = title.strip()

    posting_event = update_posting(
        topic.initial_posting.id, editor_id, body, commit=False
    )

    db.session.commit()

    # Reuse the posting event's timestamp so both events agree on when
    # the update happened.
    return BoardTopicUpdated(
        occurred_at=posting_event.occurred_at,
        topic_id=topic.id,
        editor_id=editor_id,
        url=None,
    )
def hide_topic(topic_id: TopicID, moderator_id: UserID) -> BoardTopicHidden:
    """Hide the topic."""
    topic = _get_topic(topic_id)
    occurred_at = datetime.utcnow()

    topic.hidden = True
    topic.hidden_at = occurred_at
    topic.hidden_by_id = moderator_id
    db.session.commit()

    # Visibility changed, so recompute the topic's aggregate data.
    aggregate_topic(topic)

    return BoardTopicHidden(
        occurred_at=occurred_at,
        topic_id=topic.id,
        moderator_id=moderator_id,
        url=None,
    )
def unhide_topic(topic_id: TopicID, moderator_id: UserID) -> BoardTopicUnhidden:
    """Un-hide the topic."""
    topic = _get_topic(topic_id)
    occurred_at = datetime.utcnow()

    # TODO: Store who un-hid the topic.
    topic.hidden = False
    topic.hidden_at = None
    topic.hidden_by_id = None
    db.session.commit()

    # Visibility changed, so recompute the topic's aggregate data.
    aggregate_topic(topic)

    return BoardTopicUnhidden(
        occurred_at=occurred_at,
        topic_id=topic.id,
        moderator_id=moderator_id,
        url=None,
    )
def lock_topic(topic_id: TopicID, moderator_id: UserID) -> BoardTopicLocked:
    """Lock the topic."""
    topic = _get_topic(topic_id)
    occurred_at = datetime.utcnow()

    topic.locked = True
    topic.locked_at = occurred_at
    topic.locked_by_id = moderator_id
    db.session.commit()

    return BoardTopicLocked(
        occurred_at=occurred_at,
        topic_id=topic.id,
        moderator_id=moderator_id,
        url=None,
    )
def unlock_topic(topic_id: TopicID, moderator_id: UserID) -> BoardTopicUnlocked:
    """Unlock the topic."""
    topic = _get_topic(topic_id)
    occurred_at = datetime.utcnow()

    # TODO: Store who unlocked the topic.
    topic.locked = False
    topic.locked_at = None
    topic.locked_by_id = None
    db.session.commit()

    return BoardTopicUnlocked(
        occurred_at=occurred_at,
        topic_id=topic.id,
        moderator_id=moderator_id,
        url=None,
    )
def pin_topic(topic_id: TopicID, moderator_id: UserID) -> BoardTopicPinned:
    """Pin the topic."""
    topic = _get_topic(topic_id)
    occurred_at = datetime.utcnow()

    topic.pinned = True
    topic.pinned_at = occurred_at
    topic.pinned_by_id = moderator_id
    db.session.commit()

    return BoardTopicPinned(
        occurred_at=occurred_at,
        topic_id=topic.id,
        moderator_id=moderator_id,
        url=None,
    )
def unpin_topic(topic_id: TopicID, moderator_id: UserID) -> BoardTopicUnpinned:
    """Unpin the topic."""
    topic = _get_topic(topic_id)
    occurred_at = datetime.utcnow()

    # TODO: Store who unpinned the topic.
    topic.pinned = False
    topic.pinned_at = None
    topic.pinned_by_id = None
    db.session.commit()

    return BoardTopicUnpinned(
        occurred_at=occurred_at,
        topic_id=topic.id,
        moderator_id=moderator_id,
        url=None,
    )
def move_topic(
    topic_id: TopicID, new_category_id: CategoryID, moderator_id: UserID
) -> BoardTopicMoved:
    """Move the topic to another category."""
    topic = _get_topic(topic_id)
    occurred_at = datetime.utcnow()

    source_category = topic.category
    target_category = DbCategory.query.get(new_category_id)

    topic.category = target_category
    db.session.commit()

    # Both categories' aggregate data are affected by the move.
    aggregate_category(source_category)
    aggregate_category(target_category)

    return BoardTopicMoved(
        occurred_at=occurred_at,
        topic_id=topic.id,
        old_category_id=source_category.id,
        new_category_id=target_category.id,
        moderator_id=moderator_id,
        url=None,
    )
def limit_topic_to_announcements(topic_id: TopicID) -> None:
    """Limit posting in the topic to moderators."""
    topic = _get_topic(topic_id)
    # Flag flip is persisted immediately.
    topic.posting_limited_to_moderators = True
    db.session.commit()
def remove_limit_of_topic_to_announcements(topic_id: TopicID) -> None:
    """Allow non-moderators to post in the topic again."""
    topic = _get_topic(topic_id)
    # Flag flip is persisted immediately.
    topic.posting_limited_to_moderators = False
    db.session.commit()
def _get_topic(topic_id: TopicID) -> DbTopic:
    """Look up the topic by ID via the query service."""
    return topic_query_service.get_topic(topic_id)
| {
"content_hash": "ff6ace90bdb24b26a2182abed6a89c83",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 80,
"avg_line_length": 25.982456140350877,
"alnum_prop": 0.6737002025658338,
"repo_name": "m-ober/byceps",
"id": "0b879bba7144e9d225c19ff9cb596b7bfa78c265",
"size": "5924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byceps/services/board/topic_command_service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38499"
},
{
"name": "Dockerfile",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "369989"
},
{
"name": "JavaScript",
"bytes": "9483"
},
{
"name": "Python",
"bytes": "1152996"
}
],
"symlink_target": ""
} |
from . import coredata as cdata
from .mesonlib import MachineChoice
import os.path
import pprint
import textwrap
def add_arguments(parser):
    """Register this command's options on the given argparse parser."""
    parser.add_argument(
        '--all',
        action='store_true',
        dest='all',
        default=False,
        help='Show data not used by current backend.',
    )
    parser.add_argument(
        'builddir',
        nargs='?',
        default='.',
        help='The build directory',
    )
def dump_compilers(compilers):
    """Print a short summary of each cached compiler, keyed by language."""
    for language, compiler in compilers.items():
        print(' ' + language + ':')
        print(' Id: ' + compiler.id)
        print(' Command: ' + ' '.join(compiler.exelist))
        # Version fields may be empty; only print them when present.
        if compiler.full_version:
            print(' Full version: ' + compiler.full_version)
        if compiler.version:
            print(' Detected version: ' + compiler.version)
def dump_guids(d):
    """Print one 'name: guid' pair per line, indented by one space."""
    for name, value in d.items():
        print(' {0}: {1}'.format(name, value))
def run(options):
    """Entry point: dump meson's internal unstable cache for debugging.

    Returns 1 when *options.builddir* is not a build directory; otherwise
    falls through (None) after printing the cache contents.
    """
    datadir = 'meson-private'
    if options.builddir is not None:
        datadir = os.path.join(options.builddir, datadir)
    if not os.path.isdir(datadir):
        print('Current directory is not a build dir. Please specify it or '
              'change the working directory to it.')
        return 1

    all_backends = options.all

    print('This is a dump of the internal unstable cache of meson. This is for debugging only.')
    print('Do NOT parse, this will change from version to version in incompatible ways')
    print('')

    coredata = cdata.load(options.builddir)
    backend = coredata.get_builtin_option('backend')
    for k, v in sorted(coredata.__dict__.items()):
        if k in ('backend_options', 'base_options', 'builtins', 'compiler_options', 'user_options'):
            # use `meson configure` to view these
            pass
        elif k in ['install_guid', 'test_guid', 'regen_guid']:
            # GUIDs are only meaningful for the Visual Studio backends.
            if all_backends or backend.startswith('vs'):
                print(k + ': ' + v)
        elif k == 'target_guids':
            if all_backends or backend.startswith('vs'):
                print(k + ':')
                dump_guids(v)
        elif k in ['lang_guids']:
            if all_backends or backend.startswith('vs') or backend == 'xcode':
                print(k + ':')
                dump_guids(v)
        elif k == 'meson_command':
            if all_backends or backend.startswith('vs'):
                print('Meson command used in build file regeneration: ' + ' '.join(v))
        elif k == 'pkgconf_envvar':
            print('Last seen PKGCONFIG environment variable value: ' + v)
        elif k == 'version':
            print('Meson version: ' + v)
        elif k == 'cross_files':
            if v:
                print('Cross File: ' + ' '.join(v))
        elif k == 'config_files':
            if v:
                print('Native File: ' + ' '.join(v))
        elif k == 'compilers':
            for for_machine in MachineChoice:
                print('Cached {} machine compilers:'.format(
                    for_machine.get_lower_case_name()))
                dump_compilers(v[for_machine])
        elif k == 'deps':
            def print_dep(dep_key, dep):
                # Summarize one cached dependency entry.
                print(' ' + dep_key[0] + ": ")
                print(' compile args: ' + repr(dep.get_compile_args()))
                print(' link args: ' + repr(dep.get_link_args()))
                if dep.get_sources():
                    print(' sources: ' + repr(dep.get_sources()))
                print(' version: ' + repr(dep.get_version()))

            for for_machine in iter(MachineChoice):
                items_list = list(sorted(v[for_machine].items()))
                if items_list:
                    # BUG FIX: the original applied the '%' operator to a
                    # '{}'-style format string, raising TypeError at runtime.
                    print('Cached dependencies for {} machine'.format(
                        for_machine.get_lower_case_name()))
                    for dep_key, deps in items_list:
                        for dep in deps:
                            print_dep(dep_key, dep)
        else:
            # Fallback: pretty-print anything not handled above.
            print(k + ':')
            print(textwrap.indent(pprint.pformat(v), ' '))
| {
"content_hash": "7ab5dfa45bb4df4416fd770200e5dacc",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 100,
"avg_line_length": 40.313131313131315,
"alnum_prop": 0.5339513906289151,
"repo_name": "becm/meson",
"id": "5463f162511671b2fb045fe876a99716f4c10f72",
"size": "4580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesonbuild/munstable_coredata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4190"
},
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "167971"
},
{
"name": "C#",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "51171"
},
{
"name": "CMake",
"bytes": "27103"
},
{
"name": "Cuda",
"bytes": "7454"
},
{
"name": "D",
"bytes": "5313"
},
{
"name": "Dockerfile",
"bytes": "1960"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "11539"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "HTML",
"bytes": "117"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "2570"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "139"
},
{
"name": "Meson",
"bytes": "454262"
},
{
"name": "Objective-C",
"bytes": "1235"
},
{
"name": "Objective-C++",
"bytes": "381"
},
{
"name": "PowerShell",
"bytes": "2242"
},
{
"name": "Python",
"bytes": "2912935"
},
{
"name": "Roff",
"bytes": "569"
},
{
"name": "Rust",
"bytes": "1079"
},
{
"name": "Shell",
"bytes": "6800"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim script",
"bytes": "9919"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import tempfile
import shutil
import codecs
from django.utils.six import StringIO
from django.test import TestCase
from django.test.utils import override_settings
from django.core.management import call_command
from powerpages.models import Page
from powerpages.sync import (
PageFileDumper, FilePageLoader, SyncStatus,
WebsiteDumpOperation, WebsiteLoadOperation, normalize_page_fields
)
def _file_contents(s, strip_spaces=True):
return s.strip(' ') if strip_spaces else s
class BaseSyncTestCase(TestCase):
    """Shared fixtures and a temporary sync directory for page sync tests."""

    # Show full fixture strings in assertion failures.
    maxDiff = None

    # Canonical dump format: sorted-key JSON header followed by the
    # template-source section.
    normalized_content = _file_contents('''{
  "alias": "test-page",
  "description": "At vero eos et accusamus et iusto odio",
  "keywords": "lorem ipsum dolor sit amet",
  "page_processor": "powerpages.RedirectProcessor",
  "page_processor_config": {
    "to url": "/test/"
  },
  "title": "De Finibus Bonorum et Malorum"
}
## TEMPLATE SOURCE: ##
<h1>{{ website_page.title }}</h1>
''')

    # Same data as normalized_content, but hand-edited: unsorted keys,
    # compact nesting, leading newline — loaders must normalize it.
    edited_content = '''
{
  "alias": "test-page",
  "description": "At vero eos et accusamus et iusto odio",
  "title": "De Finibus Bonorum et Malorum",
  "page_processor_config": {"to url": "/test/"},
  "page_processor": "powerpages.RedirectProcessor",
  "keywords": "lorem ipsum dolor sit amet"
}
## TEMPLATE SOURCE: ##
<h1>{{ website_page.title }}</h1>'''

    # A minimal page with all-default fields and an empty template.
    simple_content = _file_contents('''{
  "description": "",
  "title": "",
  "alias": null,
  "page_processor_config": null,
  "page_processor": "powerpages.DefaultPageProcessor",
  "keywords": ""
}
## TEMPLATE SOURCE: ##
''')

    def setUp(self):
        # Point the app at a throwaway sync directory for each test.
        self.sync_directory = tempfile.mkdtemp()
        self.settings_change = override_settings(
            POWER_PAGES={'SYNC_DIRECTORY': self.sync_directory}
        )
        self.settings_change.enable()

    def tearDown(self):
        # Restore settings before removing the directory.
        self.settings_change.disable()
        shutil.rmtree(self.sync_directory)

    def _make_file(self, relative_path, content, make_dirs=True):
        # Helper: create a UTF-8 file under the sync directory, creating
        # intermediate directories when requested.
        absolute_path = os.path.join(self.sync_directory, relative_path)
        if make_dirs:
            absolute_dir_path, file_name = os.path.split(absolute_path)
            os.makedirs(absolute_dir_path)
        with codecs.open(absolute_path, 'w', encoding='utf-8') as f:
            f.write(content)
class PageFileDumperTestCase(BaseSyncTestCase):
    """Tests for PageFileDumper: mapping Page objects to .page files."""

    # --- path mapping -------------------------------------------------

    def test_relative_path_root(self):
        # The site root maps to the special '_index_.page' filename.
        page = Page.objects.create(
            url='/',
        )
        dumper = PageFileDumper(page)
        self.assertEqual(dumper.relative_path(), '_index_.page')

    def test_relative_path_nested(self):
        page = Page.objects.create(
            url='/a/b/test/',
        )
        dumper = PageFileDumper(page)
        self.assertEqual(dumper.relative_path(), 'a/b/test.page')

    def test_relative_path_txt(self):
        # URLs with an extension keep their filename verbatim.
        page = Page.objects.create(
            url='robots.txt',
        )
        dumper = PageFileDumper(page)
        self.assertEqual(dumper.relative_path(), 'robots.txt')

    def test_absolute_path(self):
        page = Page.objects.create(
            url='/a/b/test/',
        )
        dumper = PageFileDumper(page)
        self.assertEqual(
            dumper.absolute_path(),
            os.path.join(self.sync_directory, 'a/b/test.page')
        )

    # --- field serialization ------------------------------------------

    def test_page_fields_default(self):
        page = Page.objects.create(
            url='/a/b/test/',
        )
        dumper = PageFileDumper(page)
        self.assertEqual(
            dumper.page_fields(),
            {
                'alias': None,
                'description': '',
                'keywords': '',
                'page_processor': 'powerpages.DefaultPageProcessor',
                'page_processor_config': None,
                'template': '',
                'title': ''
            }
        )

    def test_page_fields_full(self):
        page = Page.objects.create(
            url='/a/b/test/',
            alias='test-page',
            description='At vero eos et accusamus et iusto odio',
            keywords='lorem ipsum dolor sit amet',
            page_processor='powerpages.RedirectProcessor',
            page_processor_config={
                'to url': '/test/'
            },
            template='<h1>{{ website_page.title }}</h1>\n',
            title='De Finibus Bonorum et Malorum',
        )
        dumper = PageFileDumper(page)
        self.assertEqual(
            dumper.page_fields(),
            {
                'alias': 'test-page',
                'description': 'At vero eos et accusamus et iusto odio',
                'keywords': 'lorem ipsum dolor sit amet',
                'page_processor': 'powerpages.RedirectProcessor',
                'page_processor_config': {
                    'to url': '/test/'
                },
                'template': '<h1>{{ website_page.title }}</h1>\n',
                'title': 'De Finibus Bonorum et Malorum'
            }
        )

    def test_file_contents_default(self):
        page = Page.objects.create(
            url='/a/b/test/',
        )
        dumper = PageFileDumper(page)
        self.assertEqual(
            dumper.file_contents(),
            _file_contents('''{
  "alias": null,
  "description": "",
  "keywords": "",
  "page_processor": "powerpages.DefaultPageProcessor",
  "page_processor_config": null,
  "title": ""
}
## TEMPLATE SOURCE: ##
''')
        )

    def test_file_contents_full(self):
        page = Page.objects.create(
            url='/a/b/test/',
            alias='test-page',
            description='At vero eos et accusamus et iusto odio',
            keywords='lorem ipsum dolor sit amet',
            page_processor='powerpages.RedirectProcessor',
            page_processor_config={
                'to url': '/test/'
            },
            template='<h1>{{ website_page.title }}</h1>\n',
            title='De Finibus Bonorum et Malorum',
        )
        dumper = PageFileDumper(page)
        self.assertEqual(
            dumper.file_contents(),
            _file_contents('''{
  "alias": "test-page",
  "description": "At vero eos et accusamus et iusto odio",
  "keywords": "lorem ipsum dolor sit amet",
  "page_processor": "powerpages.RedirectProcessor",
  "page_processor_config": {
    "to url": "/test/"
  },
  "title": "De Finibus Bonorum et Malorum"
}
## TEMPLATE SOURCE: ##
<h1>{{ website_page.title }}</h1>
''')
        )

    # --- file existence & sync status ---------------------------------

    def test_file_exists_false(self):
        page = Page.objects.create(
            url='/',
        )
        dumper = PageFileDumper(page)
        self.assertFalse(dumper.file_exists())

    def test_file_exists_true(self):
        page = Page.objects.create(
            url='/',
        )
        # Touch the target file so the dumper finds it.
        open(os.path.join(self.sync_directory, '_index_.page'), 'a').close()
        dumper = PageFileDumper(page)
        self.assertTrue(dumper.file_exists())

    def test_status_added(self):
        page = Page.objects.create(
            url='/a/b/test/',
        )
        dumper = PageFileDumper(page)
        self.assertEqual(dumper.status(), SyncStatus.ADDED)

    def test_status_no_changes(self):
        page = Page.objects.create(
            url='/a/b/test/',
        )
        PageFileDumper(page).save()
        dumper = PageFileDumper(page)
        self.assertEqual(dumper.status(), SyncStatus.NO_CHANGES)

    def test_status_no_changes_saved(self):
        # Re-saving the page without field changes must not mark it modified.
        page = Page.objects.create(
            url='/a/b/test/',
        )
        PageFileDumper(page).save()
        page.save()
        dumper = PageFileDumper(page)
        self.assertEqual(dumper.status(), SyncStatus.NO_CHANGES)

    def _test_status_modified(self, **kwargs):
        # Helper: dump the page, change the given fields, expect MODIFIED.
        page = Page.objects.create(
            url='/a/b/test/',
        )
        PageFileDumper(page).save()
        for attr, value in kwargs.items():
            setattr(page, attr, value)
        page.save()
        dumper = PageFileDumper(page)
        self.assertEqual(dumper.status(), SyncStatus.MODIFIED)

    def test_status_modified_template(self):
        self._test_status_modified(template='<h1>TEST</h1>\n')

    def test_status_modified_title(self):
        self._test_status_modified(title='TEST')

    def test_status_modified_description(self):
        self._test_status_modified(description='Test test test.')

    def test_status_modified_keywords(self):
        self._test_status_modified(keywords='test test test')

    def test_status_modified_alias(self):
        self._test_status_modified(alias='test-page')

    def test_status_modified_page_processor(self):
        self._test_status_modified(
            page_processor='powerpages.RedirectProcessor'
        )

    def test_status_modified_page_processor_config(self):
        self._test_status_modified(page_processor_config={'sitemap': False})

    # --- saving & diffing ---------------------------------------------

    def test_save_file_contents(self):
        page = Page.objects.create(
            url='/a/b/test/',
            alias='test-page',
            description='At vero eos et accusamus et iusto odio',
            keywords='lorem ipsum dolor sit amet',
            page_processor='powerpages.RedirectProcessor',
            page_processor_config={
                'to url': '/test/'
            },
            template='<h1>{{ website_page.title }}</h1>\n',
            title='De Finibus Bonorum et Malorum',
        )
        PageFileDumper(page).save()
        path = os.path.join(self.sync_directory, 'a/b/test.page')
        self.assertTrue(os.path.exists(path))
        with open(path) as f:
            file_contents = f.read()
        self.assertEqual(
            file_contents,
            _file_contents('''{
  "alias": "test-page",
  "description": "At vero eos et accusamus et iusto odio",
  "keywords": "lorem ipsum dolor sit amet",
  "page_processor": "powerpages.RedirectProcessor",
  "page_processor_config": {
    "to url": "/test/"
  },
  "title": "De Finibus Bonorum et Malorum"
}
## TEMPLATE SOURCE: ##
<h1>{{ website_page.title }}</h1>
''')
        )

    def test_save_is_dirty(self):
        # Saving the file clears the page's dirty flag.
        page = Page.objects.create(
            url='/a/b/test/',
            is_dirty=True
        )
        PageFileDumper(page).save()
        page = Page.objects.get(pk=page.pk)
        self.assertFalse(page.is_dirty)

    def test_diff_template_modified(self):
        page = Page.objects.create(
            url='/a/b/test/',
            template="<p>\nTEST\n<\p>\n"
        )
        PageFileDumper(page).save()
        page.template = "<p>\nCHANGE\n<\p>\n"
        page.save()
        dumper = PageFileDumper(page)
        self.assertEqual(
            dumper.diff(),
            '''--- Current content
+++ Coming changes
@@ -8,5 +8,5 @@
 }
 ## TEMPLATE SOURCE: ##
 <p>
-TEST
+CHANGE
 <\p>'''
        )

    def test_diff_title_modified(self):
        page = Page.objects.create(
            url='/a/b/test/',
            title="TEST"
        )
        PageFileDumper(page).save()
        page.title = "CHANGE"
        page.save()
        dumper = PageFileDumper(page)
        self.assertEqual(
            dumper.diff(),
            _file_contents('''--- Current content
+++ Coming changes
@@ -4,6 +4,6 @@
   "keywords": "",
   "page_processor": "powerpages.DefaultPageProcessor",
   "page_processor_config": null,
-  "title": "TEST"
+  "title": "CHANGE"
 }
 ## TEMPLATE SOURCE: ##''')
        )
class PageFileLoaderTestCase(BaseSyncTestCase):
    """Tests for FilePageLoader: mapping .page files back to Page objects."""

    def test_data_normalization(self):
        # Hand-edited and normalized file contents must parse identically.
        self.assertEqual(
            normalize_page_fields(
                FilePageLoader.load(self.normalized_content)
            ),
            normalize_page_fields(
                FilePageLoader.load(self.edited_content)
            )
        )

    # --- path-to-URL mapping ------------------------------------------

    def test_url_root(self):
        loader = FilePageLoader('_index_.page')
        self.assertEqual(loader.url(), '/')

    def test_url_nested(self):
        loader = FilePageLoader('a/b/test.page')
        self.assertEqual(loader.url(), '/a/b/test/')

    def test_relative_path_txt(self):
        # Non-.page extensions keep the filename in the URL.
        loader = FilePageLoader('robots.txt')
        self.assertEqual(loader.url(), '/robots.txt')

    def test_absolute_path(self):
        loader = FilePageLoader('a/b/test.page')
        self.assertEqual(
            loader.absolute_path(),
            os.path.join(self.sync_directory, 'a/b/test.page')
        )

    # --- field parsing -------------------------------------------------

    def test_page_fields_normalized(self):
        path = 'a/b/test.page'
        self._make_file(path, self.normalized_content)
        loader = FilePageLoader(path)
        self.assertEqual(
            loader.page_fields(),
            {
                'alias': 'test-page',
                'description': 'At vero eos et accusamus et iusto odio',
                'keywords': 'lorem ipsum dolor sit amet',
                'page_processor': 'powerpages.RedirectProcessor',
                'page_processor_config': {
                    'to url': '/test/'
                },
                'template': '<h1>{{ website_page.title }}</h1>\n',
                'title': 'De Finibus Bonorum et Malorum'
            }
        )

    def test_page_fields_edited(self):
        # An edited (unsorted, compact) file yields the same fields.
        path = 'a/b/test.page'
        self._make_file(path, self.edited_content)
        loader = FilePageLoader(path)
        self.assertEqual(
            loader.page_fields(),
            {
                'alias': 'test-page',
                'description': 'At vero eos et accusamus et iusto odio',
                'keywords': 'lorem ipsum dolor sit amet',
                'page_processor': 'powerpages.RedirectProcessor',
                'page_processor_config': {
                    'to url': '/test/'
                },
                'template': '<h1>{{ website_page.title }}</h1>\n',
                'title': 'De Finibus Bonorum et Malorum'
            }
        )

    def test_file_contents(self):
        path = 'a/b/test.page'
        self._make_file(path, self.edited_content)
        loader = FilePageLoader(path)
        self.assertEqual(loader.file_contents(), self.edited_content)

    def test_page_existing(self):
        page = Page.objects.create(url='/a/b/test/')
        path = 'a/b/test.page'
        self._make_file(path, self.normalized_content)
        loader = FilePageLoader(path)
        self.assertEqual(loader.page(), page)

    def test_page_non_existing(self):
        path = 'a/b/test.page'
        self._make_file(path, self.normalized_content)
        loader = FilePageLoader(path)
        self.assertIsNone(loader.page())

    # --- sync status ---------------------------------------------------

    def test_status_added(self):
        path = 'a/b/test.page'
        self._make_file(path, self.edited_content)
        loader = FilePageLoader(path)
        self.assertEqual(loader.status(), SyncStatus.ADDED)

    def test_status_no_changes(self):
        path = 'a/b/test.page'
        self._make_file(path, self.edited_content)
        FilePageLoader(path).save()
        loader = FilePageLoader(path)
        self.assertEqual(loader.status(), SyncStatus.NO_CHANGES)

    def _test_status_modified(self, **kwargs):
        # Helper: load the file into a page, change the given fields on the
        # page, then expect the loader to report MODIFIED.
        path = 'a/b/test.page'
        self._make_file(path, self.edited_content)
        FilePageLoader(path).save()
        page = Page.objects.get(url='/a/b/test/')
        for attr, value in kwargs.items():
            setattr(page, attr, value)
        page.save()
        loader = FilePageLoader(path)
        self.assertEqual(loader.status(), SyncStatus.MODIFIED)

    def test_status_modified_template(self):
        self._test_status_modified(template='<h1>TEST</h1>')

    def test_status_modified_title(self):
        self._test_status_modified(title='TEST')

    def test_status_modified_description(self):
        self._test_status_modified(description='Test test test.')

    def test_status_modified_keywords(self):
        self._test_status_modified(keywords='test test test')

    def test_status_modified_alias(self):
        self._test_status_modified(alias='test-page-2')

    def test_status_modified_page_processor(self):
        self._test_status_modified(
            page_processor='powerpages.NotFoundProcessor'
        )

    def test_status_modified_page_processor_config(self):
        self._test_status_modified(page_processor_config={'sitemap': False})

    # --- diffing -------------------------------------------------------

    def test_diff_template_modified(self):
        path = 'a/b/test.page'
        self._make_file(path, self.edited_content)
        FilePageLoader(path).save()
        page = Page.objects.get(url='/a/b/test/')
        page.template = 'TEST'
        page.save()
        loader = FilePageLoader(path)
        self.assertEqual(
            loader.diff(),
            _file_contents('''--- Current content
+++ Coming changes
@@ -9,4 +9,4 @@
   "title": "De Finibus Bonorum et Malorum"
 }
 ## TEMPLATE SOURCE: ##
-TEST
+<h1>{{ website_page.title }}</h1>''')
        )

    def test_diff_title_modified(self):
        path = 'a/b/test.page'
        self._make_file(path, self.edited_content)
        FilePageLoader(path).save()
        page = Page.objects.get(url='/a/b/test/')
        page.title = 'CHANGE'
        page.save()
        loader = FilePageLoader(path)
        self.assertEqual(
            loader.diff(),
            _file_contents('''--- Current content
+++ Coming changes
@@ -6,7 +6,7 @@
   "page_processor_config": {
     "to url": "/test/"
   },
-  "title": "CHANGE"
+  "title": "De Finibus Bonorum et Malorum"
 }
 ## TEMPLATE SOURCE: ##
 <h1>{{ website_page.title }}</h1>''')
        )
class WebsiteDumpOperationTestCase(BaseSyncTestCase):
    """Tests for WebsiteDumpOperation: dumping Page objects to .page files,
    covering file creation, modification and deletion, plus the summary
    counters ([A]/[M]/[D]) written to stdout."""

    def test_dump_new_file(self):
        """A page without a file counterpart is dumped as a new file ([A])."""
        Page.objects.create(
            url='/a/b/test/',
            alias='test-page',
            description='At vero eos et accusamus et iusto odio',
            keywords='lorem ipsum dolor sit amet',
            page_processor='powerpages.RedirectProcessor',
            page_processor_config={
                'to url': '/test/'
            },
            template='<h1>{{ website_page.title }}</h1>\n',
            title='De Finibus Bonorum et Malorum',
        )
        stdout = StringIO()
        stderr = StringIO()
        operation = WebsiteDumpOperation(
            root_url='/a/b/test/',
            stdout=stdout,
            stderr=stderr,
            get_input=lambda p: 'y',  # auto-confirm any interactive prompt
            dry_run=False,
            no_interactive=False,
            quiet=False,
            force=False,
            git_add=False,
            no_color=True,
        )
        operation.run()
        # Check result file:
        path = os.path.join(self.sync_directory, 'a/b/test.page')
        self.assertTrue(os.path.exists(path))
        with codecs.open(path, encoding='utf-8') as f:
            file_contents = f.read()
        self.assertEqual(
            file_contents,
            _file_contents('''{
"alias": "test-page",
"description": "At vero eos et accusamus et iusto odio",
"keywords": "lorem ipsum dolor sit amet",
"page_processor": "powerpages.RedirectProcessor",
"page_processor_config": {
"to url": "/test/"
},
"title": "De Finibus Bonorum et Malorum"
}
## TEMPLATE SOURCE: ##
<h1>{{ website_page.title }}</h1>
''')
        )
        output = stdout.getvalue()
        # Check stdout:
        self.assertIn('[A] = 1', output)  # 1 file created
        self.assertNotIn('[M] = ', output)  # no modifications
        self.assertNotIn('[D] = ', output)  # no deletions
        # Check stderr:
        self.assertEqual(stderr.getvalue(), '')  # no errors

    def test_dump_new_page_with_command(self):
        """Same as test_dump_new_file, but driven through the
        ``website_dump`` management command entry point."""
        Page.objects.create(
            url='/a/b/test/',
            alias='test-page',
            description='At vero eos et accusamus et iusto odio',
            keywords='lorem ipsum dolor sit amet',
            page_processor='powerpages.RedirectProcessor',
            page_processor_config={
                'to url': '/test/'
            },
            template='<h1>{{ website_page.title }}</h1>\n',
            title='De Finibus Bonorum et Malorum',
        )
        stdout = StringIO()
        stderr = StringIO()
        call_command(
            'website_dump',
            root_url='/a/b/test/',
            stdout=stdout,
            stderr=stderr,
            get_input=lambda p: 'y',  # auto-confirm any interactive prompt
            dry_run=False,
            no_interactive=False,
            quiet=False,
            force=False,
            git_add=False,
            no_color=True,
        )
        # Check result file:
        path = os.path.join(self.sync_directory, 'a/b/test.page')
        self.assertTrue(os.path.exists(path))
        with codecs.open(path, encoding='utf-8') as f:
            file_contents = f.read()
        self.assertEqual(
            file_contents,
            _file_contents('''{
"alias": "test-page",
"description": "At vero eos et accusamus et iusto odio",
"keywords": "lorem ipsum dolor sit amet",
"page_processor": "powerpages.RedirectProcessor",
"page_processor_config": {
"to url": "/test/"
},
"title": "De Finibus Bonorum et Malorum"
}
## TEMPLATE SOURCE: ##
<h1>{{ website_page.title }}</h1>
''')
        )
        output = stdout.getvalue()
        # Check stdout:
        self.assertIn('[A] = 1', output)  # 1 file created
        self.assertNotIn('[M] = ', output)  # no modifications
        self.assertNotIn('[D] = ', output)  # no deletions
        # Check stderr:
        self.assertEqual(stderr.getvalue(), '')  # no errors

    def test_dump_modified_file(self):
        """A page changed after its file was dumped overwrites the file
        and is reported as modified ([M])."""
        page = Page.objects.create(
            url='/a/b/test/',
            alias='test-page',
            description='At vero eos et accusamus et iusto odio',
            keywords='lorem ipsum dolor sit amet',
            page_processor='powerpages.RedirectProcessor',
            page_processor_config={
                'to url': '/test/'
            },
            template='<h1>{{ website_page.title }}</h1>\n',
            title='De Finibus Bonorum et Malorum',
        )
        PageFileDumper(page).save()  # file is created
        page.title = "CHANGE!"
        page.save()
        stdout = StringIO()
        stderr = StringIO()
        operation = WebsiteDumpOperation(
            root_url='/a/b/test/',
            stdout=stdout,
            stderr=stderr,
            get_input=lambda p: 'y',  # auto-confirm any interactive prompt
            dry_run=False,
            no_interactive=False,
            quiet=False,
            force=False,
            git_add=False,
            no_color=True,
        )
        operation.run()
        # Check result file:
        path = os.path.join(self.sync_directory, 'a/b/test.page')
        self.assertTrue(os.path.exists(path))
        with codecs.open(path, encoding='utf-8') as f:
            file_contents = f.read()
        self.assertEqual(
            file_contents,
            _file_contents('''{
"alias": "test-page",
"description": "At vero eos et accusamus et iusto odio",
"keywords": "lorem ipsum dolor sit amet",
"page_processor": "powerpages.RedirectProcessor",
"page_processor_config": {
"to url": "/test/"
},
"title": "CHANGE!"
}
## TEMPLATE SOURCE: ##
<h1>{{ website_page.title }}</h1>
''')
        )
        output = stdout.getvalue()
        # Check stdout:
        self.assertNotIn('[A] = ', output)  # no additions
        self.assertIn('[M] = 1', output)  # 1 file modified
        self.assertNotIn('[D] = ', output)  # no deletions
        # Check stderr:
        self.assertEqual(stderr.getvalue(), '')  # no errors

    def test_dump_deleted_file(self):
        """A file whose page was deleted is removed and reported ([D])."""
        # root page:
        root = Page.objects.create(url='/')
        PageFileDumper(root).save()
        # sibling page:
        sibling = Page.objects.create(url='/dummy/')
        PageFileDumper(sibling).save()
        # TODO: handle removing all children
        page = Page.objects.create(
            url='/test/',
            alias='test-page',
            description='At vero eos et accusamus et iusto odio',
            keywords='lorem ipsum dolor sit amet',
            page_processor='powerpages.RedirectProcessor',
            page_processor_config={
                'to url': '/test/'
            },
            template='<h1>{{ website_page.title }}</h1>\n',
            title='De Finibus Bonorum et Malorum',
        )
        PageFileDumper(page).save()  # file is created
        page.delete()
        stdout = StringIO()
        stderr = StringIO()
        operation = WebsiteDumpOperation(
            root_url='/',
            stdout=stdout,
            stderr=stderr,
            get_input=lambda p: 'y',  # auto-confirm any interactive prompt
            dry_run=False,
            no_interactive=False,
            quiet=False,
            force=False,
            git_add=False,
            no_color=True,
        )
        operation.run()
        # Check result file:
        path = os.path.join(self.sync_directory, 'test.page')
        self.assertFalse(os.path.exists(path))
        output = stdout.getvalue()
        # Check stdout:
        self.assertNotIn('[A] = ', output)  # no additions
        self.assertNotIn('[M] = ', output)  # no modifications
        self.assertIn('[D] = 1', output)  # 1 file deleted
        # Check stderr:
        self.assertEqual(stderr.getvalue(), '')  # no errors
class WebsiteLoadOperationTestCase(BaseSyncTestCase):
    """Tests for WebsiteLoadOperation: loading .page files into Page
    objects, covering page creation, modification and deletion, plus the
    summary counters ([A]/[M]/[D]) written to stdout."""

    def test_load_new_page(self):
        """A file without a page counterpart creates a new page ([A])."""
        # root page:
        root = Page.objects.create(url='/')
        PageFileDumper(root).save()
        # sibling page:
        sibling = Page.objects.create(url='/dummy/')
        PageFileDumper(sibling).save()
        self._make_file(
            'test.page',
            _file_contents('''{
"alias": "test-page",
"description": "At vero eos et accusamus et iusto odio",
"keywords": "lorem ipsum dolor sit amet",
"page_processor": "powerpages.RedirectProcessor",
"page_processor_config": {
"to url": "/test/"
},
"title": "De Finibus Bonorum et Malorum"
}
## TEMPLATE SOURCE: ##
<h1>{{ website_page.title }}</h1>
'''),
            make_dirs=False
        )
        stdout = StringIO()
        stderr = StringIO()
        operation = WebsiteLoadOperation(
            root_url='/',
            stdout=stdout,
            stderr=stderr,
            get_input=lambda p: 'y',  # auto-confirm any interactive prompt
            dry_run=False,
            no_interactive=False,
            quiet=False,
            force=False,
            git_add=False,
            no_color=True,
        )
        operation.run()
        # Check result file:
        page = Page.objects.filter(url='/test/').first()
        self.assertIsNotNone(page)
        self.assertEqual(page.alias, "test-page")
        self.assertEqual(
            page.description, "At vero eos et accusamus et iusto odio"
        )
        self.assertEqual(page.keywords, "lorem ipsum dolor sit amet")
        self.assertEqual(page.page_processor, "powerpages.RedirectProcessor")
        self.assertEqual(page.page_processor_config, {"to url": "/test/"})
        self.assertEqual(page.title, "De Finibus Bonorum et Malorum")
        self.assertEqual(page.template, '<h1>{{ website_page.title }}</h1>\n')
        output = stdout.getvalue()
        # Check stdout:
        self.assertIn('[A] = 1', output)  # 1 page added
        self.assertNotIn('[M] = ', output)  # no modifications
        self.assertNotIn('[D] = ', output)  # no deletions
        # Check stderr:
        self.assertEqual(stderr.getvalue(), '')  # no errors

    def test_load_new_page_using_command(self):
        """Same as test_load_new_page, but driven through the
        ``website_load`` management command entry point."""
        # root page:
        root = Page.objects.create(url='/')
        PageFileDumper(root).save()
        # sibling page:
        sibling = Page.objects.create(url='/dummy/')
        PageFileDumper(sibling).save()
        self._make_file(
            'test.page',
            _file_contents('''{
"alias": "test-page",
"description": "At vero eos et accusamus et iusto odio",
"keywords": "lorem ipsum dolor sit amet",
"page_processor": "powerpages.RedirectProcessor",
"page_processor_config": {
"to url": "/test/"
},
"title": "De Finibus Bonorum et Malorum"
}
## TEMPLATE SOURCE: ##
<h1>{{ website_page.title }}</h1>
'''),
            make_dirs=False
        )
        stdout = StringIO()
        stderr = StringIO()
        call_command(
            'website_load',
            root_url='/',
            stdout=stdout,
            stderr=stderr,
            get_input=lambda p: 'y',  # auto-confirm any interactive prompt
            dry_run=False,
            no_interactive=False,
            quiet=False,
            force=False,
            git_add=False,
            no_color=True,
        )
        # Check result file:
        page = Page.objects.filter(url='/test/').first()
        self.assertIsNotNone(page)
        self.assertEqual(page.alias, "test-page")
        self.assertEqual(
            page.description, "At vero eos et accusamus et iusto odio"
        )
        self.assertEqual(page.keywords, "lorem ipsum dolor sit amet")
        self.assertEqual(page.page_processor,
                         "powerpages.RedirectProcessor")
        self.assertEqual(page.page_processor_config, {"to url": "/test/"})
        self.assertEqual(page.title, "De Finibus Bonorum et Malorum")
        self.assertEqual(page.template,
                         '<h1>{{ website_page.title }}</h1>\n')
        output = stdout.getvalue()
        # Check stdout:
        self.assertIn('[A] = 1', output)  # 1 page added
        self.assertNotIn('[M] = ', output)  # no modifications
        self.assertNotIn('[D] = ', output)  # no deletions
        # Check stderr:
        self.assertEqual(stderr.getvalue(), '')  # no errors

    def test_load_modified_page(self):
        """A file that differs from its existing page updates the page
        and is reported as modified ([M])."""
        # root page:
        root = Page.objects.create(url='/')
        PageFileDumper(root).save()
        # sibling page:
        sibling = Page.objects.create(url='/dummy/')
        PageFileDumper(sibling).save()
        Page.objects.create(
            url='/test/',
            alias='test-page',
            description='At vero eos et accusamus et iusto odio',
            keywords='lorem ipsum dolor sit amet',
            page_processor='powerpages.RedirectProcessor',
            page_processor_config={
                'to url': '/test/'
            },
            template='<h1>{{ website_page.title }}</h1>\n',
            title='De Finibus Bonorum et Malorum',
        )
        self._make_file(
            'test.page',
            _file_contents('''{
"alias": "test-page",
"description": "At vero eos et accusamus et iusto odio",
"keywords": "lorem ipsum dolor sit amet",
"page_processor": "powerpages.RedirectProcessor",
"page_processor_config": {
"to url": "/test/"
},
"title": "TEST"
}
## TEMPLATE SOURCE: ##
<h1>CHANGE</h1>
'''),
            make_dirs=False
        )
        stdout = StringIO()
        stderr = StringIO()
        operation = WebsiteLoadOperation(
            root_url='/',
            stdout=stdout,
            stderr=stderr,
            get_input=lambda p: 'y',  # auto-confirm any interactive prompt
            dry_run=False,
            no_interactive=False,
            quiet=False,
            force=False,
            git_add=False,
            no_color=True,
        )
        operation.run()
        # Check result file:
        page = Page.objects.filter(url='/test/').first()
        self.assertIsNotNone(page)
        self.assertEqual(page.alias, "test-page")
        self.assertEqual(
            page.description, "At vero eos et accusamus et iusto odio"
        )
        self.assertEqual(page.keywords, "lorem ipsum dolor sit amet")
        self.assertEqual(page.page_processor, "powerpages.RedirectProcessor")
        self.assertEqual(page.page_processor_config, {"to url": "/test/"})
        self.assertEqual(page.title, "TEST")
        self.assertEqual(page.template, '<h1>CHANGE</h1>\n')
        output = stdout.getvalue()
        # Check stdout:
        self.assertNotIn('[A] = ', output)  # no additions
        self.assertIn('[M] = 1', output)  # 1 page modified
        self.assertNotIn('[D] = ', output)  # no deletions
        # Check stderr:
        self.assertEqual(stderr.getvalue(), '')  # no errors

    def test_load_deleted_page(self):
        """A page without a file counterpart is deleted ([D])."""
        # root page:
        root = Page.objects.create(url='/')
        PageFileDumper(root).save()
        # sibling page:
        sibling = Page.objects.create(url='/dummy/')
        PageFileDumper(sibling).save()
        Page.objects.create(
            url='/test/',
            alias='test-page',
            description='At vero eos et accusamus et iusto odio',
            keywords='lorem ipsum dolor sit amet',
            page_processor='powerpages.RedirectProcessor',
            page_processor_config={
                'to url': '/test/'
            },
            template='<h1>{{ website_page.title }}</h1>\n',
            title='De Finibus Bonorum et Malorum',
        )
        # no file!
        stdout = StringIO()
        stderr = StringIO()
        operation = WebsiteLoadOperation(
            root_url='/',
            stdout=stdout,
            stderr=stderr,
            get_input=lambda p: 'y',  # auto-confirm any interactive prompt
            dry_run=False,
            no_interactive=False,
            quiet=False,
            force=False,
            git_add=False,
            no_color=True,
        )
        operation.run()
        # Check result file:
        page = Page.objects.filter(url='/test/').first()
        self.assertIsNone(page)
        output = stdout.getvalue()
        # Check stdout:
        self.assertNotIn('[A] = ', output)  # no additions
        self.assertNotIn('[M] = ', output)  # no modifications
        self.assertIn('[D] = 1', output)  # 1 page deleted
        # Check stderr:
        self.assertEqual(stderr.getvalue(), '')  # no errors
| {
"content_hash": "b938ce69bd26c95d3d59c0a9ef9f0928",
"timestamp": "",
"source": "github",
"line_count": 1045,
"max_line_length": 78,
"avg_line_length": 32.03732057416268,
"alnum_prop": 0.5535111562471997,
"repo_name": "Open-E-WEB/django-powerpages",
"id": "26666b26cacec5d164f722932465baf6c9ee17c4",
"size": "33504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "powerpages/tests/test_sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "838"
},
{
"name": "HTML",
"bytes": "1532"
},
{
"name": "JavaScript",
"bytes": "5732"
},
{
"name": "Makefile",
"bytes": "730"
},
{
"name": "Python",
"bytes": "184250"
}
],
"symlink_target": ""
} |
"""Support to send data to a Splunk instance."""
import asyncio
from http import HTTPStatus
import json
import logging
import time
from aiohttp import ClientConnectionError, ClientResponseError
from hass_splunk import SplunkPayloadError, hass_splunk
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
CONF_TOKEN,
CONF_VERIFY_SSL,
EVENT_STATE_CHANGED,
)
from homeassistant.helpers import state as state_helper
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import FILTER_SCHEMA
from homeassistant.helpers.json import JSONEncoder
_LOGGER = logging.getLogger(__name__)

DOMAIN = "splunk"
CONF_FILTER = "filter"

# Defaults for the optional configuration keys.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8088
DEFAULT_SSL = False
DEFAULT_NAME = "HASS"

# Validation schema for the `splunk:` section of configuration.yaml.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_TOKEN): cv.string,
                vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
                vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                # Use the DEFAULT_SSL constant instead of a hard-coded
                # False so the default stays in one place (same value).
                vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
                vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                # Empty filter config => include all entities.
                vol.Optional(CONF_FILTER, default={}): FILTER_SCHEMA,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the Splunk component.

    Validates connectivity/token with the Splunk HTTP Event Collector,
    queues a startup event, and registers a state-change listener that
    forwards filtered entity states to Splunk.
    """
    conf = config[DOMAIN]
    host = conf.get(CONF_HOST)
    port = conf.get(CONF_PORT)
    token = conf.get(CONF_TOKEN)
    use_ssl = conf[CONF_SSL]
    verify_ssl = conf.get(CONF_VERIFY_SSL)
    name = conf.get(CONF_NAME)
    entity_filter = conf[CONF_FILTER]

    event_collector = hass_splunk(
        session=async_get_clientsession(hass),
        host=host,
        port=port,
        token=token,
        use_ssl=use_ssl,
        verify_ssl=verify_ssl,
    )

    # Only verify the token here; a connectivity failure should not
    # abort setup (events are queued and retried by hass_splunk).
    if not await event_collector.check(connectivity=False, token=True, busy=False):
        return False

    # Announce the integration start; send=False only queues the event.
    payload = {
        "time": time.time(),
        "host": name,
        "event": {
            "domain": DOMAIN,
            "meta": "Splunk integration has started",
        },
    }
    await event_collector.queue(json.dumps(payload, cls=JSONEncoder), send=False)

    async def splunk_event_listener(event):
        """Listen for new messages on the bus and sends them to Splunk."""
        state = event.data.get("new_state")
        # Skip removed entities and entities excluded by the filter.
        if state is None or not entity_filter(state.entity_id):
            return
        try:
            # Prefer a numeric representation of the state when possible.
            _state = state_helper.state_as_number(state)
        except ValueError:
            _state = state.state
        payload = {
            "time": event.time_fired.timestamp(),
            "host": name,
            "event": {
                "domain": state.domain,
                "entity_id": state.object_id,
                "attributes": dict(state.attributes),
                "value": _state,
            },
        }
        try:
            await event_collector.queue(json.dumps(payload, cls=JSONEncoder), send=True)
        except SplunkPayloadError as err:
            # An invalid token is an error; other payload issues are
            # transient and only warned about.
            if err.status == HTTPStatus.UNAUTHORIZED:
                _LOGGER.error(err)
            else:
                _LOGGER.warning(err)
        except ClientConnectionError as err:
            _LOGGER.warning(err)
        except asyncio.TimeoutError:
            _LOGGER.warning("Connection to %s:%s timed out", host, port)
        except ClientResponseError as err:
            _LOGGER.error(err.message)

    hass.bus.async_listen(EVENT_STATE_CHANGED, splunk_event_listener)
    return True
| {
"content_hash": "113e2b718910548af984e47cb550957b",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 88,
"avg_line_length": 29.5625,
"alnum_prop": 0.6136363636363636,
"repo_name": "aronsky/home-assistant",
"id": "6b40f9b7d58392a2be162f00d5852bd1e08b2a0e",
"size": "3784",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/splunk/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""
HTTP server for controlling the Motion process and serving the Android
application. The module consists of the motion process and the functions for
routing the HTTP requests.
"""
import functools
import logging
import os
import subprocess
import sys
import threading
import time

import requests
from bottle import abort, get, post, request, run, static_file, parse_auth
from watchdog.observers import Observer

import util
from events import SnapshotEventHandler
from notifier import AlertNotifier
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
class MotionProcess(object):
    """
    Encapsulates the motion process handling.

    Tracks the external ``motion`` subprocess, the registered client
    device, and the snapshot hand-off between the filesystem watcher
    and HTTP request threads.
    """

    def __init__(self, settings):
        # Handle of the running `motion` subprocess, or None when stopped.
        self.process = None
        # Identifier of the registered client device, or None.
        self.device = None
        self.control_port = settings['control_port']
        self.settings = settings
        # Set by the watcher when a requested snapshot file was written.
        self.snapshot_event = threading.Event()
        self.latest_snapshot = None
        # Push notifier for the registered device, or None when unregistered.
        self.notifier = None

    def start(self):
        """
        Start the Motion process unless it is already running.
        Aborts with HTTP 409 when no device is registered.
        """
        if self.device is None:
            abort(409, 'Cannot start motion detection without device')
        elif self.process is None:
            LOG.info('Start motion process')
            # Prefer the system-wide config; fall back to the in-tree one.
            default_path = '/usr/local/etc/security-cam/motion.conf'
            config_path = (default_path if os.path.exists(default_path)
                           else 'conf/motion.conf')
            self.process = subprocess.Popen(['motion', '-c', config_path])
        else:
            LOG.info('Motion process already running')

    def stop(self):
        """Kill the Motion process if it is running."""
        if self.process is not None:
            self.process.kill()
            self.process = None

    def status(self):
        """
        Return the server state based on the device and process state:
        'Idle' (no device), 'Ready' (device, no process) or 'Running'.
        """
        if self.device is None:
            return 'Idle'
        elif self.process is None:
            return 'Ready'
        else:
            return 'Running'

    def set_device(self, identifier):
        """
        Register (or, with None, unregister) the device with the server
        and (re)create the matching push notifier.
        """
        self.device = identifier
        if identifier is None:
            self.notifier = None
        else:
            self.notifier = AlertNotifier(self.settings, identifier)

    def alert(self, filename):
        """
        Send a push notification to the registered device.

        Fix: a no-op when no device is registered -- previously this
        raised AttributeError if motion fired after unregistration.
        """
        if self.notifier is not None:
            self.notifier.notify(filename)

    def request_snapshot(self):
        """
        Ask Motion (via its HTTP control port) to take a snapshot and
        block until the watcher reports the written file.
        """
        url = 'http://localhost:%d/0/action/snapshot' % self.control_port
        requests.get(url)
        self.snapshot_event.wait()
        return self.latest_snapshot

    def notify_about_snapshot(self, filename):
        """
        Record the snapshot filename and pulse the event so a pending
        request_snapshot() call returns the image.
        """
        self.latest_snapshot = filename
        # NOTE(review): set() immediately followed by clear() assumes the
        # waiter wakes up in between -- confirm this is reliable enough
        # for concurrent snapshot requests.
        self.snapshot_event.set()
        self.snapshot_event.clear()
# Prefer the system-wide settings file; fall back to the in-tree default.
default_path = '/usr/local/etc/security-cam/settings.cfg'
settings_path = default_path if os.path.exists(default_path) else 'conf/settings.cfg'
settings = util.read_settings(settings_path)
def authenticate(func):
    """
    Decorator that enforces HTTP Basic authentication on a route.

    Credentials are parsed from the ``Authorization`` header and checked
    against the ``user``/``password`` entries of the module settings.

    :param func: the route handler to protect
    :return: the wrapped handler, which aborts with 401 on missing or
        invalid credentials
    """
    # functools.wraps preserves the handler's name/docstring so routes
    # remain distinguishable in logs and introspection.
    @functools.wraps(func)
    def validate(*args, **kwargs):
        """
        Validation function for checking the credentials.
        """
        auth_header = request.headers.get('Authorization')
        if auth_header is None:
            abort(401, 'Access denied')
        credentials = parse_auth(auth_header)
        if (credentials[0] == settings['user'] and
                credentials[1] == settings['password']):
            return func(**kwargs)
        else:
            abort(401, 'Access denied')
    return validate
# Module-level wiring: the Motion controller plus a watchdog observer
# that reports new snapshot files appearing in the captures directory.
motion = MotionProcess(settings)
event_handler = SnapshotEventHandler(motion)
observer = Observer()
observer.schedule(event_handler, settings['captures_path'], recursive=False)
observer.start()
@get('/server/status', apply=[authenticate])
def get_status():
    """
    Return the server state ('Idle', 'Ready' or 'Running') so the
    client can synchronize with the server.
    """
    return motion.status()
@get('/motion/detection/start', apply=[authenticate])
def start_motion_detection():
    """
    Starts the motion process including detection for motion.

    Blocks the request briefly so the camera has time to warm up
    before the client issues follow-up requests.
    """
    motion.start()
    time.sleep(3)  # camera initialization phase
@get('/motion/detection/stop', apply=[authenticate])
def stop_motion_detection():
    """
    Stops the motion process by killing it.
    """
    motion.stop()
@get('/server/action/snapshot', apply=[authenticate])
def make_snapshot():
    """Trigger a new snapshot and serve the resulting JPEG file."""
    return static_file(motion.request_snapshot(), root='captures',
                       mimetype='image/jpg')
# Raw string for the route regex: '\.' in a plain string is an invalid
# escape sequence (DeprecationWarning since Python 3.6).
@get(r'/static/captures/<filename:re:.*\.jpg>', apply=[authenticate])
def send_snapshot(filename):
    """
    Returns the specified snapshot, used for returning either a new current
    snapshot or a snapshot that triggered the motion detection.
    """
    return static_file(filename, root='captures', mimetype='image/jpg')
@post('/device/register', apply=[authenticate])
def register_device():
    """
    Registers the client as device with the server. The identifier has to be
    posted in the ``id`` form field; aborts with 400 when it is missing.
    """
    identifier = request.forms.get('id')
    if identifier:
        # Lazy %-style arguments avoid formatting when DEBUG is disabled.
        LOG.debug('Register device %s', identifier)
        motion.set_device(identifier)
    else:
        abort(400, 'Bad request')
@post('/device/unregister', apply=[authenticate])
def unregister_device():
    """
    Unregisters the device again.

    NOTE(review): this reads the ``identifier`` form field while
    register_device() reads ``id`` -- confirm which key the client
    actually posts; the value is only used for logging here.
    """
    identifier = request.forms.get('identifier')
    # Lazy %-style arguments avoid formatting when DEBUG is disabled.
    LOG.debug('Unregister device %s', identifier)
    motion.set_device(None)
if __name__ == '__main__':
    try:
        # Serve on all interfaces; 'paste' provides a threaded WSGI server.
        run(server='paste', host='0.0.0.0', port=4000)
    except Exception as e:
        # Top-level boundary: log and fall through to cleanup.
        LOG.warning(e)
    finally:
        # Always stop the filesystem observer thread before exiting.
        observer.stop()
        sys.exit()
| {
"content_hash": "2a3824d3b7eb3b9c3c0cf5eb00abefb9",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 79,
"avg_line_length": 26.558823529411764,
"alnum_prop": 0.6231608922638823,
"repo_name": "platzhirsch/security-cam",
"id": "bcd7e5f50dfcbdcfc1862443b2d1ecdcd9af0614",
"size": "6343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/securitas/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "83386"
},
{
"name": "Python",
"bytes": "14619"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include, url
from cms import views
from cms.apphook_pool import apphook_pool
from cms.appresolver import get_app_patterns
from cms.constants import SLUG_REGEXP
# Page slugs are terminated by a trailing slash only when Django's
# APPEND_SLASH behaviour is active.
_slug_suffix = '/$' if settings.APPEND_SLASH else '$'
regexp = r'^(?P<slug>%s)' % SLUG_REGEXP + _slug_suffix

# With apphooks installed, the application resolver provides standard
# reverse() support for application URLs; otherwise start empty.
urlpatterns = get_app_patterns() if apphook_pool.get_apphooks() else []

urlpatterns.extend([
    url(r'^cms_login/$', views.login, name='cms_login'),
    url(r'^cms_wizard/', include('cms.wizards.urls')),
    url(regexp, views.details, name='pages-details-by-slug'),
    url(r'^$', views.details, {'slug': ''}, name='pages-root'),
])
| {
"content_hash": "1721fdc8288417e542df4ca0442716ea",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 29.321428571428573,
"alnum_prop": 0.6845310596833131,
"repo_name": "yakky/django-cms",
"id": "c578c84f5f64ac2bd856c4115d4ea30ca1868e2b",
"size": "845",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "cms/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "143463"
},
{
"name": "HTML",
"bytes": "188490"
},
{
"name": "JavaScript",
"bytes": "1286649"
},
{
"name": "Python",
"bytes": "2209198"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
} |
__author__ = 'Neil Butcher'
from PyQt4 import QtGui
class CommandChangePerson(QtGui.QUndoCommand):
    """
    Undoable command that changes a single field of a person.

    The repetitive if/elif dispatch of the original is replaced by a
    key -> attribute map; note that the 'phone' key edits the person's
    ``phone_number`` attribute. Unknown keys make the command a no-op,
    as before (previously undo() relied on an unset starting_value).
    """

    # Maps the command key onto the person attribute it mutates.
    _ATTRIBUTES = {
        'name': 'name',
        'email': 'email',
        'address': 'address',
        'phone': 'phone_number',
    }

    def __init__(self, person, key, value):
        super(CommandChangePerson, self).__init__('Changed the ' + key + ' of ' + person.name)
        self.person = person
        self.key = key
        self.value = value
        attribute = self._ATTRIBUTES.get(key)
        # Capture the current value so undo() can restore it.
        self.starting_value = getattr(person, attribute) if attribute else None

    def _apply(self, value):
        """Set the mapped person attribute to the given value (no-op for
        unknown keys)."""
        attribute = self._ATTRIBUTES.get(self.key)
        if attribute is not None:
            setattr(self.person, attribute, value)

    def redo(self):
        """Apply the new value to the person."""
        self._apply(self.value)

    def undo(self):
        """Restore the person's original value."""
        self._apply(self.starting_value)
class CommandChangePersonBlacklist(QtGui.QUndoCommand):
    """Undoable command toggling a person's blacklist state for one date."""

    def __init__(self, person, blacklist_bool, date):
        description = 'Changed the blacklist status of ' + person.name + ' on date' + str(date)
        super(CommandChangePersonBlacklist, self).__init__(description)
        self.person = person
        self.blacklist_bool = blacklist_bool
        self.date = date

    def _set_blacklisted(self, blacklisted):
        """Blacklist or free the stored date on the person."""
        if blacklisted:
            self.person.blacklist_date(self.date)
        else:
            self.person.free_date(self.date)

    def redo(self):
        """Apply the requested blacklist state."""
        self._set_blacklisted(self.blacklist_bool)

    def undo(self):
        """Apply the opposite of the requested blacklist state."""
        self._set_blacklisted(not self.blacklist_bool)
"content_hash": "a2c315778a01fde5b07384b8c025916f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 95,
"avg_line_length": 34.016666666666666,
"alnum_prop": 0.5825575698187163,
"repo_name": "ergoregion/Rota-Program",
"id": "ef83e310fc467788bb8cdd28e8c708563bd78a99",
"size": "2041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Rota_System/UI/People/commands_person.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "254617"
}
],
"symlink_target": ""
} |
import sys
import os.path
import pprint
# Add the repository root (two levels up from this sample) to the import
# path so the in-tree "windows" package is importable. The original
# `__file__ + "\..\.."` form only resolved with Windows path separators;
# dirname() twice is portable and equivalent.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import windows
import windows.test
import windows.debug
from windows.generated_def.winstructs import *
class MyDebugger(windows.debug.Debugger):
    """Debugger that logs every exception event raised in the debuggee."""

    def on_exception(self, exception):
        record = exception.ExceptionRecord
        print("Got exception {0} at 0x{1:x}".format(
            record.ExceptionCode, record.ExceptionAddress))
class PrintUnicodeString(windows.debug.Breakpoint):
    """
    Breakpoint that prints the UNICODE_STRING argument of the hooked
    function and exits the process when comctl32.dll is requested.
    """

    def __init__(self, addr, argument_position):
        super(PrintUnicodeString, self).__init__(addr)
        # Zero-based position of the UNICODE_STRING* argument.
        self.arg_pos = argument_position

    def trigger(self, dbg, exc):
        p = dbg.current_process
        t = dbg.current_thread
        esp = t.context.Esp
        # x86 stack at function entry: [esp] is the return address, so
        # argument N lives at esp + (N + 1) * 4.
        unicode_string_addr = p.read_ptr(esp + (self.arg_pos + 1) * 4)
        # UNICODE_STRING layout: USHORT Length; USHORT MaximumLength;
        # PWSTR Buffer -- the Buffer pointer sits at offset 4.
        wstring_addr = p.read_ptr(unicode_string_addr + 4)
        dll_loaded = p.read_wstring(wstring_addr).lower()
        print("Loading <{0}>".format(dll_loaded))
        if dll_loaded.endswith("comctl32.dll"):
            print("Ask to load <comctl32.dll>: exiting process")
            dbg.current_process.exit()
# Spawn a 32-bit test process as a debuggee and print every DLL load
# going through ntdll!LdrLoadDll until the debuggee exits.
calc = windows.test.pop_proc_32(dwCreationFlags=DEBUG_PROCESS)
d = MyDebugger(calc)
# The module name (a UNICODE_STRING*) is passed as the third argument
# (zero-based position 2) of LdrLoadDll -- TODO confirm against the
# ntdll prototype for the target Windows version.
d.add_bp(PrintUnicodeString("ntdll!LdrLoadDll", argument_position=2))
d.loop()
| {
"content_hash": "66ebfefae7b5a65c714056d044861b95",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 70,
"avg_line_length": 29.217391304347824,
"alnum_prop": 0.6696428571428571,
"repo_name": "hakril/PythonForWindows",
"id": "cfcfd370d303a6d96251b5ea4fab2c41670c0a94",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/debug/debugger_print_LdrLoaddll.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4087889"
}
],
"symlink_target": ""
} |
"""
.. module:: helpers
:synopsis: XNAT utility functions.
"""
import os
import re
import itertools
from datetime import datetime
from pyxnat.core.resources import Scan
from .constants import (XNAT_TYPES, UNLABELED_TYPES, ASSESSOR_SYNONYMS,
EXPERIMENT_SYNONYM, EXPERIMENT_PATH_TYPES,
TYPE_DESIGNATORS, MODALITY_TYPES, DATE_FMT)
class ParseError(Exception):
    """Raised when an XNAT path or type designator cannot be parsed."""
    pass
def xnat_path(obj):
    """
    Returns the XNAT object path in the canonical form:

    /<project>[/<subject>[/<session>[/type/<name>]*]]

    e.g.::

        /QIN/Breast003/Session02/scan/1/resource/NIFTI/file/volume001.nii.gz

    :param obj: the XNAT object
    :return: the XNAT path
    """
    type_name = type(obj).__name__.lower()
    leaf = xnat_name(obj)
    # Types up to and including experiment appear unqualified in the
    # path; deeper types are prefixed with their type name.
    if type_name not in EXPERIMENT_PATH_TYPES:
        leaf = "%s/%s" % (type_name, leaf)
    parent = obj.parent()
    # Recurse up the hierarchy; the root gets an empty prefix so the
    # result starts with a single '/'.
    prefix = xnat_path(parent) if parent else ''
    return "%s/%s" % (prefix, leaf)
def xnat_name(obj):
    """
    Returns the canonical XNAT object name: the :meth:`xnat_key` with
    the parent key prefix stripped when present, e.g.::

        >> xnat_key(session)
        Breast003_Session01
        >> xnat_name(session)
        Session01

    The scan name is an integer, the other names are strings.

    :param obj: the XNAT object
    :return: the canonical XNAT name
    """
    key = xnat_key(obj)
    # Scan keys are numeric strings.
    if isinstance(obj, Scan):
        return int(key)
    parent = obj.parent()
    if not parent:
        return key
    prefix = xnat_key(parent) + '_'
    return key[len(prefix):] if key.startswith(prefix) else key
def xnat_key(obj):
    """
    Returns the XNAT object key unique within the parent scope: the
    XNAT id for unlabeled types (e.g. Reconstruction), otherwise the
    XNAT label.

    :param obj: the XNAT object
    :return: the XNAT label or id
    """
    if type(obj).__name__.lower() in UNLABELED_TYPES:
        return obj.id()
    return obj.label()
def xnat_children(obj):
    """
    Returns the XNAT objects contained in the given parent object.

    :param obj: the XNAT parent object
    :return: the child objects
    :rtype: list
    """
    children = []
    # pyxnat children() oddly returns the names of the child collection
    # attributes rather than the child objects themselves; call each
    # collection and flatten the results.
    for attr in obj.children():
        children.extend(getattr(obj, attr)())
    return children
def pluralize_type_designator(designator):
    """
    :param designator: the XNAT type name or synonym
    :return: the pluralized type designator
    """
    # 'analysis' is the only irregular plural among the designators.
    if designator == 'analysis':
        return 'analyses'
    # Already-plural designators pass through unchanged.
    return designator if designator.endswith('s') else designator + 's'
def is_pluralized_type_designator(designator):
    """
    :param designator: the XNAT type name or synonym
    :return: whether the designator is a pluralized XNAT type
        designator
    """
    plurals = (pluralize_type_designator(t) for t in TYPE_DESIGNATORS)
    return designator in plurals
def hierarchical_label(*names):
    """
    Returns the XNAT label for the given hierarchical name, qualified
    by a prefix if necessary.

    Example:

    >>> from qixnat.helpers import hierarchical_label
    >>> hierarchical_label('Breast003', 'Session01')
    'Breast003_Session01'
    >>> hierarchical_label('Breast003', 'Breast003_Session01')
    'Breast003_Session01'
    >>> hierarchical_label(3)  # for scan number 3
    3

    :param names: the object names
    :return: the corresponding XNAT label
    """
    names = list(names)
    # Every name must be non-empty/non-zero.
    if not all(names):
        raise ValueError("The XNAT label name hierarchy is invalid: %s" %
                         names)
    last = names.pop()
    if not names:
        return last
    # Recursively build the prefix, then qualify the last name unless it
    # is already qualified.
    prefix = hierarchical_label(*names)
    return last if last.startswith(prefix) else "%s_%s" % (prefix, last)
def rest_type(type_name, modality=None):
    """
    Qualifies the given type name with the modality, e.g.:

    >>> from qixnat.helpers import rest_type
    >>> rest_type('experiment', 'MR')
    'xnat:mrSessionData'

    :param type_name: the XNAT type name
    :param modality: the case-insensitive modality, e.g. ``MR`` or ``CT``
    :return: the full XNAT subtype designation
    :raise ParseError: if the type name is in
        :const:`qixnat.constants.MODALITY_TYPES`
        but modality is None
    """
    # XNAT subtypes an Experiment as Session.
    rest_name = 'session' if type_name == 'experiment' else type_name
    # Fix: the original nested a duplicate `if type_name in
    # MODALITY_TYPES:` check; one test suffices. The docstring also
    # claimed XNATError while the code raises ParseError.
    if type_name in MODALITY_TYPES:
        if not modality:
            raise ParseError("Modality is required to create a XNAT"
                             " %s" % type_name)
        return "xnat:%s%sData" % (modality.lower(), rest_name.capitalize())
    else:
        return "xnat:%sData" % rest_name
def rest_date(value):
    """
    Format a datetime for the XNAT REST interface.

    :param value: the input ``datetime`` object or None
    :return: None, if the input is None, otherwise the input formatted
        as a string using the :const:`qixnat.constants.DATE_FMT`
    :rtype: str
    """
    if not value:
        return None
    return value.strftime(DATE_FMT)
def parse_xnat_date(value):
    """
    Convert an XNAT REST date string to a datetime.

    The XNAT REST client unfortunately returns date fields as a string.
    This method converts the string input to a datetime.

    :param value: the input string in :const:`qixnat.constants.DATE_FMT`
        format or None
    :return: None, if the input is None, otherwise the input parsed
        as a datetime object
    :rtype: datetime.datetime
    """
    if not value:
        return None
    return datetime.strptime(value, DATE_FMT)
def path_hierarchy(path):
    """
    Transforms the given XNAT path into a list of *(type, value)*
    tuples.
    The *path* string argument must consist of a sequence of
    slash-delimited XNAT object specifications, where each specification
    is either a singular XNAT type and value, e.g. ``subject/Breast003``,
    or a pluralized XNAT type, e.g. ``resources``.
    The path can include wildcards, e.g. ``/project/QIN/subject/Breast*``.
    If the path starts with a forward slash, then the first three
    components can elide the XNAT type. Thus, the following are
    equivalent::
    path_hierarchy('/project/QIN/subject/Breast003/experiment/Session02')
    path_hierarchy('/QIN/Breast003/Session02')
    The following XNAT object type synonyms are allowed:
    * ``session`` => ``experiment``
    * ``analysis`` or ``assessment`` => ``assessor``
    Pluralized type synonyms are standardized according to the singular
    form, e.g. ``analyses`` => ``assessors``.
    The path hierarchy result is a list of *(type, value)* tuples. A
    pluralization value is a wild card.
    Examples:
    >>> from qixnat.helpers import path_hierarchy
    >>> path_hierarchy('/QIN/Breast003/Session03/resource/reg_Qzu7R/files')
    [('project', 'QIN'), ('subject', 'Breast003'),
     ('experiment', 'Session03'), ('resource', 'reg_Qzu7R'),
     ('file', '*')]
    >>> path_hierarchy('/QIN/Breast*/*/resources')
    [('project', 'QIN'), ('subject', 'Breast*'), ('experiment', '*'),
     ('resource', '*')]
    :param path: the XNAT object path string or list
    :return: the path hierarchy list
    :rtype: list
    """
    # Remove the leading slash, if necessary, before splitting
    # the path items.
    if path.startswith('/'):
        relpath = path[1:]
    else:
        relpath = path
    # Allow but ignore a trailing slash.
    if relpath.endswith('/'):
        relpath = relpath[:-1]
    # There must be something left.
    if not relpath:
        raise ValueError("The path argument is empty.")
    # The path items list.
    items = relpath.split('/')
    # If the path starts with a '/', then the first three items are
    # /project/subject/experiment, and can elide the object type.
    if path.startswith('/'):
        prefix = []
        # Walk through the first three object specifications
        # to create a standard prefix.
        first = items.pop(0)
        if re.match(r"projects?$", first):
            # An explicit (possibly plural) project designator: the
            # next item is the project value.
            prj = items.pop(0)
        else:
            # The type was elided; the item itself is the value.
            prj = first
        prefix.extend(('project', prj))
        if items:
            first = items.pop(0)
            if re.match(r"subjects?$", first):
                # A bare trailing 'subject(s)' designator defaults to a
                # wildcard value.
                sbj = items.pop(0) if items else '*'
            else:
                sbj = first
            # The plural designator is singularized later by
            # _standardize_attribute.
            prefix.extend(('subjects', sbj))
        if items:
            first = items.pop(0)
            if re.match(r"(session|experiment)s?$", first):
                sess = items.pop(0) if items else '*'
            else:
                sess = first
            prefix.extend(('experiments', sess))
        # Prepend the augmented prefix.
        items = prefix + items
    # A terminal type not followed by a value has a '*' value.
    if len(items) % 2:
        items.append('*')
    # Partition the items into (type, value) pairs.
    return [(_standardize_attribute(items[i]), items[i+1])
            for i in range(0, len(items), 2)]
def _standardize_attribute(name):
    """
    Returns the standardized XNAT attribute for the given name, with
    the following substitutions:
    * ``session`` => ``experiment``
    * ``analysis`` or ``assessment`` => ``assessor``
    * pluralizations => the singular standardization, e.g.
      ``analyses`` => ``assessor``
    :param name: the attribute name
    :return: the standardized XNAT attribute
    :raise ParseError: if the name is not recognized as an attribute
        designator
    """
    # Already a standard XNAT type.
    if name in XNAT_TYPES:
        return name
    # Synonym substitutions.
    if name == EXPERIMENT_SYNONYM:
        return 'experiment'
    if name == 'analyses' or name in ASSESSOR_SYNONYMS:
        return 'assessor'
    # Strip a plural suffix and retry on the singular form.
    if name.endswith('s'):
        return _standardize_attribute(name[:-1])
    raise ParseError("The XNAT path item %s is not recognized as an"
                     " XNAT object type" % name)
| {
"content_hash": "8ccd7570983167139d12c3e2199deb97",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 79,
"avg_line_length": 31.51169590643275,
"alnum_prop": 0.6125081191426185,
"repo_name": "ohsu-qin/qixnat",
"id": "996901e80cde7937497ef819bec9210044c63caa",
"size": "10777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qixnat/helpers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "100392"
}
],
"symlink_target": ""
} |
import sys
from twisted.internet import defer, protocol
from twisted.protocols.policies import TimeoutMixin
from zope.interface.verify import verifyObject
import six
from txHL7.receiver import IHL7Receiver
class MinimalLowerLayerProtocol(protocol.Protocol, TimeoutMixin):
    """
    Minimal Lower-Layer Protocol (MLLP) takes the form:
    <VT>[HL7 Message]<FS><CR>
    References:
    .. [1] http://www.hl7standards.com/blog/2007/05/02/hl7-mlp-minimum-layer-protocol-defined/
    .. [2] http://www.hl7standards.com/blog/2007/02/01/ack-message-original-mode-acknowledgement/
    """
    # Accumulates bytes that have not yet formed a complete message.
    _buffer = b''
    start_block = b'\x0b' # <VT>, vertical tab
    end_block = b'\x1c' # <FS>, file separator
    carriage_return = b'\x0d' # <CR>, \r
    def connectionMade(self):
        # Arm the idle timer only when the factory configured one.
        if self.factory.timeout is not None:
            self.setTimeout(self.factory.timeout)
    def dataReceived(self, data):
        # Any traffic counts as activity for the idle timeout.
        self.resetTimeout()
        # success callback
        def onSuccess(message):
            self.writeMessage(message)
        # try to find a complete message(s) in the combined the buffer and data
        messages = (self._buffer + data).split(self.end_block)
        # whatever is in the last chunk is an uncompleted message, so put back
        # into the buffer
        self._buffer = messages.pop(-1)
        for raw_message in messages:
            # strip the rest of the MLLP shell from the HL7 message
            # (str.strip removes <VT> from both ends of the chunk)
            raw_message = raw_message.strip(self.start_block)
            # only pass messages with data
            if len(raw_message) > 0:
                # convert into unicode, parseMessage expects decoded string
                raw_message = self.factory.decode(raw_message)
                message_container = self.factory.parseMessage(raw_message)
                # error callback (defined here, since error depends on
                # current message). rejects the message
                def onError(err):
                    reject = message_container.err(err)
                    self.writeMessage(reject)
                    return err
                # have the factory create a deferred and pass the message
                # to the approriate IHL7Receiver instance
                d = self.factory.handleMessage(message_container)
                d.addCallback(onSuccess)
                d.addErrback(onError)
    def writeMessage(self, message):
        # A None message means there is nothing to acknowledge.
        if message is None:
            return
        # convert back to a byte string
        message = self.factory.encode(message)
        # wrap message in payload container
        self.transport.write(
            self.start_block + message + self.end_block + self.carriage_return
        )
class MLLPFactory(protocol.ServerFactory):
    """
    Server factory that produces :class:`MinimalLowerLayerProtocol`
    connections and delegates message handling to an
    :class:`txHL7.receiver.IHL7Receiver` implementation.
    """

    protocol = MinimalLowerLayerProtocol

    def __init__(self, receiver):
        # Fail fast if the receiver does not implement the interface.
        verifyObject(IHL7Receiver, receiver)
        self.receiver = receiver
        # The receiver codec is either a name or a (name, errors) pair.
        codec = receiver.getCodec()
        errors = None
        if isinstance(codec, tuple):
            codec, errors = codec
        self.encoding = codec or sys.getdefaultencoding()
        self.encoding_errors = errors or 'strict'
        self.timeout = receiver.getTimeout()

    def parseMessage(self, message_str):
        """Delegate message parsing to the receiver."""
        return self.receiver.parseMessage(message_str)

    def handleMessage(self, message_container):
        # IHL7Receiver allows implementations to return a Deferred or the
        # result, so ensure we return a Deferred here
        return defer.maybeDeferred(self.receiver.handleMessage,
                                   message_container)

    def decode(self, value):
        """Turn value into unicode using the receiver's declared codec."""
        if not isinstance(value, six.binary_type):
            return six.text_type(value)
        return value.decode(self.encoding, self.encoding_errors)

    def encode(self, value):
        """Turn value into a byte string using the receiver's codec."""
        if not isinstance(value, six.text_type):
            return value
        return value.encode(self.encoding, self.encoding_errors)
| {
"content_hash": "a1721a7c73ac5ef2fc8ed879b5c4264e",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 97,
"avg_line_length": 36.19469026548673,
"alnum_prop": 0.6388753056234718,
"repo_name": "johnpaulett/txHL7",
"id": "f86b2818c6c88b5e670ba7ba2c7abc617c591289",
"size": "4090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txHL7/mllp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "843"
},
{
"name": "Python",
"bytes": "16824"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
import os
# test success under python3.6
def fake_remove(path, *args, **kwargs):
    """Stand-in for os.remove that never touches the filesystem."""
    print("remove done")


@patch('os.remove', fake_remove)
def test():
    """Confirm the patched os.remove is called instead of the real one."""
    try:
        os.remove('%$!?&*')  # dispatches to fake_remove via the patch
    except OSError as err:
        # Unreachable: the fake never raises.
        print(err)
    else:
        print('test success')


test()
| {
"content_hash": "41f0820163633abe3a9b667d1df4a2e7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 44,
"avg_line_length": 21.266666666666666,
"alnum_prop": 0.6018808777429467,
"repo_name": "tencrance/cool-config",
"id": "f8adb1d5dc7bca854d39fdb1ca99811ae6c593dc",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_tricks/python_unittest/Mock_using_patch_substitute_original_method.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4434"
},
{
"name": "Shell",
"bytes": "1564"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import re
# A single multiple-choice question record, one per TSV row.
Question = namedtuple(
    'Question',
    ['id', 'question', 'correct_answer', 'A', 'B', 'C', 'D'])


def generate_next_question(question_file):
    """Yield a Question for each tab-separated line of *question_file*."""
    with open(question_file) as source:
        for record in source:
            yield Question(*record.strip().split("\t"))
# One output file per question category.
cts = open("../data/question_groups/complete-the-sentence_questions.txt", 'w')
fib = open("../data/question_groups/fill-in-the-blank_questions.txt", 'w')
what = open("../data/question_groups/what_questions.txt", 'w')
which = open("../data/question_groups/which_questions.txt", 'w')
why = open("../data/question_groups/why_questions.txt", 'w')
when = open("../data/question_groups/when_questions.txt", 'w')
where = open("../data/question_groups/where_questions.txt", 'w')
how = open("../data/question_groups/how_questions.txt", 'w')
who = open("../data/question_groups/who_questions.txt", 'w')
other = open("../data/question_groups/other_questions.txt", 'w')

files = [cts, fib, what, which, why, when, where, how, who, other]
for handle in files:
    handle.write("id\tquestion\tcorrectAnswer\tanswerA\tanswerB\tanswerC\tanswerD\n")

# Ordered dispatch table: the first matching pattern wins, so the
# order mirrors the original if/elif chain exactly.
dispatch = [
    (r'(__+)', fib),
    (r'(what( |\?))', what),
    (r'(which )', which),
    (r'(why )', why),
    (r'(when )', when),
    (r'(where )', where),
    (r'(how )', how),
    (r'(who )', who),
]

first = True
for question in generate_next_question('../data/training_set.tsv'):
    # Skip the header row.
    if first:
        first = False
        continue
    question_text = question.question.lower()
    if not re.search(r'(\?)|(__+)', question_text):
        # Neither a question mark nor a blank: complete-the-sentence.
        to_write = cts
    else:
        for pattern, handle in dispatch:
            if re.search(pattern, question_text):
                to_write = handle
                break
        else:
            to_write = other
    to_write.write("\t".join([question.id, question_text,
                              question.correct_answer, question.A,
                              question.B, question.C]) + "\t")
    to_write.write(question.D + "\n")

for handle in files:
    handle.close()
| {
"content_hash": "f31ce064712f3483b9720eb15dcec357",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 35.71641791044776,
"alnum_prop": 0.6259924780610113,
"repo_name": "sjvasquez/AIChallenge",
"id": "a0fbc97eba339e889eac65cae1fcb112fa8706dd",
"size": "2393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/question_grouper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86235"
}
],
"symlink_target": ""
} |
from auth import connect_sqlite, create_user, initialize_sqlite
from azure import azure_login, azure_create_server
import json
# Interactive first-run setup for a lapis Minecraft-on-Azure server
# (Python 2: print statements and raw_input).
print "lapis - Minecraft + Azure = Wonders"
print "First, you'll need to link your Azure account."
azure_login()
print "Cool! Now, pick a name for your service."
name = raw_input("Name: ")
print "Awesome! Let's set you up with a username and password."
username = raw_input("User: ")
password = raw_input("Pass: ")
# Create the local credentials store and the first admin user.
initialize_sqlite()
c = connect_sqlite()
create_user(c, username, password)
# Persist the server configuration for later runs.
config = {}
config["name"] = name
config["online"] = True
with open("server.json", "w") as cfg:
    json.dump(config, cfg)
print "Now that we're finished with that, give us some time to spin up your server."
print "Fair warning, this is gonna take a while. Feel free to sing a song or five."
# Provisions the Azure instance; this is the long-running step.
azure_create_server(name)
| {
"content_hash": "42e58b5250cc51af581a3cf5beee8404",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 84,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7166469893742621,
"repo_name": "Takmo/lapis",
"id": "b28a9db2b359302d0482a6d19eaa1b007598d003",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3691"
},
{
"name": "Java",
"bytes": "1521"
},
{
"name": "Python",
"bytes": "15530"
},
{
"name": "Shell",
"bytes": "909"
}
],
"symlink_target": ""
} |
from mox import IsA # noqa
from django.core.urlresolvers import reverse # noqa
from django.core.urlresolvers import reverse_lazy # noqa
from django import http
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.api import lbaas
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.loadbalancers import workflows
class LoadBalancerTests(test.TestCase):
class AttributeDict(dict):
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
DASHBOARD = 'project'
INDEX_URL = reverse_lazy('horizon:%s:loadbalancers:index' % DASHBOARD)
ADDPOOL_PATH = 'horizon:%s:loadbalancers:addpool' % DASHBOARD
ADDVIP_PATH = 'horizon:%s:loadbalancers:addvip' % DASHBOARD
ADDMEMBER_PATH = 'horizon:%s:loadbalancers:addmember' % DASHBOARD
ADDMONITOR_PATH = 'horizon:%s:loadbalancers:addmonitor' % DASHBOARD
POOL_DETAIL_PATH = 'horizon:%s:loadbalancers:pooldetails' % DASHBOARD
VIP_DETAIL_PATH = 'horizon:%s:loadbalancers:vipdetails' % DASHBOARD
MEMBER_DETAIL_PATH = 'horizon:%s:loadbalancers:memberdetails' % DASHBOARD
MONITOR_DETAIL_PATH = 'horizon:%s:loadbalancers:monitordetails' % DASHBOARD
UPDATEPOOL_PATH = 'horizon:%s:loadbalancers:updatepool' % DASHBOARD
UPDATEVIP_PATH = 'horizon:%s:loadbalancers:updatevip' % DASHBOARD
UPDATEMEMBER_PATH = 'horizon:%s:loadbalancers:updatemember' % DASHBOARD
UPDATEMONITOR_PATH = 'horizon:%s:loadbalancers:updatemonitor' % DASHBOARD
ADDASSOC_PATH = 'horizon:%s:loadbalancers:addassociation' % DASHBOARD
DELETEASSOC_PATH = 'horizon:%s:loadbalancers:deleteassociation' % DASHBOARD
    def set_up_expect(self):
        """Record the mox expectations for a successful index page load.

        The recorded call order matters: mox replays expectations in
        the sequence they are declared here.
        """
        # retrieve pools
        vip1 = self.vips.first()
        vip2 = self.vips.list()[1]
        api.lbaas.pool_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.lbaas.vip_get(IsA(http.HttpRequest), vip1.id).AndReturn(vip1)
        api.lbaas.vip_get(IsA(http.HttpRequest), vip2.id).AndReturn(vip2)
        # retrieves members
        api.lbaas.member_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.members.list())
        pool1 = self.pools.first()
        pool2 = self.pools.list()[1]
        api.lbaas.pool_get(IsA(http.HttpRequest),
                           self.members.list()[0].pool_id).AndReturn(pool1)
        api.lbaas.pool_get(IsA(http.HttpRequest),
                           self.members.list()[1].pool_id).AndReturn(pool2)
        # retrieves monitors; MultipleTimes() because several tabs may
        # trigger the monitor listing.
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id).MultipleTimes() \
            .AndReturn(self.monitors.list())
    def set_up_expect_with_exception(self):
        """Record mox expectations where every lbaas listing call fails."""
        api.lbaas.pool_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndRaise(self.exceptions.neutron)
        api.lbaas.member_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndRaise(self.exceptions.neutron)
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndRaise(self.exceptions.neutron)
    @test.create_stubs({api.lbaas: ('pool_list', 'vip_get',
                                    'member_list', 'pool_get',
                                    'pool_health_monitor_list'),
                        api.neutron: ('subnet_get',)})
    def test_index_pools(self):
        """The index page renders one row per pool."""
        self.set_up_expect()
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL)
        self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                                     % self.DASHBOARD)
        self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['table'].data),
                         len(self.pools.list()))
    @test.create_stubs({api.lbaas: ('pool_list', 'vip_get',
                                    'member_list', 'pool_get',
                                    'pool_health_monitor_list'),
                        api.neutron: ('subnet_get',)})
    def test_index_members(self):
        """The members tab renders one row per member."""
        self.set_up_expect()
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members')
        self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                                     % self.DASHBOARD)
        self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['memberstable_table'].data),
                         len(self.members.list()))
    @test.create_stubs({api.lbaas: ('pool_list', 'vip_get',
                                    'pool_health_monitor_list',
                                    'member_list', 'pool_get'),
                        api.neutron: ('subnet_get',)})
    def test_index_monitors(self):
        """The monitors tab renders one row per health monitor."""
        self.set_up_expect()
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors')
        self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                                     % self.DASHBOARD)
        self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['monitorstable_table'].data),
                         len(self.monitors.list()))
    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list')})
    def test_index_exception_pools(self):
        """An API failure yields an empty pools table, not an error page."""
        self.set_up_expect_with_exception()
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL)
        self.assertTemplateUsed(res,
                                '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res,
                                'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['table'].data), 0)
    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list')})
    def test_index_exception_members(self):
        """An API failure yields an empty members table, not an error page."""
        self.set_up_expect_with_exception()
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members')
        self.assertTemplateUsed(res,
                                '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res,
                                'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['memberstable_table'].data), 0)
    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list')})
    def test_index_exception_monitors(self):
        """An API failure yields an empty monitors table, not an error page."""
        self.set_up_expect_with_exception()
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors')
        self.assertTemplateUsed(res,
                                '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res,
                                'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['monitorstable_table'].data), 0)
    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'provider_list',
                                      'is_extension_supported'),
                        api.lbaas: ('pool_create', )})
    def test_add_pool_post(self):
        """Submitting the add-pool form creates the pool and redirects."""
        pool = self.pools.first()
        subnet = self.subnets.first()
        networks = [{'subnets': [subnet, ]}, ]
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'service-type').AndReturn(True)
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
        api.neutron.provider_list(IsA(http.HttpRequest)) \
            .AndReturn(self.providers.list())
        api.lbaas.pool_create(
            IsA(http.HttpRequest),
            name=pool.name,
            description=pool.description,
            subnet_id=pool.subnet_id,
            protocol=pool.protocol,
            lb_method=pool.lb_method,
            admin_state_up=pool.admin_state_up,
            provider=pool.provider).AndReturn(pool)
        self.mox.ReplayAll()
        form_data = {'name': pool.name,
                     'description': pool.description,
                     'subnet_id': pool.subnet_id,
                     'protocol': pool.protocol,
                     'lb_method': pool.lb_method,
                     'admin_state_up': pool.admin_state_up}
        res = self.client.post(reverse(self.ADDPOOL_PATH), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'provider_list',
                                      'is_extension_supported')})
    def test_add_pool_get(self):
        """The add-pool form renders when the service-type extension exists."""
        self._test_add_pool_get(with_service_type=True)
@test.create_stubs({api.neutron: ('network_list_for_tenant',
'provider_list',
'is_extension_supported')})
def test_add_pool_get_provider_list_exception(self):
self._test_add_pool_get(with_service_type=True)
    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'is_extension_supported')})
    def test_add_pool_get_without_service_type_support(self):
        """The add-pool form renders without the service-type extension."""
        self._test_add_pool_get(with_service_type=False)
    def _test_add_pool_get(self, with_service_type=True,
                           with_provider_exception=False):
        """Shared driver for the add-pool GET scenarios.

        :param with_service_type: whether the neutron service-type
            extension is reported as supported
        :param with_provider_exception: whether provider_list raises
        """
        subnet = self.subnets.first()
        default_provider = self.providers.first()['name']
        networks = [{'subnets': [subnet, ]}, ]
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'service-type').AndReturn(with_service_type)
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
        if with_service_type:
            prov_list = api.neutron.provider_list(IsA(http.HttpRequest))
            if with_provider_exception:
                prov_list.AndRaise(self.exceptions.neutron)
            else:
                prov_list.AndReturn(self.providers.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse(self.ADDPOOL_PATH))
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddPool.name)
        expected_objs = ['<AddPoolStep: addpoolaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
        # The rendered provider choice depends on the scenario.
        if not with_service_type:
            self.assertNotContains(res, default_provider)
            self.assertContains(res, ('Provider for Load Balancer '
                                      'is not supported.'))
        elif with_provider_exception:
            self.assertNotContains(res, default_provider)
            self.assertContains(res, 'No provider is available.')
        else:
            self.assertContains(res, default_provider)
    def test_add_vip_post(self):
        """Add a VIP with a connection limit supplied."""
        self._test_add_vip_post()
    def test_add_vip_post_no_connection_limit(self):
        """Add a VIP with the optional connection limit omitted."""
        self._test_add_vip_post(with_conn_limit=False)
    @test.create_stubs({api.lbaas: ('pool_get', 'vip_create'),
                        api.neutron: ('subnet_get', )})
    def _test_add_vip_post(self, with_conn_limit=True):
        """Shared driver: submitting the add-VIP form creates the VIP.

        :param with_conn_limit: whether connection_limit is included in
            both the expected API call and the submitted form
        """
        vip = self.vips.first()
        subnet = self.subnets.first()
        pool = self.pools.first()
        api.lbaas.pool_get(
            IsA(http.HttpRequest), pool.id).MultipleTimes().AndReturn(pool)
        api.neutron.subnet_get(
            IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
        params = {'name': vip.name,
                  'description': vip.description,
                  'pool_id': vip.pool_id,
                  'address': vip.address,
                  'floatip_address': vip.floatip_address,
                  'other_address': vip.other_address,
                  'subnet': vip.subnet,
                  'subnet_id': vip.subnet_id,
                  'protocol_port': vip.protocol_port,
                  'protocol': vip.protocol,
                  'session_persistence': vip.session_persistence['type'],
                  'cookie_name': vip.session_persistence['cookie_name'],
                  'admin_state_up': vip.admin_state_up,
                  }
        if with_conn_limit:
            params['connection_limit'] = vip.connection_limit
        api.lbaas.vip_create(
            IsA(http.HttpRequest), **params).AndReturn(vip)
        self.mox.ReplayAll()
        # The form lower-cases the session persistence type.
        form_data = {
            'name': vip.name,
            'description': vip.description,
            'pool_id': vip.pool_id,
            'address': vip.address,
            'floatip_address': vip.floatip_address,
            'other_address': vip.other_address,
            'subnet_id': vip.subnet_id,
            'subnet': vip.subnet,
            'protocol_port': vip.protocol_port,
            'protocol': vip.protocol,
            'session_persistence': vip.session_persistence['type'].lower(),
            'cookie_name': vip.session_persistence['cookie_name'],
            'admin_state_up': vip.admin_state_up}
        if with_conn_limit:
            form_data['connection_limit'] = vip.connection_limit
        res = self.client.post(
            reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_get', ),
                        api.neutron: ('subnet_get', )})
    def test_add_vip_post_with_error(self):
        """An out-of-range port and connection limit produce form errors."""
        vip = self.vips.first()
        subnet = self.subnets.first()
        pool = self.pools.first()
        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.neutron.subnet_get(
            IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
        self.mox.ReplayAll()
        # protocol_port 65536 and connection_limit -2 are both invalid.
        form_data = {
            'name': vip.name,
            'description': vip.description,
            'pool_id': vip.pool_id,
            'address': vip.address,
            'subnet_id': vip.subnet_id,
            'protocol_port': 65536,
            'protocol': vip.protocol,
            'session_persistence': vip.session_persistence['type'].lower(),
            'cookie_name': vip.session_persistence['cookie_name'],
            'connection_limit': -2,
            'admin_state_up': vip.admin_state_up}
        res = self.client.post(
            reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data)
        self.assertFormErrors(res, 2)
    @test.create_stubs({api.lbaas: ('pool_get', ),
                        api.neutron: ('subnet_get', )})
    def test_add_vip_get(self):
        """The add-VIP workflow renders with its single step."""
        subnet = self.subnets.first()
        pool = self.pools.first()
        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.neutron.subnet_get(
            IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
        self.mox.ReplayAll()
        res = self.client.get(reverse(self.ADDVIP_PATH, args=(pool.id,)))
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddVip.name)
        expected_objs = ['<AddVipStep: addvipaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
    @test.create_stubs({api.lbaas: ('pool_health_monitor_create', )})
    def test_add_monitor_post(self):
        """Submitting the add-monitor form creates the monitor."""
        monitor = self.monitors.first()
        api.lbaas.pool_health_monitor_create(
            IsA(http.HttpRequest),
            type=monitor.type,
            delay=monitor.delay,
            timeout=monitor.timeout,
            max_retries=monitor.max_retries,
            http_method=monitor.http_method,
            url_path=monitor.url_path,
            expected_codes=monitor.expected_codes,
            admin_state_up=monitor.admin_state_up).AndReturn(
                lbaas.PoolMonitor(monitor))
        self.mox.ReplayAll()
        form_data = {'type': monitor.type,
                     'delay': monitor.delay,
                     'timeout': monitor.timeout,
                     'max_retries': monitor.max_retries,
                     'http_method': monitor.http_method,
                     'url_path': monitor.url_path,
                     'expected_codes': monitor.expected_codes,
                     'admin_state_up': monitor.admin_state_up}
        res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    def test_add_monitor_post_with_error(self):
        """Zero delay/timeout and max_retries > 10 produce form errors."""
        monitor = self.monitors.first()
        form_data = {'type': monitor.type,
                     'delay': 0,
                     'timeout': 0,
                     'max_retries': 11,
                     'http_method': monitor.http_method,
                     'url_path': monitor.url_path,
                     'expected_codes': monitor.expected_codes,
                     'admin_state_up': monitor.admin_state_up}
        res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)
        self.assertFormErrors(res, 3)
    def test_add_monitor_post_with_httpmethod_error(self):
        """An http monitor with empty HTTP fields produces form errors."""
        monitor = self.monitors.first()
        form_data = {'type': 'http',
                     'delay': monitor.delay,
                     'timeout': monitor.timeout,
                     'max_retries': monitor.max_retries,
                     'http_method': '',
                     'url_path': '',
                     'expected_codes': '',
                     'admin_state_up': monitor.admin_state_up}
        res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)
        self.assertFormErrors(res, 3)
    def test_add_monitor_get(self):
        """The add-monitor workflow renders with its single step."""
        res = self.client.get(reverse(self.ADDMONITOR_PATH))
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddMonitor.name)
        expected_objs = ['<AddMonitorStep: addmonitoraction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
    def test_add_member_post(self):
        """Add a member with an explicit weight."""
        self._test_add_member_post()
    def test_add_member_post_without_weight(self):
        """Add a member with the optional weight omitted."""
        self._test_add_member_post(with_weight=False)
    @test.create_stubs({api.lbaas: ('pool_list', 'member_create'),
                        api.neutron: ('port_list',),
                        api.nova: ('server_list',)})
    def _test_add_member_post(self, with_weight=True):
        """Shared driver: submitting the add-member form creates the member.

        :param with_weight: whether weight is included in both the
            expected API call and the submitted form
        """
        member = self.members.first()
        server1 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338042e',
                                      'name': 'vm1'})
        server2 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338043e',
                                      'name': 'vm2'})
        # The member address is resolved from the server's port.
        port1 = self.AttributeDict(
            {'fixed_ips': [{'ip_address': member.address}]})
        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn(
            [[server1, server2], False])
        api.neutron.port_list(IsA(http.HttpRequest),
                              device_id=server1.id).AndReturn([port1, ])
        params = {'pool_id': member.pool_id,
                  'address': member.address,
                  'protocol_port': member.protocol_port,
                  'members': [server1.id],
                  'admin_state_up': member.admin_state_up,
                  }
        if with_weight:
            params['weight'] = member.weight
        api.lbaas.member_create(IsA(http.HttpRequest),
                                **params).AndReturn(lbaas.Member(member))
        self.mox.ReplayAll()
        form_data = {'pool_id': member.pool_id,
                     'address': member.address,
                     'protocol_port': member.protocol_port,
                     'members': [server1.id],
                     'admin_state_up': member.admin_state_up}
        if with_weight:
            form_data['weight'] = member.weight
        res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_list',),
                        api.nova: ('server_list',)})
    def test_add_member_post_with_error(self):
        """Invalid protocol port and weight produce two form errors."""
        member = self.members.first()
        server1 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338042e',
                                      'name': 'vm1'})
        server2 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338043e',
                                      'name': 'vm2'})
        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn([[server1,
                                                                server2],
                                                               False])
        self.mox.ReplayAll()
        # try to create member with invalid protocol port and weight
        form_data = {'pool_id': member.pool_id,
                     'address': member.address,
                     'protocol_port': 65536,
                     'weight': -1,
                     'members': [server1.id],
                     'admin_state_up': member.admin_state_up}
        res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data)
        self.assertFormErrors(res, 2)
    @test.create_stubs({api.lbaas: ('pool_list',),
                        api.nova: ('server_list',)})
    def test_add_member_get(self):
        """The add-member workflow renders with its single step."""
        server1 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338042e',
                                      'name': 'vm1'})
        server2 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338043e',
                                      'name': 'vm2'})
        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.nova.server_list(
            IsA(http.HttpRequest)).AndReturn([[server1, server2], False])
        self.mox.ReplayAll()
        res = self.client.get(reverse(self.ADDMEMBER_PATH))
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddMember.name)
        expected_objs = ['<AddMemberStep: addmemberaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
    @test.create_stubs({api.lbaas: ('pool_get', 'pool_update')})
    def test_update_pool_post(self):
        """Submitting the update-pool form calls pool_update and redirects."""
        pool = self.pools.first()
        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        data = {'name': pool.name,
                'description': pool.description,
                'lb_method': pool.lb_method,
                'admin_state_up': pool.admin_state_up}
        api.lbaas.pool_update(IsA(http.HttpRequest), pool.id, pool=data)\
            .AndReturn(pool)
        self.mox.ReplayAll()
        form_data = data.copy()
        form_data.update({'pool_id': pool.id})
        res = self.client.post(
            reverse(self.UPDATEPOOL_PATH, args=(pool.id,)), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_get',)})
    def test_update_pool_get(self):
        """The update-pool form renders its template."""
        pool = self.pools.first()
        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        self.mox.ReplayAll()
        res = self.client.get(reverse(self.UPDATEPOOL_PATH, args=(pool.id,)))
        self.assertTemplateUsed(res, 'project/loadbalancers/updatepool.html')
    @test.create_stubs({api.lbaas: ('pool_list', 'vip_get',
                                    'vip_update')})
    def test_update_vip_post(self):
        """POSTing valid data updates the VIP and redirects to the index."""
        vip = self.vips.first()
        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip)
        data = {'name': vip.name,
                'description': vip.description,
                'pool_id': vip.pool_id,
                'session_persistence': {},
                'connection_limit': vip.connection_limit,
                'admin_state_up': vip.admin_state_up}
        # Expect vip_update to receive exactly the submitted attributes.
        api.lbaas.vip_update(IsA(http.HttpRequest), vip.id, vip=data)\
            .AndReturn(vip)
        self.mox.ReplayAll()
        form_data = data.copy()
        form_data.update({'vip_id': vip.id})
        res = self.client.post(
            reverse(self.UPDATEVIP_PATH, args=(vip.id,)), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('vip_get', 'pool_list')})
    def test_update_vip_get(self):
        """GET on the update-VIP URL renders the update-VIP template."""
        vip = self.vips.first()
        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip)
        self.mox.ReplayAll()
        res = self.client.get(reverse(self.UPDATEVIP_PATH, args=(vip.id,)))
        self.assertTemplateUsed(res, 'project/loadbalancers/updatevip.html')
    @test.create_stubs({api.lbaas: ('pool_list', 'member_get',
                                    'member_update')})
    def test_update_member_post(self):
        """POSTing valid data updates the member and redirects to the index."""
        member = self.members.first()
        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.lbaas.member_get(IsA(http.HttpRequest), member.id)\
            .AndReturn(member)
        data = {'pool_id': member.pool_id,
                'weight': member.weight,
                'admin_state_up': member.admin_state_up}
        # Expect member_update to receive exactly the submitted attributes.
        api.lbaas.member_update(IsA(http.HttpRequest), member.id, member=data)\
            .AndReturn(member)
        self.mox.ReplayAll()
        form_data = data.copy()
        form_data.update({'member_id': member.id})
        res = self.client.post(
            reverse(self.UPDATEMEMBER_PATH, args=(member.id,)), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('member_get', 'pool_list')})
    def test_update_member_get(self):
        """GET on the update-member URL renders the update-member template."""
        member = self.members.first()
        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.lbaas.member_get(IsA(http.HttpRequest), member.id)\
            .AndReturn(member)
        self.mox.ReplayAll()
        res = self.client.get(
            reverse(self.UPDATEMEMBER_PATH, args=(member.id,)))
        self.assertTemplateUsed(res, 'project/loadbalancers/updatemember.html')
    @test.create_stubs({api.lbaas: ('pool_health_monitor_get',
                                    'pool_health_monitor_update')})
    def test_update_monitor_post(self):
        """POSTing valid data updates the monitor and redirects to the index."""
        monitor = self.monitors.first()
        api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\
            .AndReturn(monitor)
        data = {'delay': monitor.delay,
                'timeout': monitor.timeout,
                'max_retries': monitor.max_retries,
                'admin_state_up': monitor.admin_state_up}
        # Expect the update call to receive exactly the submitted attributes.
        api.lbaas.pool_health_monitor_update(IsA(http.HttpRequest),
            monitor.id, health_monitor=data).AndReturn(monitor)
        self.mox.ReplayAll()
        form_data = data.copy()
        form_data.update({'monitor_id': monitor.id})
        res = self.client.post(
            reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,)), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_health_monitor_get',)})
    def test_update_monitor_get(self):
        """GET on the update-monitor URL renders the update-monitor template."""
        monitor = self.monitors.first()
        api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\
            .AndReturn(monitor)
        self.mox.ReplayAll()
        res = self.client.get(
            reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,)))
        self.assertTemplateUsed(
            res, 'project/loadbalancers/updatemonitor.html')
    @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list',
                                    'pool_monitor_association_create')})
    def test_add_pool_monitor_association_post(self):
        """POSTing the association form creates a pool/monitor association."""
        pool = self.pools.first()
        monitors = self.monitors.list()
        # Pick a monitor that is not already associated with the pool.
        monitor = self.monitors.list()[1]
        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest),
            tenant_id=self.tenant.id).AndReturn(monitors)
        api.lbaas.pool_monitor_association_create(
            IsA(http.HttpRequest),
            monitor_id=monitor.id,
            pool_id=pool.id,
            pool_monitors=pool.health_monitors,
            pool_name=pool.name).AndReturn(None)
        self.mox.ReplayAll()
        form_data = {'monitor_id': monitor.id,
                     'pool_id': pool.id,
                     'pool_monitors': pool.health_monitors,
                     'pool_name': pool.name}
        res = self.client.post(
            reverse(self.ADDASSOC_PATH, args=(pool.id,)), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list')})
    def test_add_pool_monitor_association_get(self):
        """GET on the add-association URL renders the AddPMAssociation workflow."""
        pool = self.pools.first()
        monitors = self.monitors.list()
        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest),
            tenant_id=self.tenant.id).AndReturn(monitors)
        self.mox.ReplayAll()
        res = self.client.get(reverse(self.ADDASSOC_PATH, args=(pool.id,)))
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddPMAssociation.name)
        expected_objs = ['<AddPMAssociationStep: addpmassociationaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
    @test.create_stubs({api.lbaas: ('pool_get',
                                    'pool_health_monitor_list',
                                    'pool_monitor_association_delete')})
    def test_delete_pool_monitor_association_post(self):
        """POSTing the form deletes a pool/monitor association."""
        pool = self.pools.first()
        monitors = self.monitors.list()
        monitor = monitors[0]
        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest)).AndReturn(monitors)
        api.lbaas.pool_monitor_association_delete(
            IsA(http.HttpRequest),
            monitor_id=monitor.id,
            pool_id=pool.id,
            pool_monitors=pool.health_monitors,
            pool_name=pool.name).AndReturn(None)
        self.mox.ReplayAll()
        form_data = {'monitor_id': monitor.id,
                     'pool_id': pool.id,
                     'pool_monitors': pool.health_monitors,
                     'pool_name': pool.name}
        res = self.client.post(
            reverse(self.DELETEASSOC_PATH, args=(pool.id,)), form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_get',
                                    'pool_health_monitor_list')})
    def test_delete_pool_monitor_association_get(self):
        """GET on the delete-association URL renders the DeletePMAssociation workflow."""
        pool = self.pools.first()
        monitors = self.monitors.list()
        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest)).AndReturn(monitors)
        self.mox.ReplayAll()
        res = self.client.get(
            reverse(self.DELETEASSOC_PATH, args=(pool.id,)))
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.DeletePMAssociation.name)
        expected_objs = [
            '<DeletePMAssociationStep: deletepmassociationaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
| {
"content_hash": "59ec0d2a2b2633f93b5d3adde6d6df1e",
"timestamp": "",
"source": "github",
"line_count": 863,
"max_line_length": 79,
"avg_line_length": 38.684820393974505,
"alnum_prop": 0.5698367530327991,
"repo_name": "ikargis/horizon_fod",
"id": "cc4f524c6d7aac08e9d4c95086c997b4f3a91d99",
"size": "33976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/loadbalancers/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167455"
},
{
"name": "JavaScript",
"bytes": "1099746"
},
{
"name": "Python",
"bytes": "3023860"
},
{
"name": "Shell",
"bytes": "13740"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
from distutils.core import setup
# Distutils packaging metadata: installs the single module ``pymorse``,
# the Python client bindings for the MORSE robot simulator.
setup(name='pymorse',
      version='0.1',
      description='Python bindings for the Modular OpenRobots Simulation Engine (MORSE)',
      author='Séverin Lemaignan',
      author_email='severin.lemaignan@laas.fr',
      url='http://morse.openrobots.org',
      py_modules=['pymorse'],
      )
| {
"content_hash": "6e10556094007e4f2d39c4677e832c1f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 89,
"avg_line_length": 29.5,
"alnum_prop": 0.6440677966101694,
"repo_name": "Arkapravo/morse-0.6",
"id": "36f21c7869aa52925c26a9d1b86e31f603ae040e",
"size": "355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bindings/pymorse/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "46148"
},
{
"name": "C++",
"bytes": "30878"
},
{
"name": "Perl",
"bytes": "1705"
},
{
"name": "Python",
"bytes": "1117700"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
} |
import random
import itertools
from simpleai.search import CspProblem, backtrack, min_conflicts
# En el juego Munchkin, cada jugador tiene un personaje al que puede mejorar aplicando diferentes cartas.
# Estas cartas incluyen cosas como armas, armaduras, pociones, maldiciones, y otros modificadores que incrementan el nivel del personaje,
# haciéndolo más capaz de ganar el juego. Pero existen algunas restricciones respecto a qué cartas pueden utilizarse en un mismo personaje,
# de forma de evitar jugadores “superpoderosos” que degradan la jugabilidad.
# Un jugador tiene que elegir 3 cartas para intentar armar su personaje, y las opciones disponibles son:
# Armadura de madera +1 (800 oro)
# Armadura de hierro +3 (1000 oro)
# Armadura de acero +5 (1300 oro)
# Espada de madera +1 (500 oro)
# Espada de hierro +2 (700 oro)
# Espada de acero +4 (1000 oro)
# Garrote gigante de madera +6 (1300 oro)
# Poción de fuego +5 (1500 oro)
# Poción de hielo +2 (800 oro)
# Poción de ácido +3 (1200 oro)
# Y a la vez, deben cumplirse las siguientes restricciones:
# Solo se puede tener 1 armadura
# Solo se puede tener 1 arma de mano (espada o garrote)
# Solo se dispone de 3000 de oro para gastar (es decir, el valor de las cartas sumadas no puede superar ese monto)
# No se pueden mezclar cartas de objetos de fuego con cartas de objetos de madera
# Se tiene que lograr un bonificador total (sumando las cartas) mayor a +15
# ¿Qué 3 cartas puede elegir el jugador para equipar a su personaje?
# ¡ No se encuentra solución porque para obtener una bonificación mayor a +15 solo hay una combinación posible y no cumple ciertas restricciones !
restricciones = []

# Three card slots; each slot may hold any of the ten cards.
variables = (1, 2, 3)
dominios = {slot: ['AM', 'AH', 'AA', 'EM', 'EH', 'EA', 'G', 'PF', 'PH', 'PA']
            for slot in variables}

# Gold cost and level bonus of every card.
costos = {'AM': 800, 'AH': 1000, 'AA': 1300, 'EM': 500, 'EH': 700,
          'EA': 1000, 'G': 1300, 'PF': 1500, 'PH': 800, 'PA': 1200}
bonificacion = {'AM': 1, 'AH': 3, 'AA': 5, 'EM': 1, 'EH': 2,
                'EA': 4, 'G': 6, 'PF': 5, 'PH': 2, 'PA': 3}


def unaarmadura(var, val):
    # At most one armor card ('AM', 'AH', 'AA') among the chosen cards.
    return (val.count('AM') + val.count('AH') + val.count('AA')) <= 1


def unaarma(var, val):
    # At most one hand weapon (any sword or the giant club).
    return (val.count('EM') + val.count('EH') + val.count('EA') + val.count('G')) <= 1


def costomaximo(var, val):
    # The three cards together may not cost more than 3000 gold.
    return sum(costos[carta] for carta in val) <= 3000


def nomezclar(var, val):
    # Fire items cannot be mixed with wooden items.
    return not ('PF' in val and ('AM' in val or 'EM' in val or 'G' in val))


def bonificacionmayor(var, val):
    # The combined bonus of the three cards must exceed +15.
    return sum(bonificacion[carta] for carta in val) > 15


# Every constraint applies jointly to the three card slots.
for restriccion in (unaarmadura, unaarma, costomaximo, nomezclar,
                    bonificacionmayor):
    restricciones.append((variables, restriccion))

problem = CspProblem(variables, dominios, restricciones)
resultado = backtrack(problem=problem)
print resultado | {
"content_hash": "24bef0cd45f9efa069a52d2e26a02377",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 146,
"avg_line_length": 53.36065573770492,
"alnum_prop": 0.7093701996927804,
"repo_name": "MaxiTalenti/IA",
"id": "6b0dbdec4abc7a7bf9a15fc1f5c0a6c3bc48415e",
"size": "3287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CSP/CSP_Munchkin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93230"
}
],
"symlink_target": ""
} |
"""
Legendre Series (:mod: `numpy.polynomial.legendre`)
===================================================
.. currentmodule:: numpy.polynomial.legendre
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
.. autosummary::
:toctree: generated/
legdomain Legendre series default domain, [-1,1].
legzero Legendre series that evaluates identically to 0.
legone Legendre series that evaluates identically to 1.
legx Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
.. autosummary::
:toctree: generated/
legadd add two Legendre series.
legsub subtract one Legendre series from another.
legmulx multiply a Legendre series in ``P_i(x)`` by ``x``.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
legpow raise a Legendre series to a positive integer power.
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
leggrid2d evaluate a 2D Legendre series on a Cartesian product.
leggrid3d evaluate a 3D Legendre series on a Cartesian product.
Calculus
--------
.. autosummary::
:toctree: generated/
legder differentiate a Legendre series.
legint integrate a Legendre series.
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots create a Legendre series with specified roots.
legroots find the roots of a Legendre series.
legvander Vandermonde-like matrix for Legendre polynomials.
legvander2d Vandermonde-like matrix for 2D power series.
legvander3d Vandermonde-like matrix for 3D power series.
leggauss Gauss-Legendre quadrature, points and weights.
legweight Legendre weight function.
legcompanion symmetrized companion matrix in Legendre form.
legfit least-squares fit returning a Legendre series.
legtrim trim leading coefficients from a Legendre series.
legline Legendre series representing given straight line.
leg2poly convert a Legendre series to a polynomial.
poly2leg convert a polynomial to a Legendre series.
Classes
-------
Legendre A Legendre series class.
See also
--------
numpy.polynomial.polynomial
numpy.polynomial.chebyshev
numpy.polynomial.laguerre
numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
    """
    Convert a polynomial to a Legendre series.

    The input is an array of coefficients relative to the "standard"
    power basis, ordered from lowest degree to highest; the return value
    holds the coefficients of the equivalent Legendre series in the same
    ordering.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients.

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Legendre
        series.

    See Also
    --------
    leg2poly

    Notes
    -----
    Conversions between basis sets are usually done more conveniently
    through the ``convert`` method of the series classes.

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> p = P.Polynomial(np.arange(4))
    >>> p
    Polynomial([0.,  1.,  2.,  3.], domain=[-1,  1], window=[-1,  1])
    >>> c = P.Legendre(P.legendre.poly2leg(p.coef))
    >>> c
    Legendre([ 1.  ,  3.25,  1.  ,  0.75], domain=[-1, 1], window=[-1, 1]) # may vary
    """
    [pol] = pu.as_series([pol])
    # Horner-style accumulation in the Legendre basis: multiply the
    # running result by x (via legmulx) and add the next coefficient,
    # working from the highest power down.
    result = 0
    for coef in reversed(pol):
        result = legadd(legmulx(result), coef)
    return result
def leg2poly(c):
    """
    Convert a Legendre series to a polynomial.

    The input is an array of Legendre series coefficients ordered from
    lowest degree to highest; the return value holds the coefficients of
    the equivalent polynomial relative to the "standard" power basis, in
    the same ordering.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients, low to high degree.

    Returns
    -------
    pol : ndarray
        1-D array of the equivalent polynomial coefficients, low to high
        degree.

    See Also
    --------
    poly2leg

    Notes
    -----
    Conversions between basis sets are usually done more conveniently
    through the ``convert`` method of the series classes.

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> c = P.Legendre(range(4))
    >>> c
    Legendre([0.,  1.,  2.,  3.], domain=[-1,  1], window=[-1,  1])
    >>> p = c.convert(kind=P.Polynomial)
    >>> p
    Polynomial([-1. , -3.5,  3. ,  7.5], domain=[-1.,  1.], window=[-1.,  1.])
    >>> P.leg2poly(range(4))
    array([-1. , -3.5,  3. ,  7.5])
    """
    from .polynomial import polyadd, polysub, polymulx

    [c] = pu.as_series([c])
    n = len(c)
    # Series of degree < 2 coincide with their power-basis form.
    if n < 3:
        return c
    # Downward Clenshaw-style recurrence built on the three-term relation
    # i*P_i(x) = (2i - 1)*x*P_{i-1}(x) - (i - 1)*P_{i-2}(x).
    c0 = c[-2]
    c1 = c[-1]
    # i is the current degree of c1
    for i in range(n - 1, 1, -1):
        tmp = c0
        c0 = polysub(c[i - 2], (c1*(i - 1))/i)
        c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
    return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Legendre series default domain, [-1, 1].
legdomain = np.array([-1, 1])
# Legendre coefficients representing zero.
legzero = np.array([0])
# Legendre coefficients representing one.
legone = np.array([1])
# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
    """
    Legendre series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Legendre series for
        ``off + scl*x``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> import numpy.polynomial.legendre as L
    >>> L.legline(3,2)
    array([3, 2])
    >>> L.legval(-3, L.legline(3,2))  # should be -3
    -3.0
    """
    # Since P_0 = 1 and P_1 = x, the line maps directly onto the first two
    # coefficients; a zero slope returns a trimmed, degree-0 series.
    if scl == 0:
        return np.array([off])
    return np.array([off, scl])
def legfromroots(roots):
    """
    Generate a Legendre series with given roots.

    Returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    expressed in the Legendre basis, where the `r_n` are the roots listed
    in `roots`.  A root of multiplicity n must appear n times, and roots
    may be given in any order.  With returned coefficients `c`,

    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)

    Note that the leading coefficient is not generally 1 for monic
    polynomials in Legendre form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  The result is real if all roots are
        real, and complex otherwise, even when every output coefficient
        happens to be real.

    See Also
    --------
    polyfromroots, chebfromroots, lagfromroots, hermfromroots, hermefromroots

    Examples
    --------
    >>> import numpy.polynomial.legendre as L
    >>> L.legfromroots((-1,0,1))  # x^3 - x relative to the standard basis
    array([ 0. , -0.4,  0. ,  0.4])
    >>> j = complex(0,1)
    >>> L.legfromroots((-j,j))  # x^2 + 1 relative to the standard basis
    array([ 1.33333333+0.j,  0.00000000+0.j,  0.66666667+0.j]) # may vary
    """
    # Delegate to the shared helper, parameterized with this basis's
    # line constructor and multiplication.
    return pu._fromroots(legline, legmul, roots)
def legadd(c1, c2):
    """
    Add one Legendre series to another.

    The arguments are coefficient sequences ordered from lowest degree to
    highest, e.g. ``[1, 2, 3]`` stands for ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients, low to high degree.

    Returns
    -------
    out : ndarray
        Coefficient array representing the Legendre series ``c1 + c2``.

    See Also
    --------
    legsub, legmulx, legmul, legdiv, legpow

    Notes
    -----
    Unlike multiplication and division, addition is closed in the
    Legendre basis (no "reprojection" needed), so the sum is computed
    component-wise, just as for "standard" polynomials.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> L.legadd(c1,c2)
    array([4.,  4.,  4.])
    """
    return pu._add(c1, c2)
def legsub(c1, c2):
    """
    Subtract one Legendre series from another.

    The arguments are coefficient sequences ordered from lowest degree to
    highest, e.g. ``[1, 2, 3]`` stands for ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients, low to high degree.

    Returns
    -------
    out : ndarray
        Coefficient array representing the Legendre series ``c1 - c2``.

    See Also
    --------
    legadd, legmulx, legmul, legdiv, legpow

    Notes
    -----
    Unlike multiplication and division, subtraction is closed in the
    Legendre basis (no "reprojection" needed), so the difference is
    computed component-wise, just as for "standard" polynomials.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> L.legsub(c1,c2)
    array([-2.,  0.,  2.])
    >>> L.legsub(c2,c1)  # -C.legsub(c1,c2)
    array([ 2.,  0., -2.])
    """
    return pu._sub(c1, c2)
def legmulx(c):
    """Multiply a Legendre series by x.

    Multiply the Legendre series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients, low to high degree.

    Returns
    -------
    out : ndarray
        Coefficient array representing the result of the multiplication.

    See Also
    --------
    legadd, legmul, legmul, legdiv, legpow

    Notes
    -----
    The computation rests on the recursion relation

    .. math::

      xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> L.legmulx([1,2,3])
    array([ 0.66666667, 2.2       , 1.33333333, 1.8       ]) # may vary
    """
    [c] = pu.as_series([c])
    # The zero series maps to itself; an early return keeps it trimmed.
    if len(c) == 1 and c[0] == 0:
        return c
    out = np.empty(len(c) + 1, dtype=c.dtype)
    # P_0 contributes only to the x (= P_1) coefficient.
    out[0] = c[0]*0
    out[1] = c[0]
    for i in range(1, len(c)):
        denom = 2*i + 1
        # x*P_i splits across degrees i+1 and i-1 per the recursion above.
        out[i + 1] = (c[i]*(i + 1))/denom
        out[i - 1] += (c[i]*i)/denom
    return out
def legmul(c1, c2):
    """
    Multiply one Legendre series by another.

    The arguments are coefficient sequences ordered from lowest degree to
    highest, e.g. ``[1, 2, 3]`` stands for ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients, low to high degree.

    Returns
    -------
    out : ndarray
        Coefficient array representing the Legendre series product.

    See Also
    --------
    legadd, legsub, legmulx, legdiv, legpow

    Notes
    -----
    The plain polynomial product of two Legendre series contains terms
    outside the Legendre basis, so the product must be "reprojected" onto
    that basis; the resulting coefficients can therefore look
    unintuitive, though they are correct.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c1 = (1,2,3)
    >>> c2 = (3,2)
    >>> L.legmul(c1,c2) # multiplication requires "reprojection"
    array([  4.33333333,  10.4       ,  11.66666667,   3.6       ]) # may vary
    """
    [c1, c2] = pu.as_series([c1, c2])
    # Run the downward recurrence over the shorter operand, scaling the
    # longer one; this keeps the number of series operations minimal.
    if len(c1) > len(c2):
        c, xs = c2, c1
    else:
        c, xs = c1, c2
    if len(c) == 1:
        c0 = c[0]*xs
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]*xs
        c1 = c[1]*xs
    else:
        nd = len(c)
        c0 = c[-2]*xs
        c1 = c[-1]*xs
        for i in range(3, len(c) + 1):
            tmp = c0
            nd = nd - 1
            c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
            c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
    return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
    """
    Divide one Legendre series by another.

    Returns the quotient-with-remainder of ``c1 / c2``.  The arguments
    are coefficient sequences ordered from lowest degree to highest,
    e.g. ``[1, 2, 3]`` stands for ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients, low to high degree.

    Returns
    -------
    quo, rem : ndarrays
        Legendre series coefficients of the quotient and remainder.

    See Also
    --------
    legadd, legsub, legmulx, legmul, legpow

    Notes
    -----
    The plain polynomial division of one Legendre series by another
    yields quotient and remainder terms outside the Legendre basis, so
    both results are "reprojected" onto that basis; the coefficients can
    therefore look unintuitive, though they are correct.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
    (array([3.]), array([-8., -4.]))
    >>> c2 = (0,1,2,3)
    >>> L.legdiv(c2,c1) # neither "intuitive"
    (array([-0.07407407,  1.66666667]), array([-1.03703704, -2.51851852])) # may vary
    """
    return pu._div(legmul, c1, c2)
def legpow(c, pow, maxpower=16):
    """Raise a Legendre series to a power.

    Returns the Legendre series `c` raised to the power `pow`.  The
    argument `c` is a coefficient sequence ordered from low to high
    degree, i.e. ``[1, 2, 3]`` stands for ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients, low to high degree.
    pow : integer
        Power to which the series will be raised.
    maxpower : integer, optional
        Maximum allowed power, mainly to limit growth of the series to
        unmanageable size.  Default is 16.

    Returns
    -------
    coef : ndarray
        Legendre series coefficients of ``c**pow``.

    See Also
    --------
    legadd, legsub, legmulx, legmul, legdiv
    """
    return pu._pow(legmul, c, pow, maxpower)
def legder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Legendre series.

    Returns the Legendre series coefficients of `c` differentiated `m`
    times along `axis`.  At each iteration the result is multiplied by
    `scl`, which is useful for a linear change of variable; the end
    result carries a factor of ``scl**m``.

    Parameters
    ----------
    c : array_like
        Array of Legendre series coefficients, low to high degree along
        each axis.  For multidimensional `c` each axis corresponds to a
        different variable.
    m : int, optional
        Number of derivatives taken; must be non-negative.  (Default: 1)
    scl : scalar, optional
        Factor applied at each differentiation.  (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken.  (Default: 0)

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Legendre series coefficients of the derivative.

    See Also
    --------
    legint

    Notes
    -----
    Differentiation in the Legendre basis does not resemble the same
    operation on a power series, so the coefficients produced here can
    look unintuitive, though they are correct.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c = (1,2,3,4)
    >>> L.legder(c)
    array([  6.,   9.,  20.])
    >>> L.legder(c, 3)
    array([60.])
    >>> L.legder(c, scl=-1)
    array([ -6.,  -9., -20.])
    >>> L.legder(c, 2,-1)
    array([  9.,  60.])
    """
    c = np.array(c, ndmin=1, copy=True)
    # Promote boolean/integer coefficients so arithmetic below is exact
    # in floating point rather than truncating.
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    cnt = pu._deprecate_as_int(m, "the order of derivation")
    iaxis = pu._deprecate_as_int(axis, "the axis")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    iaxis = normalize_axis_index(iaxis, c.ndim)
    if cnt == 0:
        return c
    # Bring the differentiation axis to the front for simple indexing.
    c = np.moveaxis(c, iaxis, 0)
    n = len(c)
    if cnt >= n:
        # Differentiating at least deg+1 times annihilates the series.
        c = c[:1]*0
    else:
        for _ in range(cnt):
            n = n - 1
            c *= scl
            deriv = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # Downward recurrence: each degree-j term feeds both the
            # derivative coefficient at j-1 and the residue at j-2.
            for j in range(n, 2, -1):
                deriv[j - 1] = (2*j - 1)*c[j]
                c[j - 2] += c[j]
            if n > 1:
                deriv[1] = 3*c[2]
            deriv[0] = c[1]
            c = deriv
    return np.moveaxis(c, 0, iaxis)
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Legendre series.

    Returns the Legendre series coefficients of `c` integrated `m` times
    from `lbnd` along `axis`.  At each iteration the resulting series is
    *multiplied* by `scl` and the matching integration constant from `k`
    is added.  The scaling factor supports a linear change of variable;
    note that it is often the *reciprocal* of what one might first
    expect (see Notes).

    Parameters
    ----------
    c : array_like
        Array of Legendre series coefficients, low to high degree along
        each axis.  For multidimensional `c` each axis corresponds to a
        different variable.
    m : int, optional
        Order of integration; must be non-negative.  (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The i-th value fixes the value of the
        i-th integral at ``lbnd``; missing constants default to zero.
        For ``m == 1`` a single scalar may be given.
    lbnd : scalar, optional
        The lower bound of the integral.  (Default: 0)
    scl : scalar, optional
        Factor applied to the result of each integration before the
        integration constant is added.  (Default: 1)
    axis : int, optional
        Axis over which the integral is taken.  (Default: 0)

        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        Legendre series coefficient array of the integral.

    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
        ``np.ndim(scl) != 0``.

    See Also
    --------
    legder

    Notes
    -----
    For a change of variable :math:`u = ax + b` in an integral relative
    to `x`, :math:`dx = du/a`, so `scl` should be set to :math:`1/a`.
    Also, integrating a Legendre series generally requires
    "reprojection" onto the basis, so the coefficients produced here can
    look unintuitive, though they are correct.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c = (1,2,3)
    >>> L.legint(c)
    array([ 0.33333333,  0.4       ,  0.66666667,  0.6       ]) # may vary
    >>> L.legint(c, 3)
    array([  1.66666667e-02,  -1.78571429e-02,   4.76190476e-02, # may vary
             -1.73472348e-18,   1.90476190e-02,   9.52380952e-03])
    >>> L.legint(c, k=3)
    array([ 3.33333333,  0.4       ,  0.66666667,  0.6       ]) # may vary
    >>> L.legint(c, lbnd=-2)
    array([ 7.33333333,  0.4       ,  0.66666667,  0.6       ]) # may vary
    >>> L.legint(c, scl=2)
    array([ 0.66666667,  0.8       ,  1.33333333,  1.2       ]) # may vary
    """
    c = np.array(c, ndmin=1, copy=True)
    # Promote boolean/integer coefficients so the divisions below are
    # exact in floating point rather than truncating.
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]
    cnt = pu._deprecate_as_int(m, "the order of integration")
    iaxis = pu._deprecate_as_int(axis, "the axis")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if np.ndim(lbnd) != 0:
        raise ValueError("lbnd must be a scalar.")
    if np.ndim(scl) != 0:
        raise ValueError("scl must be a scalar.")
    iaxis = normalize_axis_index(iaxis, c.ndim)
    if cnt == 0:
        return c
    # Bring the integration axis to the front and pad k with zeros so
    # every iteration has a constant.
    c = np.moveaxis(c, iaxis, 0)
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # Integral of the zero series only picks up the constant.
            c[0] += k[i]
        else:
            grown = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            grown[0] = c[0]*0
            grown[1] = c[0]
            if n > 1:
                grown[2] = c[1]/3
            for j in range(2, n):
                term = c[j]/(2*j + 1)
                grown[j + 1] = term
                grown[j - 1] -= term
            # Adjust the constant term so the integral equals k[i] at lbnd.
            grown[0] += k[i] - legval(lbnd, grown)
            c = grown
    return np.moveaxis(c, 0, iaxis)
def legval(x, c, tensor=True):
    """
    Evaluate a Legendre series at points x.

    If `c` is of length ``n + 1``, this function returns the value

    .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)

    The parameter `x` is converted to an array only if it is a tuple or a
    list, otherwise it is treated as a scalar. In either case, either `x`
    or its elements must support multiplication and addition both with
    themselves and with the elements of `c`.

    If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`.
    If `c` is multidimensional, then the shape of the result depends on the
    value of `tensor`. If `tensor` is true the shape will be
    ``c.shape[1:] + x.shape``; if false it will be ``c.shape[1:]``. Note
    that scalars have shape ().

    Trailing zeros in the coefficients will be used in the evaluation, so
    they should be avoided if efficiency is a concern.

    Parameters
    ----------
    x : array_like, compatible object
        If `x` is a list or tuple, it is converted to an ndarray, otherwise
        it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree n are contained in c[n]. If `c` is multidimensional the
        remaining indices enumerate multiple polynomials. In the two
        dimensional case the coefficients may be thought of as stored in
        the columns of `c`.
    tensor : boolean, optional
        If True, the shape of the coefficient array is extended with ones
        on the right, one for each dimension of `x`. Scalars have dimension 0
        for this action. The result is that every column of coefficients in
        `c` is evaluated for every element of `x`. If False, `x` is broadcast
        over the columns of `c` for the evaluation. This keyword is useful
        when `c` is multidimensional. The default value is True.

        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        The shape of the return value is described above.

    See Also
    --------
    legval2d, leggrid2d, legval3d, leggrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.
    """
    c = np.array(c, ndmin=1, copy=False)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Boolean/integer coefficients are promoted so division is exact.
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # Append one axis per dimension of x so each coefficient column
        # broadcasts against every evaluation point.
        c = c.reshape(c.shape + (1,)*x.ndim)

    n = len(c)
    if n == 1:
        b0, b1 = c[0], 0
    elif n == 2:
        b0, b1 = c[0], c[1]
    else:
        # Clenshaw recursion, run from the highest degree downward, using
        # the Legendre three-term recurrence coefficients.
        deg = n
        b0 = c[-2]
        b1 = c[-1]
        for k in range(3, n + 1):
            prev = b0
            deg -= 1
            b0 = c[-k] - (b1*(deg - 1))/deg
            b1 = prev + (b1*x*(2*deg - 1))/deg
    return b0 + b1*x
def legval2d(x, y, c):
    """
    Evaluate a 2-D Legendre series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
    must have the same shape after conversion. In either case, either `x`
    and `y` or their elements must support multiplication and addition both
    with themselves and with the elements of `c`.

    If `c` is a 1-D array a one is implicitly appended to its shape to make
    it 2-D. The shape of the result will be c.shape[2:] + x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points `(x, y)`,
        where `x` and `y` must have the same shape. If `x` or `y` is a list
        or tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and if it isn't an ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than two the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional Legendre series at points formed
        from pairs of corresponding values from `x` and `y`.

    See Also
    --------
    legval, leggrid2d, legval3d, leggrid3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Delegate to the shared n-D evaluator: applies `legval` once per
    # dimension (x against degree axis 0 of `c`, y against axis 1).
    return pu._valnd(legval, c, x, y)
def leggrid2d(x, y, c):
    """
    Evaluate a 2-D Legendre series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)

    where the points `(a, b)` consist of all pairs formed by taking
    `a` from `x` and `b` from `y`. The resulting points form a grid with
    `x` in the first dimension and `y` in the second.

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
    case, either `x` and `y` or their elements must support multiplication
    and addition both with themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape + y.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`. If `x` or `y` is a list or
        tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and, if it isn't an ndarray, it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional Legendre series at points in the
        Cartesian product of `x` and `y`.

    See Also
    --------
    legval, legval2d, legval3d, leggrid3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Delegate to the shared grid evaluator, which evaluates `legval` along
    # each axis and forms the outer (Cartesian) product of the results.
    return pu._gridnd(legval, c, x, y)
def legval3d(x, y, z, c):
    """
    Evaluate a 3-D Legendre series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)

    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than 3 the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.

    See Also
    --------
    legval, legval2d, leggrid2d, leggrid3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Delegate to the shared n-D evaluator: applies `legval` once per
    # dimension (x, y, z against degree axes 0, 1, 2 of `c`).
    return pu._valnd(legval, c, x, y, z)
def leggrid3d(x, y, z, c):
    """
    Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)

    where the points `(a, b, c)` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
    a grid with `x` in the first dimension, `y` in the second, and `z` in
    the third.

    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
    either case, either `x`, `y`, and `z` or their elements must support
    multiplication and addition both with themselves and with the elements
    of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape + y.shape + z.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
        list or tuple, it is first converted to an ndarray, otherwise it is
        left unchanged and, if it isn't an ndarray, it is treated as a
        scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has
        dimension greater than three the remaining indices enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the three dimensional Legendre series at points in
        the Cartesian product of `x`, `y`, and `z`.

    See Also
    --------
    legval, legval2d, leggrid2d, legval3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Delegate to the shared grid evaluator, which evaluates `legval` along
    # each axis and forms the outer (Cartesian) product of the results.
    return pu._gridnd(legval, c, x, y, z)
def legvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
    `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = L_i(x)

    where ``0 <= i <= deg``. The leading indices of `V` index the elements
    of `x` and the last index is the degree of the Legendre polynomial.

    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is
    the array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
    ``legval(x, c)`` are the same up to roundoff. This equivalence is
    useful both for least squares fitting and for the evaluation of a large
    number of Legendre series of the same degree and sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
        corresponding Legendre polynomial. The dtype will be the same as
        the converted `x`.
    """
    ideg = pu._deprecate_as_int(deg, "deg")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    # ``+ 0.0`` forces a floating (or complex) dtype without copying twice.
    x = np.array(x, copy=False, ndmin=1) + 0.0
    v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
    # Use forward recursion to generate the entries. This is not as accurate
    # as reverse recursion in this application but it is more efficient.
    v[0] = x*0 + 1
    if ideg > 0:
        v[1] = x
        for k in range(2, ideg + 1):
            # Legendre recurrence: i*L_i = (2i-1)*x*L_{i-1} - (i-1)*L_{i-2}
            v[k] = (v[k-1]*x*(2*k - 1) - v[k-2]*(k - 1))/k
    # Degree axis was built first for cheap row writes; move it last.
    return np.moveaxis(v, 0, -1)
def legvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y)`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),

    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading
    indices of `V` index the points `(x, y)` and the last index encodes the
    degrees of the Legendre polynomials.

    If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D Legendre
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    legvander, legvander3d, legval2d, legval3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Delegate to the shared helper, which builds the 1-D `legvander`
    # matrices for x and y and combines them column-wise.
    return pu._vander2d(legvander, x, y, deg)
def legvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),

    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The
    leading indices of `V` index the points `(x, y, z)` and the last index
    encodes the degrees of the Legendre polynomials.

    If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D Legendre
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes will
        be converted to either float64 or complex128 depending on whether
        any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    legvander, legvander2d, legval2d, legval3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Delegate to the shared helper, which builds the 1-D `legvander`
    # matrices for x, y and z and combines them column-wise.
    return pu._vander3d(legvander, x, y, z, deg)
def legfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Legendre series to data.

    Return the coefficients of a Legendre series of degree `deg` that is the
    least squares fit to the data values `y` given at points `x`. If `y` is
    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
    fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. If `deg` is a single integer
        all terms up to and including the `deg`'th term are included in the
        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
        degrees of the terms to include may be used instead.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products ``w[i]*y[i]``
        all have the same variance. The default value is None.

        .. versionadded:: 1.5.0

    Returns
    -------
    coef : ndarray, shape (M,) or (M, K)
        Legendre coefficients ordered from low to high. If `y` was
        2-D, the coefficients for the data in column k of `y` are in
        column `k`. If `deg` is specified as a list, coefficients for
        terms not included in the fit are set equal to zero in the
        returned `coef`.
    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True

        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.

        For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False. The
        warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)

    See Also
    --------
    chebfit, polyfit, lagfit, hermfit, hermefit
    legval : Evaluates a Legendre series.
    legvander : Vandermonde matrix of Legendre series.
    legweight : Legendre weight function (= 1).
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution is the coefficients of the Legendre series `p` that
    minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,

    where :math:`w_j` are the weights. This problem is solved by setting up
    as the (typically) overdetermined matrix equation

    .. math:: V(x) * c = w * y,

    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
    coefficients to be solved for, `w` are the weights, and `y` are the
    observed values. This equation is then solved using the singular value
    decomposition of `V`.

    If some of the singular values of `V` are so small that they are
    neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
    will usually get rid of the warning. The `rcond` parameter can also be
    set to a value smaller than its default, but the resulting fit may be
    spurious and have large contributions from roundoff error.

    Fits using Legendre series are usually better conditioned than fits
    using power series, but much can depend on the distribution of the
    sample points and the smoothness of the data. If the quality of the fit
    is inadequate splines may be a good alternative.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           https://en.wikipedia.org/wiki/Curve_fitting
    """
    # Delegate to the shared least-squares driver, parameterized with this
    # module's `legvander` so the fit is in the Legendre basis.
    return pu._fit(legvander, x, y, deg, rcond, full, w)
def legcompanion(c):
    """Return the scaled companion matrix of c.

    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is a Legendre basis polynomial. This provides
    better eigenvalue estimates than the unscaled case and for basis
    polynomials the eigenvalues are guaranteed to be real if
    `numpy.linalg.eigvalsh` is used to obtain them.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients ordered from low to high
        degree.

    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # Degree-1 series: the single root is -c0/c1.
        return np.array([[-c[0]/c[1]]])

    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Symmetrizing scale factors 1/sqrt(2k+1) for the Legendre basis.
    scl = 1./np.sqrt(2*np.arange(n) + 1)
    offdiag = np.arange(1, n)*scl[:n-1]*scl[1:n]
    flat = mat.reshape(-1)
    flat[1::n+1] = offdiag   # superdiagonal
    flat[n::n+1] = offdiag   # subdiagonal (symmetric)
    # Fold the monic-normalized coefficients into the last column.
    mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
    return mat
def legroots(c):
    """
    Compute the roots of a Legendre series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * L_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, chebroots, lagroots, hermroots, hermeroots

    Notes
    -----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
    errors due to the numerical instability of the series for such values.
    Roots with multiplicity greater than 1 will also show larger errors as
    the value of the series near such points is relatively insensitive to
    errors in the roots. Isolated roots near the origin can be improved by
    a few iterations of Newton's method.

    The Legendre series basis polynomials aren't powers of ``x`` so the
    results of this function may seem unintuitive.

    Examples
    --------
    >>> import numpy.polynomial.legendre as leg
    >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
    array([-0.85099543, -0.11407192,  0.51506735]) # may vary
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    n = len(c)
    if n <= 1:
        # Constant series: no roots.
        return np.array([], dtype=c.dtype)
    if n == 2:
        # Linear series: closed-form root.
        return np.array([-c[0]/c[1]])

    # rotated companion matrix reduces error
    m = legcompanion(c)[::-1, ::-1]
    roots = la.eigvals(m)
    roots.sort()
    return roots
def leggauss(deg):
    """
    Gauss-Legendre quadrature.

    Computes the sample points and weights for Gauss-Legendre quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
    the weight function :math:`f(x) = 1`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Raises
    ------
    ValueError
        If `deg` is not a positive integer.

    Notes
    -----
    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`L_n`, and then scaling the results to get
    the right value when integrating 1.
    """
    ideg = pu._deprecate_as_int(deg, "deg")
    if ideg <= 0:
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    # NOTE: use the converted ``ideg`` here; ``_deprecate_as_int`` accepts
    # integral floats (e.g. 3.0), for which ``[0]*deg`` would raise.
    c = np.array([0]*ideg + [1])
    m = legcompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = legval(x, c)
    df = legval(x, legder(c))
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = legval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)

    # for Legendre we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= 2. / w.sum()
    return x, w
def legweight(x):
    """
    Weight function of the Legendre polynomials.

    The weight function is :math:`1` and the interval of integration is
    :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
    normalized, with respect to this weight function.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`, i.e. an array of ones with the same
        shape (and broadcasting behaviour) as ``x*0.0 + 1.0``.

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # The arithmetic (rather than np.ones_like) preserves scalar inputs
    # as scalars and lets array inputs broadcast naturally.
    return x*0.0 + 1.0
#
# Legendre series class
#
class Legendre(ABCPolyBase):
    """A Legendre series class.

    The Legendre class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed in the `ABCPolyBase` documentation.

    Parameters
    ----------
    coef : array_like
        Legendre coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].

        .. versionadded:: 1.6.0
    """
    # Virtual Functions: ABCPolyBase routes arithmetic, evaluation,
    # calculus, fitting and root finding through these hooks to the
    # module-level Legendre-series implementations.
    _add = staticmethod(legadd)
    _sub = staticmethod(legsub)
    _mul = staticmethod(legmul)
    _div = staticmethod(legdiv)
    _pow = staticmethod(legpow)
    _val = staticmethod(legval)
    _int = staticmethod(legint)
    _der = staticmethod(legder)
    _fit = staticmethod(legfit)
    _line = staticmethod(legline)
    _roots = staticmethod(legroots)
    _fromroots = staticmethod(legfromroots)
    # Virtual properties: default domain and window are both set from
    # `legdomain`; `basis_name` is the symbol used when printing.
    nickname = 'leg'
    domain = np.array(legdomain)
    window = np.array(legdomain)
    basis_name = 'P'
| {
"content_hash": "f765edd96a540674d8eb1aa1a991a03e",
"timestamp": "",
"source": "github",
"line_count": 1653,
"max_line_length": 87,
"avg_line_length": 31.732607380520268,
"alnum_prop": 0.6015556487589125,
"repo_name": "MSeifert04/numpy",
"id": "281982d0b66c0cecf14ae6a50c45baa8d24306cc",
"size": "52454",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "numpy/polynomial/legendre.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9050105"
},
{
"name": "C++",
"bytes": "189464"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8251054"
},
{
"name": "Shell",
"bytes": "8345"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
} |
from ctypes import CDLL, py_object
import cv
from os.path import abspath, dirname, join
# Directory containing this module; used to locate the bundled _sample.so.
DN = dirname(abspath(__file__))
def sample(inimage):
    """Run the native ``sample`` routine on *inimage*.

    Allocates a single-channel float32 matrix with the same dimensions as
    the input, passes both objects to the ``sample`` function exported by
    ``_sample.so`` (located next to this module), and returns the output
    matrix, which the native code fills in place.
    """
    result = cv.CreateMat(inimage.height, inimage.width, cv.CV_32FC1)
    native = CDLL(join(DN, '_sample.so'))
    native.sample(
        py_object(inimage),
        py_object(result),
    )
    return result
if __name__ == '__main__':
    # Smoke test: run the native sampler on a checked-in test image.
    im = cv.LoadImage('../test.png')
    sample(im)
| {
"content_hash": "24b4e05600a6acc42b88ba426752a0e2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 21.57894736842105,
"alnum_prop": 0.6121951219512195,
"repo_name": "nwilming/bifl",
"id": "a1cc69826e71708c2de8ac6fb3f01bf61599b010",
"size": "410",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bifl/cpy/sample.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2730"
},
{
"name": "C++",
"bytes": "3274"
},
{
"name": "Python",
"bytes": "14034"
}
],
"symlink_target": ""
} |
from django.db import models
from six import python_2_unicode_compatible
@python_2_unicode_compatible
class TModelOne(models.Model):
    """Top level of the three-model hierarchy used by the nested-admin tests."""
    name = models.CharField(max_length=200)
    level_one = models.CharField(max_length=20, blank=True, default='one')
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class TModelTwo(models.Model):
    """Middle level of the test hierarchy, optionally attached to a TModelOne."""
    name = models.CharField(max_length=200)
    level_two = models.CharField(max_length=20, blank=True, default='two')
    # Optional parent; deleting the parent cascades to its children.
    parent = models.ForeignKey(
        'select2_nestedadmin.TModelOne',
        models.CASCADE,
        null=True,
        blank=True,
        related_name='children'
    )
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class TModelThree(models.Model):
    """Bottom level of the test hierarchy, with both a parent FK and a self FK."""
    name = models.CharField(max_length=200)
    # Optional parent; deleting the parent cascades to its children.
    parent = models.ForeignKey(
        'select2_nestedadmin.TModelTwo',
        models.CASCADE,
        null=True,
        blank=True,
        related_name='children'
    )
    # Optional self-reference used to exercise 'self' FK autocompletes.
    test = models.ForeignKey(
        'self',
        models.CASCADE,
        null=True,
        blank=True,
        related_name='related_test_models'
    )
    def __str__(self):
        return self.name
| {
"content_hash": "e42a74cdff54ef0a8855471e9deb3fd1",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 22.641509433962263,
"alnum_prop": 0.6325,
"repo_name": "yourlabs/django-autocomplete-light",
"id": "7318ece7cb3eb5f9b9b27f5622ffc2514e6a3120",
"size": "1200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/select2_nestedadmin/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11205"
},
{
"name": "HTML",
"bytes": "5709"
},
{
"name": "JavaScript",
"bytes": "27379"
},
{
"name": "Python",
"bytes": "210537"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
} |
import datetime
import errno
from os import getpgid
from os.path import isfile, join as join_paths
from tempfile import gettempdir
from pyramid.settings import asbool
from sqlalchemy.util._collections import lightweight_named_tuple
import venusian
from ines import DOMAIN_NAME, lazy_import_module, NOW, PROCESS_ID, SYSTEM_VERSION
from ines.api import BaseSession, BaseSessionManager
from ines.convert import maybe_list, to_string
from ines.cron import Cron, DATES_RANGES
from ines.exceptions import LockTimeout, NoMoreDates
from ines.interfaces import IBaseSessionManager
from ines.request import make_request
from ines.system import start_system_thread
from ines.utils import sort_with_none
# Registry of every APIJob created via BaseJobsManager.add_job.
JOBS = set()
# (apijob, thread) pairs for jobs the monitor has started.
RUNNING_JOBS = []
# Cache keys used to coordinate job state across processes/hosts.
JOBS_REPORT_PATTERN = 'jobs report %s'
JOBS_REPORT_KEY = JOBS_REPORT_PATTERN % DOMAIN_NAME
JOBS_LOCK_KEY = lambda k: 'jobs lock %s' % k
JOBS_IMMEDIATE_KEY = 'jobs immediate run'
# Module-level alias for datetime.datetime.fromtimestamp.
FROM_TIMESTAMP = datetime.datetime.fromtimestamp
def to_timestamp(date):
    """Convert a datetime to an integer Unix timestamp.

    Falsy input (``None``) yields ``None``.
    """
    if not date:
        return None
    return int(date.timestamp())
def from_timestamp(timestamp):
    """Build a (naive, local-time) datetime from a Unix timestamp.

    Falsy input (``None`` or ``0``) yields ``None``.
    """
    if not timestamp:
        return None
    return datetime.datetime.fromtimestamp(timestamp)
class BaseJobsManager(BaseSessionManager):
__api_name__ = 'jobs'
def __init__(self, *args, **kwargs):
super(BaseJobsManager, self).__init__(*args, **kwargs)
self.save_reports = asbool(self.settings.get('save_reports', True))
self.server_domain_name = self.settings.get('server_domain_name')
self.active = bool(
not self.server_domain_name
or self.server_domain_name == DOMAIN_NAME)
self.domain_names = set(self.settings.get('domain_names', ''))
self.domain_names.add(DOMAIN_NAME)
try:
self.transaction = lazy_import_module('transaction')
except ImportError:
self.transaction = None
if self.active:
temporary_dir = gettempdir()
domain_start_file_path = join_paths(temporary_dir, 'jobs domain %s started' % DOMAIN_NAME)
lock_key = 'jobs monitor start check'
self.config.cache.lock(lock_key, timeout=10)
try:
start_thread = not isfile(domain_start_file_path)
if not start_thread:
try:
with open(domain_start_file_path, 'r') as f:
process_id = int(f.read())
except (IOError, ValueError):
start_thread = True
else:
try:
getpgid(process_id)
except OSError as error:
if error.errno is errno.ESRCH:
start_thread = True
else:
raise
if start_thread:
with open(domain_start_file_path, 'w') as f:
f.write(str(PROCESS_ID))
finally:
self.config.cache.unlock(lock_key)
# Start only one Thread for each domain
if start_thread:
start_system_thread('jobs_monitor', self.run_monitor)
print('Running jobs monitor on PID %s' % PROCESS_ID)
def system_session(self, apijob=None):
environ = {
'HTTP_HOST': DOMAIN_NAME,
'PATH_INFO': '/%s' % getattr(apijob, 'name', ''),
'SERVER_NAME': DOMAIN_NAME,
'REMOTE_ADDR': '127.0.0.1',
'wsgi.url_scheme': 'job',
'HTTP_USER_AGENT': SYSTEM_VERSION}
request = make_request(self.config, environ)
return self(request)
def add_job(self, api_name, wrapped, settings):
apijob = APIJob(self, api_name, wrapped.__name__, settings)
JOBS.add(apijob)
def run_job():
return self.register_immediate_job_run(apijob)
wrapped.run_job = run_job
if self.active:
self.update_job_report_info(apijob, called_date=apijob.last_called_date, as_add=True)
def register_immediate_job_run(self, apijob):
self.config.cache.append_value(JOBS_IMMEDIATE_KEY, apijob.name, expire=None)
def immediate_job_run(self, name):
apijob = get_job(name)
if apijob:
return self.register_immediate_job_run(apijob)
def run_monitor(self):
try:
self.validate_daemons()
immediate_jobs = set(
to_string(k)
for k in self.config.cache.get_values(JOBS_IMMEDIATE_KEY, expire=None))
for apijob in list(JOBS):
run_job = False
if apijob.name in immediate_jobs:
run_job = True
immediate_jobs.remove(apijob.name)
elif apijob.will_run() and apijob.next_date <= NOW():
run_job = True
if run_job:
try:
daemon = start_system_thread(
'job_%s' % apijob.name,
apijob,
sleep_method=False)
except KeyError:
pass
else:
RUNNING_JOBS.append((apijob, daemon))
self.config.cache.replace_values(JOBS_IMMEDIATE_KEY, immediate_jobs, expire=None)
except Exception as error:
self.system_session().logging.log_critical('jobs_undefined_error', str(error))
return 5
else:
return 0.5
def update_job_report_info(self, apijob, called_date=None, as_add=False):
if as_add or self.save_reports:
info = self.config.cache.get(JOBS_REPORT_KEY, expire=None) or {}
job_info = info.setdefault(apijob.name, {})
job_info['next'] = to_timestamp(apijob.next_date)
job_info['active'] = apijob.active
if called_date:
job_info.setdefault('called', []).append(to_timestamp(called_date))
if as_add:
job_info['start'] = to_timestamp(NOW())
self.config.cache.put(JOBS_REPORT_KEY, info, expire=None)
    def get_active_jobs(self, application_names=None, attributes=None, order_by=None):
        """Merge the per-domain job reports from the cache into one list.

        Each job appears once, with its earliest start date, soonest next
        date, and all recorded call timestamps across domains.  Returns
        SQLAlchemy-like named-tuple rows restricted to *attributes*
        (default: ``application_name``), optionally sorted by *order_by*.
        """
        jobs = {}
        application_names = maybe_list(application_names)
        for domain_name in self.domain_names:
            domain_info = self.config.cache.get(JOBS_REPORT_PATTERN % domain_name, expire=None)
            if not domain_info:
                continue
            for name, info in domain_info.items():
                application_name = get_job_string_application_name(name)
                if not application_names or application_name in application_names:
                    job_info = jobs.get(name)
                    if not job_info:
                        # First time this job is seen: create the merged entry.
                        jobs[name] = job_info = {}
                        apijob = get_job(name)
                        if not apijob:
                            # Reported in cache but no longer registered here.
                            continue
                        job_info['key'] = name
                        job_info['application_name'] = application_name
                        job_info['description'] = apijob.title
                    # Keep the soonest upcoming run across all domains.
                    info_next = from_timestamp(info['next'])
                    if info_next:
                        added_info_next = job_info.get('next_date')
                        if not added_info_next or added_info_next > info_next:
                            job_info['next_date'] = info_next
                    # Keep the earliest registration date across all domains.
                    info_start = from_timestamp(info['start'])
                    if not job_info.get('start_date') or info_start < job_info['start_date']:
                        job_info['start_date'] = info_start
                    called = job_info.setdefault('called', [])
                    if info.get('called'):
                        called.extend(from_timestamp(d) for d in info['called'])
                        called.sort()
                    job_info['called_length'] = len(called)
                    if called:
                        last_date = job_info.get('last_date')
                        if not last_date:
                            job_info['last_date'] = called[-1]
                        elif called[-1] > last_date:
                            job_info['last_date'] = called[-1]
                    if not job_info.get('active'):
                        job_info['active'] = info['active']
        # Give SQLAlchemy like response
        response = []
        attributes = tuple(maybe_list(attributes) or ('application_name', ))
        for info in jobs.values():
            response.append(
                lightweight_named_tuple('result', attributes)
                (tuple(info.get(key) for key in attributes)))
        if order_by:
            column = order_by.column_name
            sort_with_none(response, key=column, reverse=order_by.descendant)
        else:
            sort_with_none(response, key='description')
        return response
def validate_daemons(self):
if RUNNING_JOBS:
for apijob, daemon in RUNNING_JOBS:
if not daemon.isAlive():
# Close thread
daemon.join()
try:
# Update report
self.update_job_report_info(apijob, called_date=apijob.last_called_date)
finally:
# Finally, remove daemon reference
RUNNING_JOBS.remove((apijob, daemon))
class BaseJobsSession(BaseSession):
    """Session exposed under the ``jobs`` API name.

    Provides the post-run flush/commit hook plus thin proxies to the
    session manager's job helpers.
    """
    __api_name__ = 'jobs'

    def after_job_running(self):
        """Flush the database (when one exists) and commit the transaction."""
        database = getattr(self.api, 'database', None)
        if database is not None and hasattr(database, 'flush'):
            database.flush()
        transaction = self.api_session_manager.transaction
        if transaction:
            transaction.commit()

    def get_active_jobs(self, *args, **kwargs):
        """Proxy to the manager's ``get_active_jobs``."""
        return self.api_session_manager.get_active_jobs(*args, **kwargs)

    def immediate_job_run(self, name):
        """Proxy: queue job *name* for an immediate run."""
        return self.api_session_manager.immediate_job_run(name)
def job(**settings):
    """Decorator factory marking a session method as a scheduled job.

    Registration is deferred via venusian: at scan time the callback looks
    up the session manager for the method's API and, when the scanned class
    matches that manager's session, registers the job with *settings*.
    """
    def decorator(wrapped):
        def register(context, name, klass):
            manager = context.config.registry.queryUtility(
                IBaseSessionManager, klass.__api_name__)
            if manager is not None and issubclass(manager.session, klass):
                context.jobs_manager.add_job(klass.__api_name__, wrapped, settings)

        venusian.attach(wrapped, register, category='ines.jobs')
        return wrapped
    return decorator
class APIJob(object):
    """One registered job: a session method plus its cron schedule.

    Equality and hashing are by ``name`` so instances deduplicate inside
    the module-level ``JOBS`` set.  Instances are callable; the jobs
    monitor runs them in worker threads.
    """
    def __init__(self, api_session_manager, api_name, wrapped_name, settings):
        self.api_session_manager = api_session_manager
        self.api_name = api_name
        self.wrapped_name = wrapped_name
        # Job key: explicit 'unique_name' or "<application>:<api>.<method>".
        self.name = (
            settings.get('unique_name')
            or '%s:%s.%s' % (self.application_name, self.api_name, wrapped_name))
        self.active = False
        self.next_date = None
        self.updating = False
        self.last_called_date = None
        self.domain_name = settings.get('domain_name', None)
        self.title = settings.get('title', None)
        # Forward only the cron-related keys (those listed in DATES_RANGES).
        cron_settings = {}
        for key in DATES_RANGES.keys():
            if key in settings:
                cron_settings[key] = settings[key]
        self.cron = Cron(**cron_settings)
        self.enable()
    def __eq__(self, other):
        return isinstance(other, APIJob) and self.name == other.name
    def __hash__(self):
        return hash(self.name)
    def __repr__(self):
        return '%s (%s)' % (self.name, self.next_date)
    @property
    def application_name(self):
        return self.api_session_manager.config.application_name
    def disable(self):
        # Stop scheduling; enable() can reactivate later.
        if self.active:
            self.active = False
            self.next_date = None
    def enable(self):
        # A job bound to a specific domain only activates on that domain.
        if not self.domain_name:
            self.active = True
        elif self.domain_name == DOMAIN_NAME:
            self.active = True
        else:
            self.active = False
            self.next_date = None
        if self.active and not self.next_date:
            self.find_next()
    def find_next(self):
        # Compute the next scheduled run; None when the cron is exhausted.
        if self.active:
            try:
                self.next_date = self.cron.find_next(NOW())
            except NoMoreDates:
                self.next_date = None
        else:
            self.next_date = None
    def will_run(self):
        return bool(self.active and not self.updating and self.next_date)
    def __call__(self):
        # Executed in a worker thread started by the jobs monitor.
        if self.will_run():
            api_session = self.api_session_manager.system_session(self)
            lock_key = JOBS_LOCK_KEY(self.name)
            # NOTE(review): 'lockme' presumably tracks locks already held by
            # this process, so the check avoids self-deadlock — confirm.
            if lock_key not in self.api_session_manager.config.cache.lockme:
                try:
                    self.api_session_manager.config.cache.lock(lock_key, timeout=1)
                except LockTimeout:
                    api_session.logging.log_error('job_locked', 'Job already running.')
                else:
                    self.updating = True
                    self.last_called_date = NOW()
                    session = getattr(api_session, self.api_name)
                    try:
                        getattr(session, self.wrapped_name)()
                    except (BaseException, Exception) as error:
                        api_session.logging.log_critical('jobs_error', str(error))
                    else:
                        # Success: let the jobs session flush/commit.
                        jobs_session = getattr(api_session, self.api_session_manager.__api_name__)
                        jobs_session.after_job_running()
                    finally:
                        self.updating = False
                        self.api_session_manager.config.cache.unlock(lock_key)
            self.find_next()
def get_job_string_application_name(name):
    """Return the application name owning job *name*, or None if unknown."""
    job = get_job(name)
    return job.application_name if job else None
def get_job(name):
    """Return the registered APIJob called *name*, or None when absent."""
    return next((job for job in JOBS if job.name == name), None)
| {
"content_hash": "77d3aae63ecc5017d5ca377ca5061b8c",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 102,
"avg_line_length": 35.415189873417724,
"alnum_prop": 0.5419257988419472,
"repo_name": "hugobranquinho/ines",
"id": "278ebf22448939070d9adccc139704c3bf4b58e7",
"size": "14014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ines/api/jobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "435125"
}
],
"symlink_target": ""
} |
from django.shortcuts import get_object_or_404
from django.template import Context, loader as template_loader
from django.conf import settings
from rest_framework import decorators, permissions, viewsets, status
from rest_framework.renderers import JSONPRenderer, JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from projects.models import Project
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def footer_html(request):
    """Render the docs footer (versions/downloads/translations) for a page.

    Reads the target project/version/page from the query string and returns
    the rendered footer HTML together with the version's active/supported
    flags.  404s when the project slug does not exist.
    """
    params = request.GET
    project_slug = params.get('project', None)
    version_slug = params.get('version', None)
    page_slug = params.get('page', None)
    theme = params.get('theme', False)
    docroot = params.get('docroot', '')
    subproject = params.get('subproject', False)
    source_suffix = params.get('source_suffix', '.rst')

    project = get_object_or_404(Project, slug=project_slug)
    version = project.versions.get(slug=version_slug)
    main_project = project.main_language_project or project

    context = Context({
        'project': project,
        'downloads': version.get_downloads(pretty=True),
        'current_version': version.slug,
        'versions': project.ordered_active_versions(),
        'main_project': main_project,
        'translations': main_project.translations.all(),
        'current_language': project.language,
        'using_theme': theme == "default",
        'new_theme': theme == "sphinx_rtd_theme",
        'settings': settings,
        'subproject': subproject,
        'github_url': version.get_github_url(docroot, page_slug, source_suffix),
        'bitbucket_url': version.get_bitbucket_url(docroot, page_slug, source_suffix),
    })
    html = template_loader.get_template('restapi/footer.html').render(context)
    return Response({
        'html': html,
        'version_active': version.active,
        'version_supported': version.supported,
    })
| {
"content_hash": "4910c436e3d8563b3c0f17dfe0d763df",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 86,
"avg_line_length": 41.34,
"alnum_prop": 0.6942428640541848,
"repo_name": "Acidburn0zzz/readthedocs.org",
"id": "4010e71a9f84dfca6fd9daee80b756bbc9f69505",
"size": "2067",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "readthedocs/restapi/views/footer_views.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h=ur!k$#+o(b@)%e-=g08t1@6#+7be^kejzkx&yx+ss1b%dy$j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'searchevents.apps.SearcheventsConfig',
'django.contrib.admin',
'polls.apps.PollsConfig',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "119de3b204cfd6e62c857bc6dd03cec2",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 91,
"avg_line_length": 26.139344262295083,
"alnum_prop": 0.6870492317340859,
"repo_name": "harryyyy/berry",
"id": "c7bf211311936eabe464863b25f6299cf15aac92",
"size": "3189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/backend/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2311"
},
{
"name": "HTML",
"bytes": "7272"
},
{
"name": "JavaScript",
"bytes": "18371"
},
{
"name": "Python",
"bytes": "12312"
}
],
"symlink_target": ""
} |
"""The tests for the device tracker component."""
# pylint: disable=protected-access
import asyncio
import json
import logging
import unittest
from unittest.mock import call, patch
from datetime import datetime, timedelta
import os
from homeassistant.core import callback
from homeassistant.bootstrap import setup_component
from homeassistant.loader import get_component
from homeassistant.util.async import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_ENTITY_PICTURE, ATTR_FRIENDLY_NAME, ATTR_HIDDEN,
STATE_HOME, STATE_NOT_HOME, CONF_PLATFORM)
import homeassistant.components.device_tracker as device_tracker
from homeassistant.exceptions import HomeAssistantError
from homeassistant.remote import JSONEncoder
from tests.common import (
get_test_home_assistant, fire_time_changed, fire_service_discovered,
patch_yaml_files, assert_setup_component)
from ...test_util.aiohttp import mock_aiohttp_client
# Minimal device_tracker configuration using the in-repo 'test' platform.
TEST_PLATFORM = {device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}}
_LOGGER = logging.getLogger(__name__)
class TestComponentsDeviceTracker(unittest.TestCase):
    """Test the Device tracker.

    Each test runs against a fresh test Home Assistant instance; the
    known-devices YAML file written during a test is removed in tearDown.
    """
    hass = None  # HomeAssistant test instance, created in setUp
    yaml_devices = None  # type: str  # path to the known_devices YAML file
    # pylint: disable=invalid-name
    def setUp(self):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.yaml_devices = self.hass.config.path(device_tracker.YAML_DEVICES)
    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop everything that was started."""
        try:
            os.remove(self.yaml_devices)
        except FileNotFoundError:
            # The test may not have written a devices file at all.
            pass
        self.hass.stop()
    def test_is_on(self):
        """Test is_on method."""
        entity_id = device_tracker.ENTITY_ID_FORMAT.format('test')
        self.hass.states.set(entity_id, STATE_HOME)
        self.assertTrue(device_tracker.is_on(self.hass, entity_id))
        self.hass.states.set(entity_id, STATE_NOT_HOME)
        self.assertFalse(device_tracker.is_on(self.hass, entity_id))
    # pylint: disable=no-self-use
    def test_reading_broken_yaml_config(self):
        """Test when known devices contains invalid data."""
        # Invalid files must yield an empty device list, never raise.
        files = {'empty.yaml': '',
                 'nodict.yaml': '100',
                 'badkey.yaml': '@:\n name: Device',
                 'noname.yaml': 'my_device:\n',
                 'allok.yaml': 'My Device:\n name: Device',
                 'oneok.yaml': ('My Device!:\n name: Device\n'
                                'bad_device:\n nme: Device')}
        args = {'hass': self.hass, 'consider_home': timedelta(seconds=60)}
        with patch_yaml_files(files):
            assert device_tracker.load_config('empty.yaml', **args) == []
            assert device_tracker.load_config('nodict.yaml', **args) == []
            assert device_tracker.load_config('noname.yaml', **args) == []
            assert device_tracker.load_config('badkey.yaml', **args) == []
            res = device_tracker.load_config('allok.yaml', **args)
            assert len(res) == 1
            assert res[0].name == 'Device'
            assert res[0].dev_id == 'my_device'
            # Mixed file: the valid entry loads, the broken one is skipped.
            res = device_tracker.load_config('oneok.yaml', **args)
            assert len(res) == 1
            assert res[0].name == 'Device'
            assert res[0].dev_id == 'my_device'
    def test_reading_yaml_config(self):
        """Test the rendering of the YAML configuration."""
        dev_id = 'test'
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, dev_id,
            'AB:CD:EF:GH:IJ', 'Test name', picture='http://test.picture',
            hide_if_away=True)
        device_tracker.update_config(self.yaml_devices, dev_id, device)
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        # A write/read round trip must preserve every device attribute.
        config = device_tracker.load_config(self.yaml_devices, self.hass,
                                            device.consider_home)[0]
        self.assertEqual(device.dev_id, config.dev_id)
        self.assertEqual(device.track, config.track)
        self.assertEqual(device.mac, config.mac)
        self.assertEqual(device.config_picture, config.config_picture)
        self.assertEqual(device.away_hide, config.away_hide)
        self.assertEqual(device.consider_home, config.consider_home)
        self.assertEqual(device.vendor, config.vendor)
    # pylint: disable=invalid-name
    @patch('homeassistant.components.device_tracker._LOGGER.warning')
    def test_track_with_duplicate_mac_dev_id(self, mock_warning):
        """Test adding duplicate MACs or device IDs to DeviceTracker."""
        devices = [
            device_tracker.Device(self.hass, True, True, 'my_device', 'AB:01',
                                  'My device', None, None, False),
            device_tracker.Device(self.hass, True, True, 'your_device',
                                  'AB:01', 'Your device', None, None, False)]
        device_tracker.DeviceTracker(self.hass, False, True, devices)
        _LOGGER.debug(mock_warning.call_args_list)
        assert mock_warning.call_count == 1, \
            "The only warning call should be duplicates (check DEBUG)"
        args, _ = mock_warning.call_args
        assert 'Duplicate device MAC' in args[0], \
            'Duplicate MAC warning expected'
        mock_warning.reset_mock()
        devices = [
            device_tracker.Device(self.hass, True, True, 'my_device',
                                  'AB:01', 'My device', None, None, False),
            device_tracker.Device(self.hass, True, True, 'my_device',
                                  None, 'Your device', None, None, False)]
        device_tracker.DeviceTracker(self.hass, False, True, devices)
        _LOGGER.debug(mock_warning.call_args_list)
        assert mock_warning.call_count == 1, \
            "The only warning call should be duplicates (check DEBUG)"
        args, _ = mock_warning.call_args
        assert 'Duplicate device IDs' in args[0], \
            'Duplicate device IDs warning expected'
    def test_setup_without_yaml_file(self):
        """Test with no YAML file."""
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
    # pylint: disable=invalid-name
    def test_adding_unknown_device_to_config(self):
        """Test the adding of unknown devices to configuration file."""
        scanner = get_component('device_tracker.test').SCANNER
        scanner.reset()
        scanner.come_home('DEV1')
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN, {
            device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}}))
        # wait for async calls (macvendor) to finish
        self.hass.block_till_done()
        config = device_tracker.load_config(self.yaml_devices, self.hass,
                                            timedelta(seconds=0))
        assert len(config) == 1
        assert config[0].dev_id == 'dev1'
        assert config[0].track
    def test_gravatar(self):
        """Test the Gravatar generation."""
        dev_id = 'test'
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, dev_id,
            'AB:CD:EF:GH:IJ', 'Test name', gravatar='test@example.com')
        gravatar_url = ("https://www.gravatar.com/avatar/"
                        "55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar")
        self.assertEqual(device.config_picture, gravatar_url)
    def test_gravatar_and_picture(self):
        """Test that Gravatar overrides picture."""
        dev_id = 'test'
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, dev_id,
            'AB:CD:EF:GH:IJ', 'Test name', picture='http://test.picture',
            gravatar='test@example.com')
        gravatar_url = ("https://www.gravatar.com/avatar/"
                        "55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar")
        self.assertEqual(device.config_picture, gravatar_url)
    def test_mac_vendor_lookup(self):
        """Test if vendor string is lookup on macvendors API."""
        # The macvendors.com HTTP call is mocked in all vendor tests below.
        mac = 'B8:27:EB:00:00:00'
        vendor_string = 'Raspberry Pi Foundation'
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
        with mock_aiohttp_client() as aioclient_mock:
            aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
                               text=vendor_string)
            run_coroutine_threadsafe(device.set_vendor_for_mac(),
                                     self.hass.loop).result()
            assert aioclient_mock.call_count == 1
        self.assertEqual(device.vendor, vendor_string)
    def test_mac_vendor_lookup_unknown(self):
        """Prevent another mac vendor lookup if was not found first time."""
        mac = 'B8:27:EB:00:00:00'
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
        with mock_aiohttp_client() as aioclient_mock:
            aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
                               status=404)
            run_coroutine_threadsafe(device.set_vendor_for_mac(),
                                     self.hass.loop).result()
            self.assertEqual(device.vendor, 'unknown')
    def test_mac_vendor_lookup_error(self):
        """Prevent another lookup if failure during API call."""
        mac = 'B8:27:EB:00:00:00'
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
        with mock_aiohttp_client() as aioclient_mock:
            aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
                               status=500)
            run_coroutine_threadsafe(device.set_vendor_for_mac(),
                                     self.hass.loop).result()
            self.assertEqual(device.vendor, 'unknown')
    def test_mac_vendor_lookup_exception(self):
        """Prevent another lookup if exception during API call."""
        mac = 'B8:27:EB:00:00:00'
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
        with mock_aiohttp_client() as aioclient_mock:
            aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
                               exc=asyncio.TimeoutError())
            run_coroutine_threadsafe(device.set_vendor_for_mac(),
                                     self.hass.loop).result()
            self.assertEqual(device.vendor, 'unknown')
    def test_mac_vendor_lookup_on_see(self):
        """Test if macvendor is looked up when device is seen."""
        mac = 'B8:27:EB:00:00:00'
        vendor_string = 'Raspberry Pi Foundation'
        tracker = device_tracker.DeviceTracker(
            self.hass, timedelta(seconds=60), 0, [])
        with mock_aiohttp_client() as aioclient_mock:
            aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
                               text=vendor_string)
            run_coroutine_threadsafe(
                tracker.async_see(mac=mac), self.hass.loop).result()
            assert aioclient_mock.call_count == 1, \
                'No http request for macvendor made!'
        self.assertEqual(tracker.devices['b827eb000000'].vendor, vendor_string)
    def test_discovery(self):
        """Test discovery."""
        scanner = get_component('device_tracker.test').SCANNER
        with patch.dict(device_tracker.DISCOVERY_PLATFORMS, {'test': 'test'}):
            with patch.object(scanner, 'scan_devices') as mock_scan:
                self.assertTrue(setup_component(
                    self.hass, device_tracker.DOMAIN, TEST_PLATFORM))
                fire_service_discovered(self.hass, 'test', {})
                self.assertTrue(mock_scan.called)
    def test_update_stale(self):
        """Test stalled update."""
        scanner = get_component('device_tracker.test').SCANNER
        scanner.reset()
        scanner.come_home('DEV1')
        # Freeze time so the consider_home window can be crossed exactly.
        register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
        scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
        with patch('homeassistant.components.device_tracker.dt_util.utcnow',
                   return_value=register_time):
            self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {
                    CONF_PLATFORM: 'test',
                    device_tracker.CONF_CONSIDER_HOME: 59,
                }}))
        self.assertEqual(STATE_HOME,
                         self.hass.states.get('device_tracker.dev1').state)
        scanner.leave_home('DEV1')
        with patch('homeassistant.components.device_tracker.dt_util.utcnow',
                   return_value=scan_time):
            fire_time_changed(self.hass, scan_time)
            self.hass.block_till_done()
        self.assertEqual(STATE_NOT_HOME,
                         self.hass.states.get('device_tracker.dev1').state)
    def test_entity_attributes(self):
        """Test the entity attributes."""
        dev_id = 'test_entity'
        entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
        friendly_name = 'Paulus'
        picture = 'http://placehold.it/200x200'
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, dev_id, None,
            friendly_name, picture, hide_if_away=True)
        device_tracker.update_config(self.yaml_devices, dev_id, device)
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        attrs = self.hass.states.get(entity_id).attributes
        self.assertEqual(friendly_name, attrs.get(ATTR_FRIENDLY_NAME))
        self.assertEqual(picture, attrs.get(ATTR_ENTITY_PICTURE))
    def test_device_hidden(self):
        """Test hidden devices."""
        dev_id = 'test_entity'
        entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, dev_id, None,
            hide_if_away=True)
        device_tracker.update_config(self.yaml_devices, dev_id, device)
        scanner = get_component('device_tracker.test').SCANNER
        scanner.reset()
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        self.assertTrue(self.hass.states.get(entity_id)
                        .attributes.get(ATTR_HIDDEN))
    def test_group_all_devices(self):
        """Test grouping of devices."""
        dev_id = 'test_entity'
        entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
        device = device_tracker.Device(
            self.hass, timedelta(seconds=180), True, dev_id, None,
            hide_if_away=True)
        device_tracker.update_config(self.yaml_devices, dev_id, device)
        scanner = get_component('device_tracker.test').SCANNER
        scanner.reset()
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        state = self.hass.states.get(device_tracker.ENTITY_ID_ALL_DEVICES)
        self.assertIsNotNone(state)
        self.assertEqual(STATE_NOT_HOME, state.state)
        self.assertSequenceEqual((entity_id,),
                                 state.attributes.get(ATTR_ENTITY_ID))
    @patch('homeassistant.components.device_tracker.DeviceTracker.async_see')
    def test_see_service(self, mock_see):
        """Test the see service with a unicode dev_id and NO MAC."""
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        params = {
            'dev_id': 'some_device',
            'host_name': 'example.com',
            'location_name': 'Work',
            'gps': [.3, .8],
            'attributes': {
                'test': 'test'
            }
        }
        device_tracker.see(self.hass, **params)
        self.hass.block_till_done()
        assert mock_see.call_count == 1
        self.assertEqual(mock_see.call_count, 1)
        self.assertEqual(mock_see.call_args, call(**params))
        mock_see.reset_mock()
        params['dev_id'] += chr(233)  # e' acute accent from icloud
        device_tracker.see(self.hass, **params)
        self.hass.block_till_done()
        assert mock_see.call_count == 1
        self.assertEqual(mock_see.call_count, 1)
        self.assertEqual(mock_see.call_args, call(**params))
    def test_new_device_event_fired(self):
        """Test that the device tracker will fire an event."""
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        test_events = []
        @callback
        def listener(event):
            """Helper method that will verify our event got called."""
            test_events.append(event)
        self.hass.bus.listen("device_tracker_new_device", listener)
        # Seeing the same device twice must fire the event only once.
        device_tracker.see(self.hass, 'mac_1', host_name='hello')
        device_tracker.see(self.hass, 'mac_1', host_name='hello')
        self.hass.block_till_done()
        assert len(test_events) == 1
        # Assert we can serialize the event
        json.dumps(test_events[0].as_dict(), cls=JSONEncoder)
        assert test_events[0].data == {
            'entity_id': 'device_tracker.hello',
            'host_name': 'hello',
        }
    # pylint: disable=invalid-name
    def test_not_write_duplicate_yaml_keys(self):
        """Test that the device tracker will not generate invalid YAML."""
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        device_tracker.see(self.hass, 'mac_1', host_name='hello')
        device_tracker.see(self.hass, 'mac_2', host_name='hello')
        self.hass.block_till_done()
        config = device_tracker.load_config(self.yaml_devices, self.hass,
                                            timedelta(seconds=0))
        assert len(config) == 2
    # pylint: disable=invalid-name
    def test_not_allow_invalid_dev_id(self):
        """Test that the device tracker will not allow invalid dev ids."""
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        device_tracker.see(self.hass, dev_id='hello-world')
        config = device_tracker.load_config(self.yaml_devices, self.hass,
                                            timedelta(seconds=0))
        assert len(config) == 0
    def test_see_state(self):
        """Test device tracker see records state correctly."""
        self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
                                        TEST_PLATFORM))
        params = {
            'mac': 'AA:BB:CC:DD:EE:FF',
            'dev_id': 'some_device',
            'host_name': 'example.com',
            'location_name': 'Work',
            'gps': [.3, .8],
            'gps_accuracy': 1,
            'battery': 100,
            'attributes': {
                'test': 'test',
                'number': 1,
            },
        }
        device_tracker.see(self.hass, **params)
        self.hass.block_till_done()
        config = device_tracker.load_config(self.yaml_devices, self.hass,
                                            timedelta(seconds=0))
        assert len(config) == 1
        state = self.hass.states.get('device_tracker.examplecom')
        attrs = state.attributes
        self.assertEqual(state.state, 'Work')
        self.assertEqual(state.object_id, 'examplecom')
        self.assertEqual(state.name, 'example.com')
        self.assertEqual(attrs['friendly_name'], 'example.com')
        self.assertEqual(attrs['battery'], 100)
        self.assertEqual(attrs['latitude'], 0.3)
        self.assertEqual(attrs['longitude'], 0.8)
        self.assertEqual(attrs['test'], 'test')
        self.assertEqual(attrs['gps_accuracy'], 1)
        self.assertEqual(attrs['number'], 1)
    @patch('homeassistant.components.device_tracker._LOGGER.warning')
    def test_see_failures(self, mock_warning):
        """Test that the device tracker see failures."""
        tracker = device_tracker.DeviceTracker(
            self.hass, timedelta(seconds=60), 0, [])
        # MAC is not a string (but added)
        tracker.see(mac=567, host_name="Number MAC")
        # No device id or MAC(not added)
        with self.assertRaises(HomeAssistantError):
            run_coroutine_threadsafe(
                tracker.async_see(), self.hass.loop).result()
        assert mock_warning.call_count == 0
        # Ignore gps on invalid GPS (both added & warnings)
        tracker.see(mac='mac_1_bad_gps', gps=1)
        tracker.see(mac='mac_2_bad_gps', gps=[1])
        tracker.see(mac='mac_3_bad_gps', gps='gps')
        self.hass.block_till_done()
        config = device_tracker.load_config(self.yaml_devices, self.hass,
                                            timedelta(seconds=0))
        assert mock_warning.call_count == 3
        assert len(config) == 4
    @patch('homeassistant.components.device_tracker.async_log_exception')
    def test_config_failure(self, mock_ex):
        """Test that the device tracker see failures."""
        # Negative consider_home must abort component setup entirely.
        with assert_setup_component(0, device_tracker.DOMAIN):
            setup_component(self.hass, device_tracker.DOMAIN,
                            {device_tracker.DOMAIN: {
                                device_tracker.CONF_CONSIDER_HOME: -1}})
| {
"content_hash": "da7b3b5af3f60718d14ffea03c1457a4",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 79,
"avg_line_length": 41.037593984962406,
"alnum_prop": 0.5880359105899597,
"repo_name": "dmeulen/home-assistant",
"id": "c3087b108e9a816612b3769d54f6ee9c2985f87c",
"size": "21832",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/device_tracker/test_init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1435271"
},
{
"name": "Python",
"bytes": "4390736"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "4473"
}
],
"symlink_target": ""
} |
from unittest import mock
import pytest
from openapi_core.testing.datatypes import ResultMock
from openapi_core.validation.shortcuts import validate_response
class TestSpecValidateData:
    """Tests for the ``validate_response`` shortcut."""

    @mock.patch(
        "openapi_core.validation.shortcuts.openapi_response_validator.validate"
    )
    def test_validator_valid(self, mock_validate):
        """The shortcut returns the validator's result untouched."""
        spec = mock.sentinel.spec
        request = mock.sentinel.request
        response = mock.sentinel.response
        data = mock.sentinel.data
        validation_result = ResultMock(data=data)
        mock_validate.return_value = validation_result

        result = validate_response(spec, request, response)

        assert result == validation_result
        # Bug fix: the original wrote ``aasert_called_once_with`` (typo).
        # On a Mock a misspelled assert method is just an attribute access,
        # so the check silently passed without verifying anything.  We only
        # assert the call count here, not the exact arguments, because the
        # shortcut may forward extra defaults (e.g. base_url) to validate().
        mock_validate.assert_called_once()

    @mock.patch(
        "openapi_core.validation.shortcuts.openapi_response_validator.validate"
    )
    def test_validator_error(self, mock_validate):
        """Errors raised by the validation result propagate to the caller."""
        spec = mock.sentinel.spec
        request = mock.sentinel.request
        response = mock.sentinel.response
        mock_validate.return_value = ResultMock(error_to_raise=ValueError)

        with pytest.raises(ValueError):
            validate_response(spec, request, response)

        # Same typo fix as above: assert the validator was actually invoked.
        mock_validate.assert_called_once()
| {
"content_hash": "924f7e57b789932891f1d64b178d9aae",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 33.60526315789474,
"alnum_prop": 0.7032106499608457,
"repo_name": "p1c2u/openapi-core",
"id": "fbdcfeec7523d7f2bac31eebabee91fa6a991482",
"size": "1277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/validation/test_response_shortcuts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "663"
},
{
"name": "Python",
"bytes": "408314"
}
],
"symlink_target": ""
} |
import threading, time, requests, json
from raspberric import get_yesterday_consumption, get_informations, get_date_now
# TODO(review): POLLING appears unused in this module — confirm before removing.
POLLING = False
# Upload target; switch to the commented value for local development.
SERVER = "http://devgone.herokuapp.com"
#SERVER = "http://localhost:3000"
def polling(timeInterval, callback, raspberricIds):
    """Invoke callback(iteration, raspberricIds) forever, sleeping
    timeInterval seconds between invocations. Never returns."""
    iteration = 0
    while True:
        callback(iteration, raspberricIds)
        time.sleep(timeInterval)
        iteration += 1
def idling(timeInterval):
    """Call ping() forever with an increasing counter, sleeping
    timeInterval seconds between pings. Never returns."""
    tick = 0
    while True:
        ping(tick)
        time.sleep(timeInterval)
        tick += 1
def fetchRaspberricData(measureId, raspberricIds):
    # Collect device info plus yesterday's consumption for every raspberric
    # id and return the whole batch as one JSON string.
    # NOTE(review): `measureId` is accepted but never used in this body -
    # confirm whether callers still rely on it being in the signature.
    data = []
    print 'Fetching data from raspberrics...'
    for raspberricId in raspberricIds :
        # Both helpers return JSON strings; decode before combining.
        info = json.loads(get_informations(raspberricId))
        measure = json.loads(get_yesterday_consumption(raspberricId))
        jsonResult = parseResults(raspberricId, info, measure)
        data.append(jsonResult)
    print 'Data fetched'
    return json.dumps(data)
def sendData(data, url=SERVER+"/measures/"):
    """POST `data` (a JSON string) to the measures endpoint and return the
    response body as text."""
    response = requests.post(
        url,
        data=data,
        headers={'content-type': 'application/json'},
    )
    return response.text
def repeatTask(measureId, raspberricIds):
json = fetchRaspberricData(measureId, raspberricIds)
sendData(json)
print 'Data sent'
def startPollingRaspberric(timeInterval, raspberricIds):
    # Start the polling loop in a background thread, at most once per
    # process: the function attribute acts as a run-once guard, so any call
    # after the first is a no-op.
    if "polling" not in startPollingRaspberric.__dict__:
        startPollingRaspberric.polling = True
        print 'Start polling'
        thread = threading.Thread(target=polling, name='Polling', kwargs={'timeInterval': timeInterval, 'callback': repeatTask, 'raspberricIds':raspberricIds})
        # Daemon thread: does not keep the interpreter alive on shutdown.
        thread.daemon = True
        thread.start()
def ping(pingId, url=SERVER):
req = requests.get(url)
print 'Pinged Heroku: ' + str(req.status_code)
def startHerokuIdlingPrevention():
    # Spawn a daemon thread that pings the server every 10 minutes.
    # The function attribute is a run-once guard: calling this again after
    # the first time is a no-op.
    if "polling" not in startHerokuIdlingPrevention.__dict__:
        startHerokuIdlingPrevention.polling = True
        print 'Start idling prevention'
        thread = threading.Thread(target=idling, name='Idling', kwargs={'timeInterval': 60*10})
        thread.daemon = True
        thread.start()
def parseResults(raspberricId, info, measure):
    """Build the upload record for one raspberric.

    `info` and `measure` are the decoded JSON responses of
    get_informations() / get_yesterday_consumption(); both expose a 'data'
    list. The returned dict is ready for JSON serialization by the caller.
    """
    begin_date = info['data'][0]['begin']
    price_option = info['data'][0]['price-option']['slug']
    # Delta of the total consumption: first sample minus last sample.
    # (Fixes the misspelled `measure_lenght` local by using negative
    # indexing instead of recomputing the list length.)
    consumption = measure['data'][0]['value'] - measure['data'][-1]['value']
    return {
        'raspberricId': raspberricId,
        'begin_date': begin_date,
        'price_option': price_option,
        'raising_date': get_date_now(),
        'delta_consumption': consumption,
        'measure': measure['data'],
    }
"content_hash": "021dd2c6e775ae9326ee810ed4735a15",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 153,
"avg_line_length": 31.2,
"alnum_prop": 0.7387820512820513,
"repo_name": "DevGone/flask",
"id": "ddb5b282fbbfa25cd038324932080f03432ca78c",
"size": "2496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/distant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "178"
},
{
"name": "Python",
"bytes": "8681"
}
],
"symlink_target": ""
} |
"""
The Desired Capabilities implementation.
"""
class DesiredCapabilities(object):
    """
    Set of default supported desired capabilities.

    Use this as a starting point for creating a desired capabilities object for
    requesting remote webdrivers for connecting to selenium server or selenium grid.

    Usage Example::

        from selenium import webdriver

        selenium_grid_url = "http://198.0.0.1:4444/wd/hub"

        # Create a desired capabilities object as a starting point.
        capabilities = DesiredCapabilities.FIREFOX.copy()
        capabilities['platform'] = "WINDOWS"
        capabilities['version'] = "10"

        # Instantiate an instance of Remote WebDriver with the desired capabilities.
        driver = webdriver.Remote(desired_capabilities=capabilities,
                                  command_executor=selenium_grid_url)

    Note: Always use '.copy()' on the DesiredCapabilities object to avoid the side
    effects of altering the Global class instance.
    """

    # Each constant below is a template dict: a browserName plus the default
    # "version"/"platform" keys (and "javascriptEnabled" where set) that the
    # remote end expects. FIREFOX is the only template carrying a
    # "marionette" flag.
    FIREFOX = {
        "browserName": "firefox",
        "version": "",
        "platform": "ANY",
        "javascriptEnabled": True,
        "marionette": True,
    }

    INTERNETEXPLORER = {
        "browserName": "internet explorer",
        "version": "",
        "platform": "WINDOWS",
        "javascriptEnabled": True,
    }

    EDGE = {
        "browserName": "MicrosoftEdge",
        "version": "",
        "platform": "WINDOWS"
    }

    CHROME = {
        "browserName": "chrome",
        "version": "",
        "platform": "ANY",
        "javascriptEnabled": True,
    }

    OPERA = {
        "browserName": "opera",
        "version": "",
        "platform": "ANY",
        "javascriptEnabled": True,
    }

    SAFARI = {
        "browserName": "safari",
        "version": "",
        "platform": "MAC",
        "javascriptEnabled": True,
    }

    HTMLUNIT = {
        "browserName": "htmlunit",
        "version": "",
        "platform": "ANY",
    }

    # NOTE(review): "version" is set to "firefox" here - presumably this
    # selects HtmlUnit's Firefox emulation mode; confirm against the
    # HtmlUnit driver documentation.
    HTMLUNITWITHJS = {
        "browserName": "htmlunit",
        "version": "firefox",
        "platform": "ANY",
        "javascriptEnabled": True,
    }

    IPHONE = {
        "browserName": "iPhone",
        "version": "",
        "platform": "MAC",
        "javascriptEnabled": True,
    }

    IPAD = {
        "browserName": "iPad",
        "version": "",
        "platform": "MAC",
        "javascriptEnabled": True,
    }

    ANDROID = {
        "browserName": "android",
        "version": "",
        "platform": "ANDROID",
        "javascriptEnabled": True,
    }

    PHANTOMJS = {
        "browserName": "phantomjs",
        "version": "",
        "platform": "ANY",
        "javascriptEnabled": True,
    }
| {
"content_hash": "f716afdf13cd1630cb877df6c16ff77c",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 84,
"avg_line_length": 23.789473684210527,
"alnum_prop": 0.543141592920354,
"repo_name": "ASOdesk/selenium-pytest-fix",
"id": "a75e4f3e82c618f57a9bb229f12b6c66e0cb81f4",
"size": "3500",
"binary": false,
"copies": "6",
"ref": "refs/heads/pytest-fix",
"path": "py/selenium/webdriver/common/desired_capabilities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "307"
},
{
"name": "C#",
"bytes": "1101"
},
{
"name": "CSS",
"bytes": "22409"
},
{
"name": "HTML",
"bytes": "1403376"
},
{
"name": "JavaScript",
"bytes": "4091259"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "647601"
},
{
"name": "Ragel",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "187714"
},
{
"name": "Shell",
"bytes": "3456"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
} |
"""Add points to TestableResult.
Revision ID: 305b60854a99
Revises: 1cf84ddb034c
Create Date: 2013-10-27 22:07:10.819122
"""
# revision identifiers, used by Alembic.
revision = '305b60854a99'
down_revision = '1cf84ddb034c'

from alembic import op
from collections import defaultdict
from sqlalchemy.sql import table, column
import sqlalchemy as sa

# Lightweight table stubs declaring only the columns this data migration
# reads/writes, so the migration does not depend on application models.
make_status = sa.Enum(u'make_failed', u'nonexistent_executable', u'success',
                      name=u'make_status')

testableresult = table('testableresult',
                       column('id', sa.Integer),
                       column('points', sa.Integer),
                       column('status', make_status),
                       column('submission_id', sa.Integer),
                       column('testable_id', sa.Integer))

testcase = table('testcase',
                 column('id', sa.Integer),
                 column('points', sa.Integer),
                 column('testable_id', sa.Integer))

testcaseresult = table('testcaseresult',
                       column('diff_id', sa.Integer),
                       column('status',
                              sa.Enum('nonexistent_executable',
                                      'output_limit_exceeded',
                                      'signal', 'success', 'timed_out',
                                      name='status')),
                       column('submission_id', sa.Integer),
                       column('test_case_id', sa.Integer))
def upgrade():
    """Move point totals from submission onto testableresult, backfilling
    each testableresult's points from its test-case results."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('submission', u'points_possible')
    op.drop_column('submission', u'points')
    # Added nullable first so existing rows are valid; tightened to
    # NOT NULL at the end, after the backfill below has run.
    op.add_column('testableresult', sa.Column('points', sa.Integer(),
                                              nullable=True))
    conn = op.get_bind()

    # Fetch the points for each test case
    tc_points = {}
    tc_to_t = {}
    for (tc_id, points, t_id) in conn.execute(testcase.select()):
        tc_points[tc_id] = points
        tc_to_t[tc_id] = t_id

    # Fetch all the testableresults. Non-successful builds score 0; for
    # successful ones, remember the (submission, testable) -> result id
    # mapping so test-case scores can be attributed below.
    tr_scores = defaultdict(int)
    s_to_tr = defaultdict(dict)
    for (tr_id, _, status, s_id, t_id) in \
            conn.execute(testableresult.select()):
        if status != 'success':
            tr_scores[tr_id] = 0
        else:
            s_to_tr[s_id][t_id] = tr_id

    # update points for the testableresult associated with each testcaseresult
    for (diff, status, s_id, tc_id) in conn.execute(testcaseresult.select()):
        points = 0
        # Assumes diff is None when it matches
        if status == 'success' and not diff:
            points = tc_points[tc_id]
        tr_scores[s_to_tr[s_id][tc_to_t[tc_id]]] += points

    # Bulk-update all testableresult rows with their computed totals.
    scores = [{'tr_id': x, 'points': y} for (x, y) in tr_scores.items()]
    conn.execute(testableresult.update()
                 .where(testableresult.c.id == sa.bindparam('tr_id'))
                 .values(points=sa.bindparam('points')), scores)
    op.alter_column('testableresult', u'points', existing_type=sa.Integer,
                    nullable=False)
    ### end Alembic commands ###
def downgrade():
    """Reverse the schema change: drop testableresult.points and restore
    the two integer columns on submission (data is not restored)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('testableresult', 'points')
    for name in (u'points', u'points_possible'):
        op.add_column('submission', sa.Column(name, sa.INTEGER(),
                                              server_default='0',
                                              nullable=False))
    ### end Alembic commands ###
| {
"content_hash": "620130aec921da5167425053444ab94d",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 39.44444444444444,
"alnum_prop": 0.5512676056338028,
"repo_name": "ucsb-cs/submit",
"id": "667fddafde19f9d7202a6219244fb7cb8b5f1697",
"size": "3550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "submit/migrations/versions/305b60854a99_add_points_to_testab.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2234"
},
{
"name": "HTML",
"bytes": "995"
},
{
"name": "JavaScript",
"bytes": "36963"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "284298"
},
{
"name": "Shell",
"bytes": "390"
}
],
"symlink_target": ""
} |
from typing import Optional, List
from thinc.types import Floats2d
from thinc.api import Model, reduce_mean, Linear, list2ragged, Logistic
from thinc.api import chain, concatenate, clone, Dropout, ParametricAttention
from thinc.api import SparseLinear, Softmax, softmax_activation, Maxout, reduce_sum
from thinc.api import with_cpu, Relu, residual, LayerNorm
from thinc.layers.chain import init as init_chain
from ...attrs import ORTH
from ...util import registry
from ..extract_ngrams import extract_ngrams
from ..staticvectors import StaticVectors
from ...tokens import Doc
from .tok2vec import get_tok2vec_width
@registry.architectures.register("spacy.TextCatCNN.v1")
def build_simple_cnn_text_classifier(
    tok2vec: Model, exclusive_classes: bool, nO: Optional[int] = None
) -> Model[List[Doc], Floats2d]:
    """
    Build a simple CNN text classifier, given a token-to-vector model as inputs.
    If exclusive_classes=True, a softmax non-linearity is applied, so that the
    outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
    is applied instead, so that outputs are in the range [0, 1].
    """
    with Model.define_operators({">>": chain}):
        # Pool the token vectors of each doc into a single mean vector.
        cnn = tok2vec >> list2ragged() >> reduce_mean()
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=tok2vec.maybe_get_dim("nO"))
            model = cnn >> output_layer
            model.set_ref("output_layer", output_layer)
        else:
            linear_layer = Linear(nO=nO, nI=tok2vec.maybe_get_dim("nO"))
            model = cnn >> linear_layer >> Logistic()
            model.set_ref("output_layer", linear_layer)
        model.set_ref("tok2vec", tok2vec)
        # FIX: only pin the output dimension when it is actually known.
        # nO defaults to None (to be inferred at initialization), and the
        # sibling TextCatEnsemble.v2 builder guards this call the same way.
        if nO is not None:
            model.set_dim("nO", nO)
        model.attrs["multi_label"] = not exclusive_classes
    return model
@registry.architectures.register("spacy.TextCatBOW.v1")
def build_bow_text_classifier(
    exclusive_classes: bool,
    ngram_size: int,
    no_output_layer: bool,
    nO: Optional[int] = None,
) -> Model[List[Doc], Floats2d]:
    """Build a bag-of-words text classifier: a sparse linear model over
    extracted ngram features, forced onto CPU, with an optional
    softmax (exclusive classes) or logistic (multi-label) output."""
    with Model.define_operators({">>": chain}):
        sparse_linear = SparseLinear(nO)
        model = extract_ngrams(ngram_size, attr=ORTH) >> sparse_linear
        model = with_cpu(model, model.ops)
        if not no_output_layer:
            output_layer = softmax_activation() if exclusive_classes else Logistic()
            model = model >> with_cpu(output_layer, output_layer.ops)
        # NOTE: the "output_layer" ref points at the sparse linear layer,
        # not the final activation, even when one is appended above.
        model.set_ref("output_layer", sparse_linear)
        model.attrs["multi_label"] = not exclusive_classes
    return model
@registry.architectures.register("spacy.TextCatEnsemble.v2")
def build_text_classifier_v2(
    tok2vec: Model[List[Doc], List[Floats2d]],
    linear_model: Model[List[Doc], Floats2d],
    nO: Optional[int] = None,
) -> Model[List[Doc], Floats2d]:
    """Ensemble text classifier: concatenates the given linear model with an
    attention/maxout branch over tok2vec, followed by a shared output layer.
    Remaining dimension inference is finished by init_ensemble_textcat."""
    # The linear model decides single- vs multi-label for the whole ensemble.
    exclusive_classes = not linear_model.attrs["multi_label"]
    with Model.define_operators({">>": chain, "|": concatenate}):
        width = tok2vec.maybe_get_dim("nO")
        attention_layer = ParametricAttention(
            width
        )  # TODO: benchmark performance difference of this layer
        maxout_layer = Maxout(nO=width, nI=width)
        norm_layer = LayerNorm(nI=width)
        cnn_model = (
            tok2vec
            >> list2ragged()
            >> attention_layer
            >> reduce_sum()
            >> residual(maxout_layer >> norm_layer >> Dropout(0.0))
        )

        # The output layer sees both branches concatenated, hence nO * 2.
        nO_double = nO * 2 if nO else None
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=nO_double)
        else:
            output_layer = Linear(nO=nO, nI=nO_double) >> Logistic()
        model = (linear_model | cnn_model) >> output_layer
        model.set_ref("tok2vec", tok2vec)
        # Only pin nO when it isn't still unset/inferable.
        if model.has_dim("nO") is not False:
            model.set_dim("nO", nO)
        # Keep refs to the inner layers so init_ensemble_textcat can size them.
        model.set_ref("output_layer", linear_model.get_ref("output_layer"))
        model.set_ref("attention_layer", attention_layer)
        model.set_ref("maxout_layer", maxout_layer)
        model.set_ref("norm_layer", norm_layer)
        model.attrs["multi_label"] = not exclusive_classes

    model.init = init_ensemble_textcat
    return model
def init_ensemble_textcat(model, X, Y) -> Model:
    """Initialize the ensemble textcat model: propagate the tok2vec width
    into the attention/maxout/norm layers, then run the default chain init."""
    width = get_tok2vec_width(model)
    model.get_ref("attention_layer").set_dim("nO", width)
    for ref_name in ("maxout_layer", "norm_layer"):
        layer = model.get_ref(ref_name)
        layer.set_dim("nO", width)
        layer.set_dim("nI", width)
    init_chain(model, X, Y)
    return model
@registry.architectures.register("spacy.TextCatLowData.v1")
def build_text_classifier_lowdata(
    width: int, dropout: Optional[float], nO: Optional[int] = None
) -> Model[List[Doc], Floats2d]:
    """Static-vectors + attention classifier with logistic outputs,
    intended for small training sets."""
    # Don't document this yet, I'm not sure it's right.
    # Note, before v.3, this was the default if setting "low_data" and "pretrained_dims"
    with Model.define_operators({">>": chain, "**": clone}):
        model = (
            StaticVectors(width)
            >> list2ragged()
            >> ParametricAttention(width)
            >> reduce_sum()
            # Two stacked residual Relu blocks (via the ** clone operator).
            >> residual(Relu(width, width)) ** 2
            >> Linear(nO, width)
        )
        if dropout:
            model = model >> Dropout(dropout)
        model = model >> Logistic()
    return model
| {
"content_hash": "927fb3621c884be28ef3b0e508d481a4",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 88,
"avg_line_length": 39.83458646616541,
"alnum_prop": 0.6457153642884107,
"repo_name": "spacy-io/spaCy",
"id": "0234530e65dd39bd15305da9c754f1bcfc3e55c0",
"size": "5298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacy/ml/models/textcat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "567025"
},
{
"name": "C++",
"bytes": "12785"
},
{
"name": "CSS",
"bytes": "57480"
},
{
"name": "Groff",
"bytes": "188349"
},
{
"name": "HTML",
"bytes": "582292"
},
{
"name": "JavaScript",
"bytes": "54065"
},
{
"name": "M4",
"bytes": "11398"
},
{
"name": "Makefile",
"bytes": "256492"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PostScript",
"bytes": "460967"
},
{
"name": "Python",
"bytes": "682585"
},
{
"name": "Shell",
"bytes": "95525"
}
],
"symlink_target": ""
} |
"""'dns changes list' command."""
from apiclient import errors
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.dns.lib import util
class List(base.Command):
  """List Cloud DNS resource record set changes."""

  # Sentinel meaning "no limit" for --max_results.
  DEFAULT_MAX_RESULTS = 0
  # Upper bound on how many changes are requested per API page.
  DEFAULT_PAGE_SIZE = 1000

  @staticmethod
  def Args(parser):
    """Args is called by calliope to gather arguments for this command.

    Args:
      parser: An argparse parser that you can use to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
    """
    parser.add_argument(
        '--sort_order', required=False, help='Sort order for listing. '
        'Valid values are [ascending, descending]. Default: "descending"')
    parser.add_argument(
        '--max_results', required=False, help='If greater than zero, limit the '
        'number of changes returned to <max_results>. '
        'Default: %d' % List.DEFAULT_MAX_RESULTS)

  def Run(self, args):
    """Run 'dns changes list'.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A list object representing the changes resource(s) obtained by the list
      operation if the list was successful.

    Raises:
      HttpException: A http error response was received while executing api
          request.
      ToolException: An error other than http error occured while executing the
          command.
    """
    dns = self.context['dns']
    project = properties.VALUES.core.project.Get(required=True)

    sort_order = args.sort_order
    if sort_order is None:
      sort_order = 'descending'

    # max_results <= 0 means "fetch everything"; otherwise never request
    # more than one page's worth from the API at a time.
    max_results = List.DEFAULT_MAX_RESULTS
    if args.max_results is not None:
      max_results = int(args.max_results)
    if max_results > 0:
      page_size = min(max_results, List.DEFAULT_PAGE_SIZE)
    else:
      page_size = List.DEFAULT_PAGE_SIZE

    request = dns.changes().list(project=project,
                                 managedZone=args.zone,
                                 maxResults=page_size,
                                 sortOrder=sort_order)
    try:
      result_list = []
      result = request.execute()
      result_list.extend(result['changes'])
      # Follow nextPageToken until the limit is reached or pages run out.
      while ((max_results <= 0 or len(result_list) < max_results) and
             'nextPageToken' in result and
             result['nextPageToken'] is not None):
        if max_results > 0:
          # Shrink the final page so we never over-fetch past max_results.
          page_size = min(
              max_results - len(result_list), List.DEFAULT_PAGE_SIZE)
        request = dns.changes().list(project=project,
                                     managedZone=args.zone,
                                     maxResults=page_size,
                                     pageToken=result['nextPageToken'],
                                     sortOrder=sort_order)
        result = request.execute()
        result_list.extend(result['changes'])
      return result_list
    except errors.HttpError as error:
      # Translate raw HTTP errors into the CLI's user-facing exception.
      raise exceptions.HttpException(util.GetError(error))
    except errors.Error as error:
      raise exceptions.ToolException(error)

  def Display(self, unused_args, result):
    """Display prints information about what just happened to stdout.

    Args:
      unused_args: The same as the args in Run.
      result: The results of the Run() method.
    """
    util.PrettyPrint(result)
| {
"content_hash": "f1139bce4b4e4bb3c52c714e975a4590",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 80,
"avg_line_length": 35.65979381443299,
"alnum_prop": 0.6227233304423244,
"repo_name": "ychen820/microblog",
"id": "fc6f8ae84e57cb467bec888fad1bbd398fb1ba81",
"size": "3510",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/dns/dnstools/changes/list.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
from random import Random
import numpy as np
import scipy.sparse as sp
from nose.tools import assert_equal
from nose.tools import assert_true
from nose.tools import assert_false
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
    """Round-trip a small dict dataset through DictVectorizer for every
    combination of sparse/dense output and supported dtype."""
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]

    for sparse in (True, False):
        for dtype in (int, np.float32, np.int16):
            vectorizer = DictVectorizer(sparse=sparse, dtype=dtype)
            X = vectorizer.fit_transform(D)

            assert_equal(sp.issparse(X), sparse)
            assert_equal(X.shape, (3, 5))
            assert_equal(X.sum(), 14)
            assert_equal(vectorizer.inverse_transform(X), D)

            if sparse:
                # CSR matrices can't be compared for equality
                assert_array_equal(X.A, vectorizer.transform(D).A)
            else:
                assert_array_equal(X, vectorizer.transform(D))
def test_feature_selection():
    """chi2-based selection keeps only the two informative features."""
    # Two samples whose only discriminative features are useful1/useful2,
    # buried in 20 constant "useless" features.
    noise = dict(("useless%d" % i, 10) for i in xrange(20))
    d1 = dict(noise, useful1=1, useful2=20)
    d2 = dict(noise, useful1=20, useful2=1)

    for indices in (True, False):
        v = DictVectorizer().fit([d1, d2])
        X = v.transform([d1, d2])
        sel = SelectKBest(chi2, k=2).fit(X, [0, 1])

        v.restrict(sel.get_support(indices=indices), indices=indices)
        assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
    """String-valued features expand into one-hot "name=value" features,
    while numeric features pass through unchanged."""
    D_in = [{"version": "1", "ham": 2},
            {"version": "2", "spam": .3},
            {"version=3": True, "spam": -1}]
    vectorizer = DictVectorizer()
    X = vectorizer.fit_transform(D_in)
    assert_equal(X.shape, (3, 5))

    D_out = vectorizer.inverse_transform(X)
    assert_equal(D_out[0], {"version=1": 1, "ham": 2})

    feature_names = vectorizer.get_feature_names()
    assert_true("version=2" in feature_names)
    assert_false("version" in feature_names)
def test_unseen_features():
    """Features absent from the fitted vocabulary vectorize to all zeros."""
    D = [{"camelot": 0, "spamalot": 1}]
    vectorizer = DictVectorizer(sparse=False).fit(D)
    X = vectorizer.transform({"push the pram a lot": 2})
    assert_array_equal(X, np.zeros((1, 2)))
def test_deterministic_vocabulary():
    """The fitted vocabulary must not depend on dict memory layout."""
    # Generate equal dictionaries with different memory layouts
    items = [("%03d" % i, i) for i in range(1000)]
    # Fixed seed keeps the shuffled insertion order reproducible.
    rng = Random(42)
    d_sorted = dict(items)
    rng.shuffle(items)
    d_shuffled = dict(items)

    # check that the memory layout does not impact the resulting vocabulary
    v_1 = DictVectorizer().fit([d_sorted])
    v_2 = DictVectorizer().fit([d_shuffled])
    assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| {
"content_hash": "63f5ee68a25ac452a92dc80f86405142",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 76,
"avg_line_length": 31.426966292134832,
"alnum_prop": 0.5974258133714694,
"repo_name": "mrshu/scikit-learn",
"id": "ea31f723c36e5d5464185c731b1fb1f0acbd945f",
"size": "2866",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/feature_extraction/tests/test_dict_vectorizer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10173894"
},
{
"name": "C++",
"bytes": "435332"
},
{
"name": "JavaScript",
"bytes": "4775"
},
{
"name": "Python",
"bytes": "3532352"
},
{
"name": "Shell",
"bytes": "687"
}
],
"symlink_target": ""
} |
import math
import numpy
class ElasticNet:
    """Elastic-net heuristic for routing a closed tour through `cities`:
    a ring of neurons is iteratively pulled toward the cities while an
    elasticity force keeps the ring short.

    NOTE(review): assumes `cities` is an (n, 2) numpy array of coordinates -
    the code only relies on shape[0], mean(axis=0) and 2-D deltas; confirm
    with callers.
    """

    def __init__(self, cities, param):
        # `param` is a dict of algorithm settings; keys read by this class:
        # init_k, num_neurons_factor, radius, alpha, beta, k_alpha,
        # k_update_period, epsilon, max_num_iter.
        self._cities = cities
        self._param = param
        self._num_iter = 0
        # Temperature-like scale of the Gaussian weights; annealed over time.
        self._k = self._param['init_k']
        # More neurons than cities, by a configurable factor.
        self._num_neurons = int(self._param['num_neurons_factor'] \
                                    * self._cities.shape[0])
        self._init_neurons()

    def iteration(self):
        """
        Perform one iteration of the algorithm.
        Return True if the algorithm should continue, False once a stop
        criterion (convergence or iteration limit) has been met.
        """
        self._num_iter += 1
        self._update_k()
        self._update_weights()
        self._update_neurons()
        return not self._stop_criteria()

    # Read-only accessors (computed by _update_weights each iteration).
    def _get_dist2(self):
        return self._dist2
    dist2 = property(fget=_get_dist2)

    def _get_neurons(self):
        return self._neurons
    neurons = property(fget=_get_neurons)

    def _get_num_iter(self):
        return self._num_iter
    num_iter = property(fget=_get_num_iter)

    def _get_worst_dist(self):
        return self._worst_dist
    worst_dist = property(fget=_get_worst_dist)

    def _dist_force(self):
        """
        Compute the force that minimize the distance between the
        cities and the neurons.
        """
        # Weighted sum of city->neuron deltas, one row per neuron.
        return numpy.array(
            [numpy.dot(self._weights[:,i],
                       self._delta[:,i]) for i in range(self._num_neurons)])

    def _init_neurons(self):
        """
        Initialize the neurons in a circle at the center of the
        cities.
        """
        theta = numpy.linspace(0, 2 * math.pi, self._num_neurons, False)
        centroid = self._cities.mean(axis=0)
        self._neurons = numpy.vstack((numpy.cos(theta), numpy.sin(theta)))
        self._neurons *= self._param['radius']
        self._neurons += centroid[:,numpy.newaxis]
        self._neurons = self._neurons.transpose()

    def _length_force(self):
        """Compute the force that minimize the length of the elastic."""
        # Discrete Laplacian along the ring; first and last rows wrap around
        # to keep the tour closed.
        return numpy.concatenate((
            [self._neurons[1] - 2 * self._neurons[0]
                 + self._neurons[self._num_neurons - 1]],
            [(self._neurons[i+1]
                  - 2 * self._neurons[i]
                  + self._neurons[i-1])
             for i in range(1, self._num_neurons - 1)],
            [self._neurons[0]
                 - 2 * self._neurons[self._num_neurons - 1]
                 + self._neurons[self._num_neurons - 2]]))

    def _stop_criteria(self):
        """Return True if the algorithm has finished, False otherwise."""
        return self._worst_dist < self._param['epsilon'] \
            or self._num_iter >= self._param['max_num_iter']

    def _update_k(self):
        # Anneal k every k_update_period iterations, with a floor of 0.01.
        if (self._num_iter % self._param['k_update_period']) == 0:
            self._k = max(0.01, self._param['k_alpha'] * self._k)

    def _update_neurons(self):
        dist_force = self._dist_force()
        length_force = self._length_force()
        # Blend the two forces; the elasticity term is scaled by k so it
        # weakens as the annealing proceeds.
        self._neurons += self._param['alpha'] * dist_force \
            + self._param['beta'] * self._k * length_force

    def _update_weights(self):
        """Compute w_ij, i = 1, 2, ..., |Cities|; j = 1, 2, ...., |Neurons|"""
        self._delta = self._cities[:,numpy.newaxis] - self._neurons
        # At this point
        # self._delta[i,j] == (delta_x, delta_y) between city i and neuron j
        self._dist2 = (self._delta ** 2).sum(axis=2)
        # At this point
        # self._dist2[i,j] == square of the distance between city i and neuron j
        self._worst_dist = numpy.sqrt(self._dist2.min(axis=1).max())
        self._weights = numpy.exp(-self._dist2 / (2 * (self._k ** 2)))
        # At this point
        # self._weights[i,j] == unnormalized weight associated to city
        # i and neuron j
        self._weights /= self._weights.sum(axis=1)[:,numpy.newaxis]
        # At this point
        # self._weights[i,j] == normalized weight associated to city i
        # and neuron j
| {
"content_hash": "b429ec82e7b1795f515832f55ab093ae",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 80,
"avg_line_length": 31.045801526717558,
"alnum_prop": 0.5320875338087042,
"repo_name": "larose/ena",
"id": "6d66cf44f374803d81b1e7ecb065dcdfca69c1db",
"size": "4067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elastic_net.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "14392"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.http import HttpRequest, HttpResponse, HttpResponseBadRequest
from django.template import Template, Context
from django.template.loader import render_to_string
from django.conf import settings
from honeypot.middleware import HoneypotViewMiddleware, HoneypotResponseMiddleware
from honeypot.decorators import verify_honeypot_value, check_honeypot
def _get_GET_request():
    """Return a fresh HttpRequest to stand in for a GET request."""
    request = HttpRequest()
    return request
def _get_POST_request():
    """Return an HttpRequest whose method is set to POST."""
    request = HttpRequest()
    request.method = "POST"
    return request
def view_func(request):
    """Trivial view (empty 200 response) used as a decorator/middleware target."""
    return HttpResponse()
class HoneypotTestCase(TestCase):
    """Base class: resets honeypot-related settings before each test."""

    def setUp(self):
        # Django ticket #10130 is closed, but it should work even on 1.0
        for name in ('HONEYPOT_VALUE', 'HONEYPOT_VERIFIER',
                     'HONEYPOT_FIELD_IS_OPTIONAL'):
            if hasattr(settings, name):
                delattr(settings._wrapped, name)
        settings.HONEYPOT_FIELD_NAME = 'honeypot'
class VerifyHoneypotValue(HoneypotTestCase):
    """Tests for honeypot.decorators.verify_honeypot_value.

    A return of None means the request is accepted; an
    HttpResponseBadRequest means it was rejected as spam.
    """

    def test_no_call_on_get(self):
        """ test that verify_honeypot_value is not called when request.method == GET """
        request = _get_GET_request()
        resp = verify_honeypot_value(request, None)
        self.assertEquals(resp, None)

    def test_verifier_false(self):
        """ test that verify_honeypot_value fails when HONEYPOT_VERIFIER returns False """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ''
        settings.HONEYPOT_VERIFIER = lambda x: False
        resp = verify_honeypot_value(request, None)
        self.assertEquals(resp.__class__, HttpResponseBadRequest)

    def test_field_missing(self):
        """ test that verify_honeypot_value rejects the request (400) when HONEYPOT_FIELD_NAME is missing from request.POST """
        request = _get_POST_request()
        resp = verify_honeypot_value(request, None)
        self.assertEquals(resp.__class__, HttpResponseBadRequest)

    def test_field_blank(self):
        """ test that verify_honeypot_value accepts the request when the honeypot field is present and blank """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ''
        resp = verify_honeypot_value(request, None)
        self.assertEquals(resp, None)

    def test_honeypot_value_string(self):
        """ test that verify_honeypot_value succeeds when HONEYPOT_VALUE is a string """
        request = _get_POST_request()
        settings.HONEYPOT_VALUE = '(test string)'
        request.POST[settings.HONEYPOT_FIELD_NAME] = settings.HONEYPOT_VALUE
        resp = verify_honeypot_value(request, None)
        self.assertEquals(resp, None)

    def test_honeypot_value_callable(self):
        """ test that verify_honeypot_value succeeds when HONEYPOT_VALUE is a callable """
        request = _get_POST_request()
        settings.HONEYPOT_VALUE = lambda: '(test string)'
        request.POST[settings.HONEYPOT_FIELD_NAME] = settings.HONEYPOT_VALUE()
        resp = verify_honeypot_value(request, None)
        self.assertEquals(resp, None)
class CheckHoneypotDecorator(HoneypotTestCase):
    """Tests for the @check_honeypot view decorator.

    Every case posts without a honeypot field, so the decorated view must
    answer with HttpResponseBadRequest instead of running view_func.
    """

    def test_default_decorator(self):
        """ test that @check_honeypot works and defaults to HONEYPOT_FIELD_NAME """
        new_view_func = check_honeypot(view_func)
        request = _get_POST_request()
        resp = new_view_func(request)
        self.assertEquals(resp.__class__, HttpResponseBadRequest)

    def test_decorator_argument(self):
        """ test that check_honeypot(view, 'fieldname') works """
        new_view_func = check_honeypot(view_func, 'fieldname')
        request = _get_POST_request()
        resp = new_view_func(request)
        self.assertEquals(resp.__class__, HttpResponseBadRequest)

    def test_decorator_py24_syntax(self):
        """ test that @check_honeypot('field') decorator-factory syntax works """
        @check_honeypot('field')
        def new_view_func(request):
            return HttpResponse()
        request = _get_POST_request()
        resp = new_view_func(request)
        self.assertEquals(resp.__class__, HttpResponseBadRequest)
class RenderHoneypotField(HoneypotTestCase):
    """Tests for the {% render_honeypot_field %} template tag."""

    def _assert_rendered_field(self, template, fieldname, value=''):
        # Render the tag and compare against the reference template output.
        correct = render_to_string('honeypot/honeypot_field.html',
                                   {'fieldname':fieldname, 'value': value})
        rendered = template.render(Context())
        self.assertEquals(rendered, correct)

    def test_default_templatetag(self):
        """ test that {% render_honeypot_field %} works and defaults to HONEYPOT_FIELD_NAME """
        template = Template('{% load honeypot %}{% render_honeypot_field %}')
        self._assert_rendered_field(template, settings.HONEYPOT_FIELD_NAME, '')

    def test_templatetag_honeypot_value(self):
        """ test that {% render_honeypot_field %} uses settings.HONEYPOT_VALUE """
        template = Template('{% load honeypot %}{% render_honeypot_field %}')
        settings.HONEYPOT_VALUE = '(leave blank)'
        self._assert_rendered_field(template, settings.HONEYPOT_FIELD_NAME, settings.HONEYPOT_VALUE)

    def test_templatetag_argument(self):
        """ test that {% render_honeypot_field 'fieldname' %} works """
        template = Template('{% load honeypot %}{% render_honeypot_field "fieldname" %}')
        self._assert_rendered_field(template, 'fieldname', '')
class HoneypotMiddleware(HoneypotTestCase):
    """Tests for the view middleware (request rejection) and response
    middleware (injecting the honeypot field into rendered POST forms)."""

    # Minimal HTML body containing a POST form for rewrite tests.
    _response_body = '<form method="POST"></form>'

    def test_view_middleware_invalid(self):
        """ don't call view when HONEYPOT_VERIFIER returns False """
        request = _get_POST_request()
        retval = HoneypotViewMiddleware().process_view(request, view_func, (), {})
        self.assertEquals(retval.__class__, HttpResponseBadRequest)

    def test_view_middleware_valid(self):
        """ call view when HONEYPOT_VERIFIER returns True """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ''
        retval = HoneypotViewMiddleware().process_view(request, view_func, (), {})
        self.assertEquals(retval, None)

    def test_response_middleware_rewrite(self):
        """ ensure POST forms are rewritten """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ''
        response = HttpResponse(self._response_body)
        HoneypotResponseMiddleware().process_response(request, response)
        self.assertNotEqual(response.content, self._response_body)
        self.assertContains(response, 'name="%s"' % settings.HONEYPOT_FIELD_NAME)

    def test_response_middleware_contenttype_exclusion(self):
        """ ensure POST forms are not rewritten for non-html content types """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ''
        response = HttpResponse(self._response_body, content_type='text/javascript')
        HoneypotResponseMiddleware().process_response(request, response)
        self.assertEquals(response.content, self._response_body)

    def test_response_middleware_unicode(self):
        """ ensure that POST form rewriting works with unicode templates """
        request = _get_GET_request()
        unicode_body = u'\u2603'+self._response_body # add unicode snowman
        response = HttpResponse(unicode_body)
        HoneypotResponseMiddleware().process_response(request, response)
        self.assertNotEqual(response.content, unicode_body)
        self.assertContains(response, 'name="%s"' % settings.HONEYPOT_FIELD_NAME)
| {
"content_hash": "a3303c322698f78d31fe031addb68f5d",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 110,
"avg_line_length": 45.68862275449102,
"alnum_prop": 0.672346002621232,
"repo_name": "mikek/django-honeypot",
"id": "c8b81410f909f617168c05c9d76d996bf06cd7ea",
"size": "7630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "honeypot/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13760"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import binascii
from twisted.internet.protocol import Factory
from twisted.protocols.basic import Int32StringReceiver
from twisted.internet.error import ConnectionDone
from autobahn.twisted.util import peer2str, transport_channel_id
from autobahn.wamp.exception import ProtocolError, SerializationError, TransportLost
import txaio
__all__ = (
'WampRawSocketServerProtocol',
'WampRawSocketClientProtocol',
'WampRawSocketServerFactory',
'WampRawSocketClientFactory'
)
class WampRawSocketProtocol(Int32StringReceiver):
    """
    Base class for Twisted-based WAMP-over-RawSocket protocols.

    Frames WAMP messages as length-prefixed strings over a stream transport.
    The RawSocket opening handshake itself is implemented by the server and
    client subclasses; this base class manages the session lifecycle and
    serialization once the handshake is done.
    """
    log = txaio.make_logger()

    def connectionMade(self):
        """Twisted callback: initialize per-connection handshake state."""
        self.log.debug("WampRawSocketProtocol: connection made")
        # the peer we are connected to
        #
        try:
            peer = self.transport.getPeer()
        except AttributeError:
            # ProcessProtocols lack getPeer()
            self.peer = "?"
        else:
            self.peer = peer2str(peer)
        # this will hold an ApplicationSession object
        # once the RawSocket opening handshake has been
        # completed
        #
        self._session = None
        # Will hold the negotiated serializer once the opening handshake is complete
        #
        self._serializer = None
        # Will be set to True once the opening handshake is complete
        #
        self._handshake_complete = False
        # Buffer for opening handshake received bytes.
        #
        self._handshake_bytes = b''
        # Client requested maximum length of serialized messages.
        #
        self._max_len_send = None

    def _on_handshake_complete(self):
        """Create the ApplicationSession and open it over this transport."""
        try:
            self._session = self.factory._factory()
            self._session.onOpen(self)
        except Exception as e:
            # Exceptions raised in onOpen are fatal ..
            self.log.warn("WampRawSocketProtocol: ApplicationSession constructor / onOpen raised ({0})".format(e))
            self.abort()
        else:
            self.log.info("ApplicationSession started.")

    def connectionLost(self, reason):
        """Twisted callback: forward connection loss to the WAMP session."""
        self.log.info("WampRawSocketProtocol: connection lost: reason = '{0}'".format(reason))
        try:
            wasClean = isinstance(reason.value, ConnectionDone)
            self._session.onClose(wasClean)
        except Exception as e:
            # silently ignore exceptions raised here ..
            # (this also covers the case where no session was attached yet,
            # i.e. self._session is still None)
            self.log.warn("WampRawSocketProtocol: ApplicationSession.onClose raised ({0})".format(e))
        self._session = None

    def stringReceived(self, payload):
        """Twisted callback: unserialize one frame and dispatch WAMP messages."""
        self.log.debug("WampRawSocketProtocol: RX octets: {0}".format(binascii.hexlify(payload)))
        try:
            for msg in self._serializer.unserialize(payload):
                self.log.debug("WampRawSocketProtocol: RX WAMP message: {0}".format(msg))
                self._session.onMessage(msg)
        except ProtocolError as e:
            self.log.warn(str(e))
            self.log.warn("WampRawSocketProtocol: WAMP Protocol Error ({0}) - aborting connection".format(e))
            self.abort()
        except Exception as e:
            self.log.warn("WampRawSocketProtocol: WAMP Internal Error ({0}) - aborting connection".format(e))
            self.abort()

    def send(self, msg):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.send`

        Serializes ``msg`` with the negotiated serializer and writes it as
        one length-prefixed frame. Raises :class:`SerializationError` if the
        payload cannot be serialized, :class:`TransportLost` if no session
        is attached.
        """
        if self.isOpen():
            self.log.debug("WampRawSocketProtocol: TX WAMP message: {0}".format(msg))
            try:
                payload, _ = self._serializer.serialize(msg)
            except Exception as e:
                # all exceptions raised from above should be serialization errors ..
                raise SerializationError("WampRawSocketProtocol: unable to serialize WAMP application payload ({0})".format(e))
            else:
                self.sendString(payload)
                self.log.debug("WampRawSocketProtocol: TX octets: {0}".format(binascii.hexlify(payload)))
        else:
            raise TransportLost()

    def isOpen(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.isOpen`

        The transport counts as open exactly while a session is attached.
        """
        return self._session is not None

    def close(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.close`
        """
        if self.isOpen():
            self.transport.loseConnection()
        else:
            raise TransportLost()

    def abort(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.abort`
        """
        if self.isOpen():
            if hasattr(self.transport, 'abortConnection'):
                # ProcessProtocol lacks abortConnection()
                self.transport.abortConnection()
            else:
                self.transport.loseConnection()
        else:
            raise TransportLost()
class WampRawSocketServerProtocol(WampRawSocketProtocol):
    """
    Base class for Twisted-based WAMP-over-RawSocket server protocols.
    """

    def dataReceived(self, data):
        """
        Buffer the 4-octet RawSocket opening handshake; once complete,
        negotiate the serializer and frame-size limits, send the reply, and
        hand any already-received surplus bytes to the framed-message path.
        """
        if self._handshake_complete:
            WampRawSocketProtocol.dataReceived(self, data)
        else:
            remaining = 4 - len(self._handshake_bytes)
            self._handshake_bytes += data[:remaining]
            if len(self._handshake_bytes) == 4:
                self.log.debug("WampRawSocketProtocol: opening handshake received - {0}".format(binascii.b2a_hex(self._handshake_bytes)))
                # use a 1-octet slice (not an index) so this works the same
                # on Python 2 (str) and Python 3 (bytes)
                if ord(self._handshake_bytes[0:1]) != 0x7f:
                    self.log.debug("WampRawSocketProtocol: invalid magic byte (octet 1) in opening handshake: was 0x{0}, but expected 0x7f".format(binascii.b2a_hex(self._handshake_bytes[0:1])))
                    self.abort()
                    # BUG FIX: stop processing after aborting - previously the
                    # handshake logic fell through on a dead connection.
                    return
                # peer requests us to send messages of maximum length 2**max_len_exp
                #
                self._max_len_send = 2 ** (9 + (ord(self._handshake_bytes[1:2]) >> 4))
                self.log.debug("WampRawSocketProtocol: client requests us to send out most {} bytes per message".format(self._max_len_send))
                # client wants to speak this serialization format
                #
                ser_id = ord(self._handshake_bytes[1:2]) & 0x0F
                if ser_id in self.factory._serializers:
                    self._serializer = self.factory._serializers[ser_id]
                    self.log.debug("WampRawSocketProtocol: client wants to use serializer {}".format(ser_id))
                else:
                    self.log.debug("WampRawSocketProtocol: opening handshake - no suitable serializer found (client requested {0}, and we have {1})".format(ser_id, self.factory._serializers.keys()))
                    self.abort()
                    # BUG FIX: without this return, the reply below would
                    # dereference self._serializer (still None) after abort.
                    return
                # we request the peer to send message of maximum length 2**reply_max_len_exp
                #
                reply_max_len_exp = 24
                # send out handshake reply
                #
                reply_octet2 = bytes(bytearray([
                    ((reply_max_len_exp - 9) << 4) | self._serializer.RAWSOCKET_SERIALIZER_ID]))
                self.transport.write(b'\x7F')        # magic byte
                self.transport.write(reply_octet2)   # max length / serializer
                self.transport.write(b'\x00\x00')    # reserved octets
                self._handshake_complete = True
                self._on_handshake_complete()
                # BUG FIX: pass the serializer as a keyword argument for the
                # txaio structured logger (a positional arg is not accepted);
                # this also matches the client-side protocol's log call.
                self.log.debug("WampRawSocketProtocol: opening handshake completed (using serializer {serializer})", serializer=self._serializer)
            # consume any remaining data received already ..
            #
            data = data[remaining:]
            if data:
                self.dataReceived(data)

    def get_channel_id(self, channel_id_type=u'tls-unique'):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.get_channel_id`
        """
        return transport_channel_id(self.transport, is_server=True, channel_id_type=channel_id_type)
class WampRawSocketClientProtocol(WampRawSocketProtocol):
    """
    Base class for Twisted-based WAMP-over-RawSocket client protocols.
    """

    def connectionMade(self):
        """Twisted callback: send the RawSocket opening handshake request."""
        WampRawSocketProtocol.connectionMade(self)
        self._serializer = self.factory._serializer
        # we request the peer to send messages of maximum length 2**request_max_len_exp
        #
        request_max_len_exp = 24
        # send out handshake request
        #
        request_octet2 = bytes(bytearray([
            ((request_max_len_exp - 9) << 4) | self._serializer.RAWSOCKET_SERIALIZER_ID]))
        self.transport.write(b'\x7F')          # magic byte
        self.transport.write(request_octet2)   # max length / serializer
        self.transport.write(b'\x00\x00')      # reserved octets

    def dataReceived(self, data):
        """
        Buffer the 4-octet RawSocket handshake reply; once complete, verify
        the server's choice and switch to framed WAMP messaging.
        """
        if self._handshake_complete:
            WampRawSocketProtocol.dataReceived(self, data)
        else:
            remaining = 4 - len(self._handshake_bytes)
            self._handshake_bytes += data[:remaining]
            if len(self._handshake_bytes) == 4:
                self.log.debug("WampRawSocketProtocol: opening handshake received - {0}".format(binascii.b2a_hex(self._handshake_bytes)))
                # use a 1-octet slice (not an index) so this works the same
                # on Python 2 (str) and Python 3 (bytes)
                if ord(self._handshake_bytes[0:1]) != 0x7f:
                    self.log.debug("WampRawSocketProtocol: invalid magic byte (octet 1) in opening handshake: was 0x{0}, but expected 0x7f".format(binascii.b2a_hex(self._handshake_bytes[0:1])))
                    self.abort()
                    # BUG FIX: stop processing after aborting - previously the
                    # handshake logic fell through on a dead connection.
                    return
                # peer requests us to send messages of maximum length 2**max_len_exp
                #
                self._max_len_send = 2 ** (9 + (ord(self._handshake_bytes[1:2]) >> 4))
                self.log.debug("WampRawSocketProtocol: server requests us to send out most {} bytes per message".format(self._max_len_send))
                # server replied with this serialization format
                #
                ser_id = ord(self._handshake_bytes[1:2]) & 0x0F
                if ser_id != self._serializer.RAWSOCKET_SERIALIZER_ID:
                    self.log.debug("WampRawSocketProtocol: opening handshake - no suitable serializer found (server replied {0}, and we requested {1})".format(ser_id, self._serializer.RAWSOCKET_SERIALIZER_ID))
                    self.abort()
                    # BUG FIX: without this return, the session would be
                    # opened below despite the serializer mismatch.
                    return
                self._handshake_complete = True
                self._on_handshake_complete()
                self.log.debug("WampRawSocketProtocol: opening handshake completed (using serializer {serializer})", serializer=self._serializer)
            # consume any remaining data received already ..
            #
            data = data[remaining:]
            if data:
                self.dataReceived(data)

    def get_channel_id(self, channel_id_type=u'tls-unique'):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.get_channel_id`
        """
        return transport_channel_id(self.transport, is_server=False, channel_id_type=channel_id_type)
class WampRawSocketFactory(Factory):
    """
    Common base class shared by the Twisted WAMP-over-RawSocket server and
    client factories below.
    """
class WampRawSocketServerFactory(WampRawSocketFactory):
    """
    Base class for Twisted-based WAMP-over-RawSocket server factories.
    """
    protocol = WampRawSocketServerProtocol

    def __init__(self, factory, serializers=None):
        """
        :param factory: A callable that produces instances that implement
            :class:`autobahn.wamp.interfaces.ITransportHandler`
        :type factory: callable
        :param serializers: A list of WAMP serializers to use (or None for default
            serializers). Serializers must implement
            :class:`autobahn.wamp.interfaces.ISerializer`.
        :type serializers: list
        """
        assert(callable(factory))
        self._factory = factory
        if serializers is None:
            # Probe for the optional serializers in preference order;
            # any whose backing implementation is missing is skipped.
            serializers = []
            try:
                from autobahn.wamp.serializer import CBORSerializer
                serializers.extend(
                    [CBORSerializer(batched=True), CBORSerializer()])
            except ImportError:
                pass
            try:
                from autobahn.wamp.serializer import MsgPackSerializer
                serializers.extend(
                    [MsgPackSerializer(batched=True), MsgPackSerializer()])
            except ImportError:
                pass
            try:
                from autobahn.wamp.serializer import JsonSerializer
                serializers.extend(
                    [JsonSerializer(batched=True), JsonSerializer()])
            except ImportError:
                pass
        if not serializers:
            raise Exception("could not import any WAMP serializers")
        # Index the serializers by their RawSocket ID so the handshake can
        # look up the one the client requested.
        self._serializers = {ser.RAWSOCKET_SERIALIZER_ID: ser
                             for ser in serializers}
class WampRawSocketClientFactory(WampRawSocketFactory):
    """
    Base class for Twisted-based WAMP-over-RawSocket client factories.
    """
    protocol = WampRawSocketClientProtocol

    def __init__(self, factory, serializer=None):
        """
        :param factory: A callable that produces instances that implement
            :class:`autobahn.wamp.interfaces.ITransportHandler`
        :type factory: callable
        :param serializer: The WAMP serializer to use (or None for default
            serializer). Serializers must implement
            :class:`autobahn.wamp.interfaces.ISerializer`.
        :type serializer: obj
        """
        assert(callable(factory))
        self._factory = factory
        if serializer is None:
            # Fall back to the first importable serializer, preferring
            # CBOR, then MsgPack, then JSON.
            def _make_cbor():
                from autobahn.wamp.serializer import CBORSerializer
                return CBORSerializer()

            def _make_msgpack():
                from autobahn.wamp.serializer import MsgPackSerializer
                return MsgPackSerializer()

            def _make_json():
                from autobahn.wamp.serializer import JsonSerializer
                return JsonSerializer()

            for make in (_make_cbor, _make_msgpack, _make_json):
                try:
                    serializer = make()
                    break
                except ImportError:
                    pass
        if serializer is None:
            raise Exception("could not import any WAMP serializer")
        self._serializer = serializer
| {
"content_hash": "e6a786ffb54f9fc009221ce9f25091e5",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 209,
"avg_line_length": 37.38363171355499,
"alnum_prop": 0.6029965109119518,
"repo_name": "RyanHope/AutobahnPython",
"id": "88bd736039d96eba5cc25f8afe8490044fd83eda",
"size": "15894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autobahn/twisted/rawsocket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3648"
},
{
"name": "Python",
"bytes": "983364"
}
],
"symlink_target": ""
} |
import numpy as np
class Train:
    """
    Iterator class that runs theano functions over data while gathering
    the resulting monitoring values for plotting.
    """
    def __init__(self, channels, n_batches=None, accumulate='mean'):
        """
        Channels are passed as list or tuple "channels" of dictionaries.
        Expecting each channel as a dictionary with the following entries:
        - "names": <Names of channels as tuple of strings>
        - "dataset": <Which dataset to average this value over (one of train,
            test, validation). Write "None" for values to be evaluated
            independently at the end of an epoch.>
        - "eval": <Theano function to evaluate, expecting it to take an integer
            index to slice into a shared variable dataset>
        - "dimensions": <value dimension as string or Holoviews Dimension>
        Also, need to know how many batches are in the dataset to iterate over:
        - n_batches - dict mapping 'train'/'valid'/'test' to the number of
            batches in that dataset; defaults to None for each.
        """
        # BUG FIX: n_batches used to be a mutable default argument, so every
        # instance constructed without an explicit value shared (and could
        # mutate) one dict. Build a fresh default per instance instead.
        if n_batches is None:
            n_batches = {'train': None, 'valid': None, 'test': None}
        self.n_batches = n_batches
        # make a dictionary of channel:[dimension]
        self.dimensions = {}
        for channel in channels:
            dimension = channel.get('dimensions', False)
            if dimension:
                for ch, dim in zip(channel['names'], enforce_iterable(dimension)):
                    self.dimensions[ch] = [dim]
        # store channels
        self.channels = channels
        self.accumulate = accumulate

    def __iter__(self):
        return self

    def next(self):
        """Run one epoch over all channels; return {channel_name: value}."""
        self.collected_channels = {}
        # iterate over train, validation and test channels:
        for dataset_name in ['train', 'valid', 'test']:
            # gather right channels for this dataset
            channels = [channel for channel in self.channels
                        if channel['dataset'] == dataset_name]
            # check we have some channels to iterate over
            if channels != []:
                for i in range(self.n_batches[dataset_name]):
                    # on each batch, execute functions for training channels and
                    # gather results
                    for channel in channels:
                        returned_vals = enforce_iterable(channel['eval'](i))
                        # match them to channel names
                        for name, rval in zip(channel['names'], returned_vals):
                            if not self.collected_channels.get(name, False):
                                self.collected_channels[name] = []
                            self.collected_channels[name].append(rval)
                # take the mean over this epoch for each channel
                if self.accumulate == 'mean':
                    for channel in channels:
                        for name in channel['names']:
                            self.collected_channels[name] = \
                                np.mean(self.collected_channels[name])
        # finally, gather the independent channels
        channels = [channel for channel in self.channels
                    if channel['dataset'] == 'None']
        for channel in channels:
            # assume the function requires no input (could be useful to add
            # inputs later)
            returned_vals = enforce_iterable(channel['eval']())
            for name, rval in zip(channel['names'], returned_vals):
                self.collected_channels[name] = rval
        return self.collected_channels

    # Python 3 iterator protocol; keeps the legacy .next() spelling working.
    __next__ = next
def enforce_iterable(foo):
    """
    Return *foo* as something iterable: non-iterables are wrapped in a
    one-element list, and 0-d numpy arrays are unwrapped to their scalar
    and wrapped likewise. Iterable inputs pass through unchanged.
    """
    if isinstance(foo, np.ndarray):
        # 0-d arrays are technically iterable-typed but cannot be iterated.
        return [foo.tolist()] if foo.shape == () else foo
    return foo if hasattr(foo, '__iter__') else [foo]
| {
"content_hash": "b1295fbfcbf2a63142f606aaf2b44fa8",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 80,
"avg_line_length": 45.40229885057471,
"alnum_prop": 0.5506329113924051,
"repo_name": "gngdb/holo-nets",
"id": "ed38c61296494e19ac38f30f413d3721d96e1a4c",
"size": "3973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holonets/train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32607"
}
],
"symlink_target": ""
} |
"""
Misc. useful functions that can be used at many places in the program.
"""
import subprocess as sp
import sys
import warnings
def sys_write_flush(s):
    """ Writes and flushes without delay a text in the console """
    out = sys.stdout
    out.write(s)
    out.flush()
def verbose_print(verbose, s):
    """ Only prints s (with sys_write_flush) if verbose is True."""
    if not verbose:
        return
    sys_write_flush(s)
def subprocess_call(cmd, verbose=True, errorprint=True):
    """ Executes the given subprocess command."""
    verbose_print(verbose, "\nMoviePy Running:\n>>> "+ " ".join(cmd))
    process = sp.Popen(cmd, stderr = sp.PIPE)
    # communicate() waits for the process to finish.
    out, err = process.communicate()
    process.stderr.close()
    returncode = process.returncode
    del process
    if not returncode:
        verbose_print(verbose, "\n... command successful.\n")
        return
    verbose_print(errorprint, "\nMoviePy: This command returned an error !")
    raise IOError(err.decode('utf8'))
def cvsecs(*args):
    """
    Converts a time to seconds. Either cvsecs(secs), cvsecs(min,secs) or
    cvsecs(hours,mins,secs).

    >>> cvsecs(5.5)      # -> 5.5 seconds
    >>> cvsecs(10, 4.5)  # -> 604.5 seconds
    >>> cvsecs(1, 0, 5)  # -> 3605 seconds

    Raises:
        ValueError: if called with zero or more than three arguments.
    """
    if len(args) == 1:
        return args[0]
    elif len(args) == 2:
        return 60*args[0]+args[1]
    elif len(args) ==3:
        return 3600*args[0]+60*args[1]+args[2]
    # BUG FIX: previously fell through and silently returned None for any
    # other argument count; fail loudly instead.
    raise ValueError("cvsecs expects 1, 2 or 3 arguments, got %d" % len(args))
def deprecated_version_of(f, oldname, newname=None):
    """ Indicates that a function is deprecated and has a new name.

    `f` is the new function, `oldname` the name of the deprecated
    function, `newname` the name of `f`, which can be automatically
    found.

    Returns
    ========

    f_deprecated
      A function that does the same thing as f, but with a docstring
      and a printed message on call which say that the function is
      deprecated and that you should use f instead.

    Examples
    =========

    >>> # The badly named method 'to_file' is replaced by 'write_file'
    >>> class Clip:
    >>>    def write_file(self, some args):
    >>>        # blablabla
    >>>
    >>> Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file')
    """
    if newname is None:
        newname = f.__name__
    # Same message is used both for the runtime warning and the docstring.
    warning = ("The function ``{0}`` is deprecated and is kept temporarily "
               "for backwards compatibility.\nPlease use the new name, "
               "``{1}``, instead.").format(oldname, newname)

    def fdepr(*args, **kwargs):
        warnings.warn("MoviePy: " + warning, PendingDeprecationWarning)
        return f(*args, **kwargs)

    fdepr.__doc__ = warning
    return fdepr
"content_hash": "6d04065ca3d7a61fef606cfb60f3af18",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 80,
"avg_line_length": 28.074468085106382,
"alnum_prop": 0.5930276619931792,
"repo_name": "DevinGeo/moviepy",
"id": "7278c38c90deb0845fab0e4b7b1655124ed91501",
"size": "2639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moviepy/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "295"
},
{
"name": "Python",
"bytes": "220312"
},
{
"name": "Shell",
"bytes": "6748"
}
],
"symlink_target": ""
} |
import argparse
import collections
import os
import sys
from spinnaker.run import check_run_and_monitor
from spinnaker.run import check_run_quick
from spinnaker.run import run_and_monitor
from spinnaker.run import run_quick
def get_repository_dir(name):
    """Determine the local directory that a given repository is in.

    We assume refresh_source runs in the build directory containing all the
    repositories, except spinnaker/ itself, which lives one level above this
    script's own directory.

    Args:
      name [string]: The repository name.
    """
    if name != 'spinnaker':
        return name
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, '..'))
class SourceRepository(
        collections.namedtuple('SourceRepository', ['name', 'owner'])):
    """A reference to a github repository.

    Attributes:
      name: The [short] name of the repository.
      owner: The github user name owning the repository.
    """
class Refresher(object):
"""Provides branch management capabilities across Spinnaker repositories.
The Spinnaker repositories are federated across several independent
repositories. This class provides convenient support to update local
repositories from remote and vice-versa.
The origin repository is specified using --github_user option. This specifies
the github repository owner for the origin repositories. It is only relevant
when a repository needs to be cloned to establish a local repository. The
value 'upstream' can be used to indicate that the repository should be cloned
from its authoritative source as opposed to another user's fork.
When the refresher clones new repositories, it establishes an "upstream"
remote to the authoritative repository (based on hard-coded mappings)
unless the origin is the upstream. Upstream pulls are disabled (including
when the origin is the upstream) and only the master branch can be pulled
from upstream.
If --pull_branch is used then the local repositories will pull their current
branch from the origin repository. If a local repository does not yet exist,
then it will be cloned from the --github_user using the branch specified
by --pull_branch. The --pull_origin option is similar but implies that the
branch is 'master'. This is intended to perform complete updates of the
local repositories.
--push_branch (or --push_master, implying 'master' branch) will push the
local repository branch back to the origin, but only if the local repository
is in the specified branch. This is for safety to prevent accidental pushes.
It is assumed that multi-repository changes will have a common feature-branch
name, and not all repositories will be affected.
Of course, individual repositories can still be managed using explicit git
commands. This class is intended for cross-cutting management.
"""
__OPTIONAL_REPOSITORIES = [
SourceRepository('citest', 'google'),
SourceRepository('spinnaker-monitoring', 'spinnaker'),
SourceRepository('halyard', 'spinnaker')]
__REQUIRED_REPOSITORIES = [
SourceRepository('spinnaker', 'spinnaker'),
SourceRepository('clouddriver', 'spinnaker'),
SourceRepository('orca', 'spinnaker'),
SourceRepository('front50', 'spinnaker'),
SourceRepository('echo', 'spinnaker'),
SourceRepository('rosco', 'spinnaker'),
SourceRepository('gate', 'spinnaker'),
SourceRepository('fiat', 'spinnaker'),
SourceRepository('igor', 'spinnaker'),
SourceRepository('deck', 'spinnaker')]
@property
def pull_branch(self):
"""Gets the branch that we want to pull.
This may raise a ValueError if the specification is inconsistent.
This is determined lazily rather than at construction to be consistent
with the push_branch property.
"""
if self.__options.pull_origin:
if (self.__options.pull_branch
and self.__options.pull_branch != 'master'):
raise ValueError(
'--pull_origin is incompatible with --pull_branch={branch}'
.format(branch=self.__options.pull_branch))
return 'master'
return self.__options.pull_branch
@property
def push_branch(self):
"""Gets the branch that we want to push.
This may raise a ValueError if the specification is inconsistent.
This is determined lazily rather than at construction because the
option to push is not necessarily present depending on the use case.
"""
if self.__options.push_master:
if (self.__options.push_branch
and self.__options.push_branch != 'master'):
raise ValueError(
'--push_origin is incompatible with --push_branch={branch}'
.format(branch=self.__options.push_branch))
return 'master'
return self.__options.push_branch
def __init__(self, options):
self.__options = options
self.__extra_repositories = self.__OPTIONAL_REPOSITORIES
if options.extra_repos:
for extra in options.extra_repos.split(','):
pair = extra.split('=')
if len(pair) != 2:
raise ValueError(
'Invalid --extra_repos value "{extra}"'.format(extra=extra))
self.__extra_repositories.append(SourceRepository(pair[0], pair[1]))
def get_remote_repository_url(self, path, which='origin'):
"""Determine the repository that a given path is from.
Args:
path [string]: The path to the repository
which [string]: The remote repository name (origin or upstream).
Returns:
The origin url for path, or None if not a git repository.
"""
result = run_quick('git -C {path} config --get remote.{which}.url'
.format(path=path, which=which),
echo=False)
if result.returncode:
return None
return result.stdout.strip()
def get_local_branch_name(self, name):
"""Determine which git branch a local repository is in.
Args:
name [string]: The repository name.
Returns:
The name of the branch.
"""
result = run_quick('git -C "{dir}" rev-parse --abbrev-ref HEAD'
.format(dir=get_repository_dir(name)),
echo=False)
if result.returncode:
error = 'Could not determine branch: ' + result.stdout.strip()
raise RuntimeError(error)
return result.stdout.strip()
def get_github_repository_url(self, repository, owner=None):
"""Determine the URL for a given github repository.
Args:
repository [string]: The upstream SourceRepository.
owner [string]: The explicit owner for the repository we want.
If not provided then use the github_user in the bound options.
"""
user = owner or self.__options.github_user
if not user:
raise ValueError('No --github_user specified.')
if user == 'default' or user == 'upstream':
user = repository.owner
url_pattern = ('https://github.com/{user}/{name}.git'
if self.__options.use_https
else 'git@github.com:{user}/{name}.git')
return url_pattern.format(user=user, name=repository.name)
    def git_clone(self, repository, owner=None):
        """Clone the specified repository into the build directory.

        Args:
          repository [SourceRepository]: The github repository to clone.
          owner [string]: An explicit repository owner.
              If not provided use the configured options.

        Raises:
          SystemExit: if a *required* repository cannot be cloned. A missing
              optional repository only produces a warning.
        """
        name = repository.name
        repository_dir = get_repository_dir(name)
        upstream_user = repository.owner
        # Clone the branch we intend to track (default master).
        branch = self.pull_branch or 'master'
        origin_url = self.get_github_repository_url(repository, owner=owner)
        upstream_url = 'https://github.com/{upstream_user}/{name}.git'.format(
            upstream_user=upstream_user, name=name)
        # Don't echo because we're going to hide some failure.
        print 'Cloning {name} from {origin_url} -b {branch}.'.format(
            name=name, origin_url=origin_url, branch=branch)
        shell_result = run_and_monitor(
            'git clone {url} -b {branch}'.format(url=origin_url, branch=branch),
            echo=False)
        if not shell_result.returncode:
            if shell_result.stdout:
                print shell_result.stdout
        else:
            # Clone failed: fatal for required repositories, a warning for
            # optional ones.
            if repository in self.__extra_repositories:
                sys.stderr.write('WARNING: Missing optional repository {name}.\n'
                                 .format(name=name))
                sys.stderr.write('         Continue on without it.\n')
                return
            sys.stderr.write(shell_result.stderr or shell_result.stdout)
            sys.stderr.write(
                'FATAL: Cannot continue without required repository {name}.\n'
                '       Consider using github to fork one from {upstream}.\n'.
                format(name=name, upstream=upstream_url))
            raise SystemExit('Repository {url} not found.'.format(url=origin_url))
        # Wire up an 'upstream' remote pointing at the authoritative repo so
        # master can later be pulled from it (unless origin IS the upstream).
        if self.__options.add_upstream and origin_url != upstream_url:
            print '  Adding upstream repository {upstream}.'.format(
                upstream=upstream_url)
            check_run_quick('git -C "{dir}" remote add upstream {url}'
                            .format(dir=repository_dir, url=upstream_url),
                            echo=False)
        # Optionally disable pushes toward the authoritative repository to
        # prevent accidental writes.
        if self.__options.disable_upstream_push:
            which = 'upstream' if origin_url != upstream_url else 'origin'
            print '  Disabling git pushes to {which} {upstream}'.format(
                which=which, upstream=upstream_url)
            check_run_quick(
                'git -C "{dir}" remote set-url --push {which} disabled'
                .format(dir=repository_dir, which=which),
                echo=False)
def pull_from_origin(self, repository):
"""Pulls the current branch from the git origin.
Args:
repository [string]: The local repository to update.
"""
name = repository.name
repository_dir = get_repository_dir(name)
if not os.path.exists(repository_dir):
self.git_clone(repository)
return
print 'Updating {name} from origin'.format(name=name)
branch = self.get_local_branch_name(name)
if branch != self.pull_branch:
if self.__options.force_pull:
sys.stderr.write(
'WARNING: Updating {name} branch={branch}, *NOT* "{want}"\n'
.format(name=name, branch=branch, want=self.pull_branch))
else:
sys.stderr.write(
'WARNING: Skipping {name} because branch={branch},'
' *NOT* "{want}"\n'
.format(name=name, branch=branch, want=self.pull_branch))
return
try:
check_run_and_monitor('git -C "{dir}" pull origin {branch} --tags'
.format(dir=repository_dir, branch=branch),
echo=True)
except RuntimeError:
result = check_run_and_monitor('git -C "{dir}" branch -r'
.format(dir=repository_dir),
echo=False)
if result.stdout.find('origin/{branch}\n') >= 0:
raise
sys.stderr.write(
'WARNING {name} branch={branch} is not known to the origin.\n'
.format(name=name, branch=branch))
    def pull_from_upstream_if_master(self, repository):
        """Pulls the master branch from the upstream repository.

        This will only have effect if the local repository exists
        and is currently in the master branch.

        Args:
          repository [SourceRepository]: The repository to update.
        """
        name = repository.name
        repository_dir = get_repository_dir(name)
        if not os.path.exists(repository_dir):
            self.pull_from_origin(repository)
        # pull_from_origin may not have created the directory (e.g. a missing
        # optional repository); in that case there is nothing to do.
        if not os.path.exists(repository_dir):
            return
        branch = self.get_local_branch_name(name)
        if branch != 'master':
            # Only master may be pulled from upstream; skip feature branches.
            sys.stderr.write('Skipping {name} because it is in branch={branch}.\n'
                             .format(name=name, branch=branch))
            return
        print 'Pulling master {name} from upstream'.format(name=name)
        check_run_and_monitor('git -C "{dir}" pull upstream master --tags'
                              .format(dir=repository_dir),
                              echo=True)
    def push_to_origin_if_target_branch(self, repository):
        """Pushes the current target branch of the local repository to the origin.

        This will only have effect if the local repository exists
        and is currently in the target branch (a safety measure so only
        repositories intentionally on the feature branch are pushed).

        Args:
          repository [SourceRepository]: The repository to push from.
        """
        name = repository.name
        repository_dir = get_repository_dir(name)
        if not os.path.exists(repository_dir):
            sys.stderr.write('Skipping {name} because it does not yet exist.\n'
                             .format(name=name))
            return
        branch = self.get_local_branch_name(name)
        if branch != self.push_branch:
            sys.stderr.write(
                'Skipping {name} because it is in branch={branch}, not {want}.\n'
                .format(name=name, branch=branch, want=self.push_branch))
            return
        print 'Pushing {name} to origin.'.format(name=name)
        check_run_and_monitor('git -C "{dir}" push origin {branch} --tags'.format(
                                  dir=repository_dir, branch=self.push_branch),
                              echo=True)
def push_all_to_origin_if_target_branch(self):
"""Push all the local repositories current target branch to origin.
This will skip any local repositories that are not currently in the
target branch.
"""
all_repos = self.__REQUIRED_REPOSITORIES + self.__extra_repositories
for repository in all_repos:
self.push_to_origin_if_target_branch(repository)
def pull_all_from_upstream_if_master(self):
"""Pull all the upstream master branches into their local repository.
This will skip any local repositories that are not currently in the master
branch.
"""
all_repos = self.__REQUIRED_REPOSITORIES + self.__extra_repositories
for repository in all_repos:
self.pull_from_upstream_if_master(repository)
def pull_all_from_origin(self):
"""Pull all the origin target branches into their local repository.
This will skip any local repositories that are not currently in the
target branch.
"""
all_repos = self.__REQUIRED_REPOSITORIES + self.__extra_repositories
for repository in all_repos:
try:
self.pull_from_origin(repository)
except RuntimeError as ex:
if repository in self.__extra_repositories and not os.path.exists(
get_repository_dir(repository)):
sys.stderr.write(
'IGNORING error "{msg}" in optional repository {name}'
' because the local repository does not yet exist.\n'
.format(msg=ex.message, name=repository.name))
else:
raise
def __determine_spring_config_location(self):
root = '{dir}/config'.format(
dir=os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
home = os.path.join(os.environ['HOME'] + '/.spinnaker')
return '{root}/,{home}/'.format(home=home, root=root)
def write_gradle_run_script(self, repository):
"""Generate a dev_run.sh script for the local repository.
Args:
repository [string]: The name of the local repository to generate in.
"""
name = repository.name
path = '{name}/start_dev.sh'.format(name=name)
with open(path, 'w') as f:
f.write("""#!/bin/bash
d=$(dirname "$0")
cd "$d"
LOG_DIR=${{LOG_DIR:-../logs}}
DEF_SYS_PROPERTIES="-Dspring.config.location='{spring_location}'"
bash -c "(./gradlew $DEF_SYS_PROPERTIES $@ > '$LOG_DIR/{name}.log') 2>&1\
| tee -a '$LOG_DIR/{name}.log' >& '$LOG_DIR/{name}.err' &"
""".format(name=name,
spring_location=self.__determine_spring_config_location()))
os.chmod(path, 0777)
def write_deck_run_script(self, repository):
"""Generate a dev_run.sh script for running deck locally.
Args:
repository [string]: The name of the local repository to generate in.
"""
name = repository.name
path = '{name}/start_dev.sh'.format(name=name)
with open(path, 'w') as f:
f.write("""#!/bin/bash
d=$(dirname "$0")
cd "$d"
LOG_DIR=${{LOG_DIR:-../logs}}
if [[ node_modules -ot .git ]]; then
# Update npm, otherwise assume nothing changed and we're good.
npm install >& "$LOG_DIR/deck.log"
else
echo "deck npm node_modules looks up to date already."
fi
# Append to the log file we just started.
bash -c "(npm start >> '$LOG_DIR/{name}.log') 2>&1\
| tee -a '$LOG_DIR/{name}.log' >& '$LOG_DIR/{name}.err' &"
""".format(name=name))
os.chmod(path, 0777)
def update_spinnaker_run_scripts(self):
"""Regenerate the local dev_run.sh script for each local repository."""
for repository in self.__REQUIRED_REPOSITORIES:
name = repository.name
if not os.path.exists(name):
continue
if name == 'deck':
self.write_deck_run_script(repository)
else:
self.write_gradle_run_script(repository)
@classmethod
def init_extra_argument_parser(cls, parser):
"""Initialize additional arguments for managing remote repositories.
This is to sync the origin and upstream repositories. The intent
is to ultimately sync the origin from the upstream repository, but
this might be in two steps so the upstream can be verified [again]
before pushing the changes to the origin.
"""
# Note that we only pull the master branch from upstream.
# Pulling other branches don't normally make sense.
parser.add_argument('--pull_upstream', default=False,
action='store_true',
help='If the local branch is master, then refresh it'
' from the upstream repository.'
' Otherwise leave as is.')
parser.add_argument('--nopull_upstream',
dest='pull_upstream',
action='store_false')
# Note we only push target branches to origin specified by --push_branch
# To push another branch, you must explicitly push it with git
# (or another invocation).
parser.add_argument('--push_master', action='store_true',
help='Push the current branch to origin if it is'
' master. This is the same as --push_branch=master.')
parser.add_argument('--nopush_master', dest='push_master',
action='store_false')
parser.add_argument('--push_branch', default='',
help='If specified and the local repository is in'
' this branch then push it to the origin'
' repository. Otherwise do not push it.')
@classmethod
def init_argument_parser(cls, parser):
"""Initialize command-line arguments."""
parser.add_argument('--use_https', default=True, action='store_true',
help='Use https when cloning github repositories.')
parser.add_argument('--use_ssh', dest='use_https', action='store_false',
help='Use SSH when cloning github repositories.')
parser.add_argument('--add_upstream', default=True,
action='store_true',
help='Add upstream repository when cloning.')
parser.add_argument('--noadd_upstream', dest='add_upstream',
action='store_false')
parser.add_argument('--disable_upstream_push', default=True,
action='store_true',
help='Disable future pushes to the upstream'
' repository when cloning a repository.')
parser.add_argument('--nodisable_upstream_push',
dest='disable_upstream_push',
action='store_false')
parser.add_argument('--pull_origin', default=False,
action='store_true',
help='Refresh the local branch from the origin.'
' If cloning, then clone the master branch.'
' See --pull_branch for a more general option.')
parser.add_argument('--nopull_origin', dest='pull_origin',
action='store_false')
parser.add_argument('--pull_branch', default='',
help='Refresh the local branch from the origin if'
' it is in the specified branch,'
' otherwise skip it.'
' If cloning, then clone this branch.')
parser.add_argument('--force_pull', default=False,
help='Force pulls, even if the current branch'
' differs from the pulled branch.')
parser.add_argument(
'--extra_repos', default=None,
help='A comma-delimited list of name=owner optional repositories.'
'name is the repository name,'
' owner is the authoritative github user name owning it.'
' The --github_user will still be used to determine the origin.')
parser.add_argument('--github_user', default=None,
help='Pull from this github user\'s repositories.'
' If the user is "default" then use the'
' authoritative (upstream) repository.')
parser.add_argument('--update_run_scripts', default=True,
action='store_true',
help='Update the run script for each component.')
parser.add_argument('--noupdate_run_scripts',
dest='update_run_scripts',
action='store_false')
@classmethod
def main(cls):
parser = argparse.ArgumentParser()
cls.init_argument_parser(parser)
cls.init_extra_argument_parser(parser)
options = parser.parse_args()
refresher = cls(options)
in_repository_url = refresher.get_remote_repository_url('.')
if in_repository_url:
sys.stderr.write(
'ERROR: You cannot run this script from within a local repository.\n'
' This directory is from "{url}".\n'
' Did you intend to be in the parent directory?\n'
.format(url=in_repository_url))
return -1
try:
# This is ok. Really we want to look for an exception validating these
# properties so we can fail with a friendly error rather than stack.
if (refresher.pull_branch != refresher.push_branch
and refresher.pull_branch and refresher.push_branch):
sys.stderr.write(
'WARNING: pulling branch {pull} and pushing branch {push}'
.format(pull=refresher.pull_branch,
push=refresher.push_branch))
except Exception as ex:
sys.stderr.write('FAILURE: {0}\n'.format(ex.message))
return -1
nothing = True
if options.pull_upstream:
nothing = False
refresher.pull_all_from_upstream_if_master()
if refresher.push_branch:
nothing = False
refresher.push_all_to_origin_if_target_branch()
if refresher.pull_branch:
nothing = False
refresher.pull_all_from_origin()
if options.update_run_scripts:
print 'Updating Spinnaker component run scripts'
refresher.update_spinnaker_run_scripts()
if nothing:
sys.stderr.write('No pull/push options were specified.\n')
else:
print 'DONE'
return 0
# Script entry point: the process exit status is whatever Refresher.main()
# returns (0 on success, -1 on usage/validation errors).
if __name__ == '__main__':
  sys.exit(Refresher.main())
| {
"content_hash": "79c60d604e2e3524d5d2ae1f7bb33f56",
"timestamp": "",
"source": "github",
"line_count": 593,
"max_line_length": 80,
"avg_line_length": 40.693086003372684,
"alnum_prop": 0.6109983009406987,
"repo_name": "Roshan2017/spinnaker",
"id": "370581ec148f75ddbc9c5d65f0337270e7036240",
"size": "24748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/refresh_source.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "8650"
},
{
"name": "Python",
"bytes": "776206"
},
{
"name": "Shell",
"bytes": "149555"
}
],
"symlink_target": ""
} |
from pocket_change.ui import core
from pocket_change import sqlalchemy_db
from flask import render_template
@core.route('/case_execution_details/<int:case_execution_id>')
def case_execution_details(case_execution_id):
    """Render the details page for one CaseExecution record.

    Raises if the id does not match exactly one row (``.one()``).
    """
    model = sqlalchemy_db.models['CaseExecution']
    query = sqlalchemy_db.session.query(model)
    record = query.filter(model.id == case_execution_id).one()
    return render_template('case_execution_details.html',
                           case_execution=record)
"content_hash": "f5def90c776e8bfa6e7992ae86e34ba7",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 38.57142857142857,
"alnum_prop": 0.687037037037037,
"repo_name": "psusloparov/sneeze",
"id": "0504a9662b87d57b314891b681ca6d4cdbf5a238",
"size": "540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pocket_change/pocket_change/ui/views/case_execution_details.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "26776"
},
{
"name": "Java",
"bytes": "9088"
},
{
"name": "JavaScript",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "96033"
}
],
"symlink_target": ""
} |
from functools import update_wrapper
class Task:
    """Callable wrapper that adopts the metadata of the wrapped function.

    update_wrapper copies __name__/__doc__/etc. from *func* and also sets
    self.__wrapped__ = func, which __call__ delegates to.
    """
    def __init__(self, func):
        # Pre-seed the attribute; update_wrapper overwrites it with func.
        self.__wrapped__ = None
        update_wrapper(self, func)
    def __call__(self, *args, **kwargs):
        wrapped = self.__wrapped__
        return wrapped(*args, **kwargs)
    def __str__(self):
        template = "<{cls} {name}()>"
        return template.format(cls=type(self).__name__,
                               name=self.__wrapped__.__name__)
    def __repr__(self):
        return self.__str__()
task = Task
| {
"content_hash": "d669f8f62b896cf69d5965437ae64738",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 71,
"avg_line_length": 21.476190476190474,
"alnum_prop": 0.5232815964523282,
"repo_name": "odrling/peony-twitter",
"id": "5f3354eacc00904a3365655e408328bb8d421ba7",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peony/commands/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1149"
},
{
"name": "Python",
"bytes": "215277"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
sys.path.insert(0, os.path.abspath(os.pardir))
import pyos
from util import option_chooser
pyos.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyos.set_credential_file(creds_file)
cm = pyos.cloud_monitoring
# We need the IP address of the entity for this check
ents = cm.list_entities()
if not ents:
print("You must create an entity before you can create a notification.")
sys.exit()
print("Select the entity on which you wish to create the notification:")
ent = option_chooser(ents, attr="name")
entity = ents[ent]
print(entity
)
checks = entity.list_checks()
print("Select a check to notify about:")
check_num = option_chooser(checks, attr="label")
check = checks[check_num]
plans = cm.list_notification_plans()
plan_num = option_chooser(plans, attr="label")
plan = plans[plan_num]
# Create an alarm which causes your notification plan's `warning` to be
# notified whenever the average ping time goes over 5 seconds. Otherwise,
# the status will be `ok`.
alarm = cm.create_alarm(entity, check, plan,
("if (rate(metric['average']) > 5) { return new AlarmStatus(WARNING); } "
"return new AlarmStatus(OK);"), label="sample alarm")
print("Created Alarm %s" % alarm.id)
| {
"content_hash": "c8ea9eb6d5f5d843c30b5d90073f9a23",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 30.53488372093023,
"alnum_prop": 0.7273419649657273,
"repo_name": "emonty/pyos",
"id": "1e30bb71fbebff719c28b83a969c5db0a3162755",
"size": "1997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/cloud_monitoring/create_alarm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1097643"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Populate the admin site from each installed app's admin module.
admin.autodiscover()
# NOTE(review): patterns() with dotted-string view paths is old-style
# Django URL configuration (removed in Django 1.10) -- keep in mind when
# upgrading.  Order matters: the first matching regex wins.
urlpatterns = patterns('',
    # Examples:
    # url(r'^blog/', include('blog.urls')),
    # Dashboard / landing page.
    url(r'^$', 'opos.views.dashboard', name='dashboard'),
    url(r'^admin/', include(admin.site.urls)),
    # Customer management views; customerpk is captured from the URL.
    url(r'^customers/$', 'opos.views.customers', name='customers'),
    url(r'^customers/edit/(?P<customerpk>.*)/$', 'opos.views.customeredit', name='customer-edit'),
    url(r'^customers/add/$', 'opos.views.customeradd', name='customer-add'),
    url(r'^customers/sales/(?P<customerpk>.*)/$', 'opos.views.customersales', name='customer-sales'),
    # Authentication (stock django.contrib.auth views).
    url(r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login', name='logout'),
    url(r'^accounts/login/$', 'django.contrib.auth.views.login', name='login'),
    # Self-service debt check.
    url(r'^self/debt/$', 'opos.views.selfdebtcheck', name='self-debtcheck'),
)
| {
"content_hash": "66041b91af062c32298b41ccd8a3de5a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 101,
"avg_line_length": 41.13636363636363,
"alnum_prop": 0.6607734806629835,
"repo_name": "Pr0jectX/O2",
"id": "4b47038bbd182ba01ec97d91bbe29347698d20f6",
"size": "905",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "O2/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29"
},
{
"name": "HTML",
"bytes": "5929"
},
{
"name": "Python",
"bytes": "41266"
}
],
"symlink_target": ""
} |
'''
This file is part of uvscada
Licensed under 2 clause BSD license, see COPYING for details
'''
'''
smartctl --scan doesn't seem to work very well, it misses my external HDD
Bug or feature?
[mcmaster@gespenst icons]$ hddtemp /dev/sda
WARNING: Drive /dev/sda doesn't seem to have a temperature sensor.
WARNING: This doesn't mean it hasn't got one.
WARNING: If you are sure it has one, please contact me (hddtemp@guzu.net).
WARNING: See --help, --debug and --drivebase options.
/dev/sda: OCZ-AGILITY: no sensor
[root@gespenst ~]# hddtemp /dev/sdb
/dev/sdb: WDC WD5000BEVT-00A0RT0: drive supported, but it doesn't have a temperature sensor.
palimpsest indicates a temperature, though
'''
__author__ = "John McMaster"
import sys
from PyQt4 import QtGui, QtCore
from operator import itemgetter
from execute import Execute
import re
import os
import os.path
from util import print_debug
class Device:
    """One monitored disk device plus its observed temperature extremes."""
    # Class-level defaults; real values are assigned per instance by the
    # caller (MainWindow.regen_devices assigns file_name / value_label).
    file_name = None
    temperature = None
    dev_label = None
    value_label = None
    session_high = None
    session_low = None
    def temp_str(self):
        """Return a display string with current/worst/session temperatures.

        Queries the device through get_temperature() and updates the
        session low/high water marks.  Returns '<file>: <N/A>' when no
        reading is available.
        """
        raw = get_temperature(self.file_name)
        if raw is None:
            return '%s: <N/A>' % self.file_name
        (cur_temp, worst_temp) = raw
        # Bug fix: compare against None explicitly.  A legitimate reading
        # of 0.0 degrees is falsy, so the old truthiness tests silently
        # restarted the session extremes whenever the temperature hit 0.
        if self.session_high is None:
            self.session_high = cur_temp
        else:
            self.session_high = max(cur_temp, self.session_high)
        if self.session_low is None:
            self.session_low = cur_temp
        else:
            self.session_low = min(cur_temp, self.session_low)
        return '%s: cur: %0.1f, worst ever: %0.1f, low: %0.1f, high: %0.1f' % (self.file_name, cur_temp, worst_temp, self.session_low, self.session_high)
class MainWindow(QtGui.QWidget):
    """Top-level window listing each detected HDD and its temperature.

    A 1 second QTimer drives update(), which rescans /dev and refreshes
    the per-device labels.
    """
    # NOTE(review): class-level mutable dict shared by all instances; fine
    # for the single-window usage below, but instance state in spirit.
    devices = dict()
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.resize(800, 600)
        self.setWindowTitle('UVNet temperature monitor')
        self.setWindowIcon(QtGui.QIcon('icons/uvnet.png'))
        self.layout = QtGui.QVBoxLayout()
        #self.layout.setSpacing(1)
        # NOTE(review): 'exit' shadows the builtin; harmless locally.
        exit = QtGui.QAction('Quit', self)
        exit.setShortcut('Ctrl+Q')
        self.connect(exit, QtCore.SIGNAL('triggered()'), QtCore.SLOT('close()'))
        # Poll for device / temperature changes once a second.
        self.timer = QtCore.QTimer()
        QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout()"), self.update)
        self.timer.start(1000);
        '''
        self.menubar = QtGui.QMenuBar(self)
        file = self.menubar.addMenu('&File')
        file.addAction(exit)
        self.layout.setMenuBar(self.menubar)
        '''
        self.regen_devices()
        self.setLayout(self.layout)
    def get_hdd_device_file_names(self):
        """Return the set of /dev/[hs]d[a-z] device file names present."""
        ret = set()
        for dir in os.listdir("/dev"):
            # re.match anchors at the start, so partitions (e.g. sda1)
            # collapse to their base device via raw.group().
            raw = re.match("[hs]d[a-z]", dir)
            if raw:
                ret.add(os.path.join("/dev", raw.group()))
        print_debug(ret)
        return ret
    def regen_devices(self):
        """Sync self.devices and the labels with the devices now present.

        Creates label widgets for newly seen devices, refreshes labels for
        known ones, and removes widgets for devices that disappeared.
        """
        cur_devices = self.get_hdd_device_file_names()
        row = 0
        for device_file_name in sorted(cur_devices):
            cur_devices.add(device_file_name)
            if self.devices and device_file_name in self.devices:
                # old device, update
                device = self.devices[device_file_name]
                #device.value_label.setText(device.temp_str())
                device.value_label.setText(device.temp_str())
                continue
            else:
                # new device
                device = Device()
                print '***added %s' % device_file_name
                device.file_name = device_file_name
                #device.dev_label = QtGui.QLabel(self)
                #device.dev_label.setText(device.file_name)
                device.value_label = QtGui.QLabel(self)
                to_set = device.temp_str()
                print to_set
                device.value_label.setText(to_set)
                # Only the first branch is live; the others are kept as
                # experiments with alternative layouts.
                if True:
                    self.layout.addWidget(device.value_label)
                elif True:
                    self.layout.addWidget(device.dev_label, row, 0)
                    self.layout.addWidget(device.value_label, row, 1)
                else:
                    device.layout = QtGui.QHBoxLayout()
                    device.layout.addWidget(device.dev_label)
                    device.layout.addWidget(device.value_label)
                    print 'setting up layout'
                    device.widget = QtGui.QWidget(self)
                    print 'setting up layout 2'
                    device.widget.setLayout(self.layout)
                    print 'setting up layout 3'
                    # Does not like this
                    self.layout.addWidget(device.widget)
                    print 'setting up layout done'
                #self.devices.add(device)
                self.devices[device.file_name] = device
                row += 1
        # Get rid of removed HDDs
        old_devices = set(self.devices)
        print_debug('cur devices: %s' % cur_devices)
        print_debug('old devices: %s' % old_devices)
        removed_devices = old_devices - cur_devices
        print_debug('removed devices: %s' % removed_devices)
        for device_file_name in removed_devices:
            print '***removed %s' % device_file_name
            device = self.devices[device_file_name]
            if True:
                #self.layout.removeWidget(device.dev_label)
                self.layout.removeWidget(device.value_label)
                device.value_label.setParent(None)
            else:
                self.layout.removeWidget(device.layout)
            del self.devices[device_file_name]
    # NOTE(review): this references self.file_name, which MainWindow never
    # defines -- it looks copy-pasted from Device; confirm before relying
    # on MainWindow comparisons.
    def __cmp__(self, other):
        print 'cmp'
        return self.file_name.__cmp__(other.file_name)
    def update(self):
        """Timer callback: rescan devices and refresh the labels."""
        print_debug()
        print_debug('update')
        self.regen_devices()
    def __del__(self):
        pass
def get_temperature(device):
    """Return (current_temp, worst_temp) for *device*, or None if unknown.

    Dispatches to the smartctl-based reader.  A hddtemp-based reader
    (get_temperature_by_hddtemp) exists but is currently disabled; the
    original unreachable fallthrough call to it was removed.
    """
    # multiplatform
    return get_temperature_by_smartctl(device)
def get_temperature_by_hddtemp(device):
    """Disabled hddtemp-based temperature reader; always returns None.

    The original subprocess implementation (invoking ``hddtemp`` on
    *device* and parsing the ``194 Temperature_Celsius`` SMART attribute
    row) is intentionally disabled -- hddtemp reports "no sensor" for some
    drives that smartctl can read.  The old code remains in version
    control history; callers should use get_temperature_by_smartctl().
    """
    return None
def get_temperature_by_smartctl(device):
    '''Read (current, worst) temperatures for *device* via ``smartctl -a``.

    Scans the SMART attribute table for the ``194 Temperature_Celsius``
    row, e.g.::

      194 Temperature_Celsius 0x0022 118 097 000 Old_age Always - 29

    where whitespace-split field 4 is the normalized WORST value and
    field 9 is the current temperature (degrees C).  Returns a
    (cur_temp, worst_temp) float tuple, or None when smartctl produced no
    output or the attribute row is absent.  (Historic notes on smartctl
    exit codes and example transcripts were condensed; see version
    control history.)
    '''
    command = "smartctl"
    args = list()
    args.append('-a')
    args.append(device)
    # go go go
    (rc, output) = Execute.with_output(command, args)
    # smartctl exit codes are too obscure to interpret reliably; just try
    # to parse whatever output we got and bail out when that fails.
    if output is None:
        return None
    print_debug()
    print_debug()
    print_debug(output)
    print_debug()
    print_debug()
    # 194 Temperature_Celsius 0x0022 117 097 000 Old_age Always - 30
    match = re.search(".*Temperature_Celsius.*", output)
    if match is None:
        return None
    line = match.group()
    if line is None:
        return None
    print_debug('line: %s' % repr(line))
    fields = line.split()
    worst_temp = float(fields[4])
    print_debug('worst: %s' % worst_temp)
    cur_temp = float(fields[9])
    print_debug('cur: %s' % cur_temp)
    return (cur_temp, worst_temp)
# Script entry point: run the Qt temperature-monitor GUI.
if __name__ == "__main__":
    if False:
        # Dead debug branch (disabled by `if False`).
        # NOTE(review): get_hdd_devices() is not defined anywhere in this
        # module -- it appears to predate
        # MainWindow.get_hdd_device_file_names(); fix before enabling.
        for dev in get_hdd_devices():
            print 'Fetching %s' % dev
            raw = get_temperature(dev)
            if raw:
                (cur_temp, worst_temp) = raw
        sys.exit(1)
    app = QtGui.QApplication(sys.argv)
    main = MainWindow()
    main.show()
    rc = app.exec_()
    # Destroy early so modules don't get unloaded
    app = None
    main = None
    sys.exit(rc)
| {
"content_hash": "5b3c965650d48d7bb1cb05091c6b2f9a",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 152,
"avg_line_length": 29.994594594594595,
"alnum_prop": 0.6473238421337177,
"repo_name": "JohnDMcMaster/uvscada",
"id": "e565cc7ac4c4c22340aa255a25e8652730339e1b",
"size": "11098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hdd/main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "24934"
},
{
"name": "C++",
"bytes": "19099"
},
{
"name": "CMake",
"bytes": "724"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "353081"
},
{
"name": "Shell",
"bytes": "559"
}
],
"symlink_target": ""
} |
"""Defines fixtures available to all tests."""
import pytest
from webtest import TestApp
from op_mon.app import create_app
from op_mon.database import db as _db
from op_mon.settings import TestConfig
from .factories import UserFactory
@pytest.fixture(scope='function')
def app():
    """An application for the tests.

    Pushes a request context for the duration of the test and pops it on
    teardown.  (Upgraded from the deprecated ``pytest.yield_fixture``,
    removed in pytest 6.2; ``pytest.fixture`` has supported yield-style
    fixtures since pytest 3.0.)
    """
    _app = create_app(TestConfig)
    ctx = _app.test_request_context()
    ctx.push()
    yield _app
    ctx.pop()
@pytest.fixture(scope='function')
def testapp(app):
    """A Webtest app."""
    wrapped = TestApp(app)
    return wrapped
@pytest.fixture(scope='function')
def db(app):
    """A database for the tests.

    Creates all tables before the test and drops them afterwards.
    (Upgraded from the deprecated ``pytest.yield_fixture``, removed in
    pytest 6.2; ``pytest.fixture`` supports yield fixtures natively.)
    """
    _db.app = app
    with app.app_context():
        _db.create_all()
    yield _db
    # Explicitly close DB connection
    _db.session.close()
    _db.drop_all()
@pytest.fixture
def user(db):
    """A committed, factory-built user for the tests."""
    new_user = UserFactory(password='myprecious')
    db.session.commit()
    return new_user
| {
"content_hash": "bd578b888a02f469a3aca484fdbbbc26",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 46,
"avg_line_length": 19.2,
"alnum_prop": 0.6572916666666667,
"repo_name": "joyider/op_mon",
"id": "b7f8491177ba06968834e451154edf840af6f6bc",
"size": "984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "228476"
},
{
"name": "CoffeeScript",
"bytes": "83631"
},
{
"name": "HTML",
"bytes": "133029"
},
{
"name": "JavaScript",
"bytes": "1142336"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "64114"
},
{
"name": "Shell",
"bytes": "444"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the tangible template for the small evil-fire lair bones POI."""
    tangible = Tangible()
    tangible.template = "object/tangible/lair/base/shared_poi_all_lair_bones_evil_fire_small.iff"
    tangible.attribute_template_id = -1
    tangible.stfName("lair_n","bones")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return tangible
"content_hash": "675eca64a064a8f1142833ab5cf9ddeb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 92,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.692063492063492,
"repo_name": "obi-two/Rebelion",
"id": "09371a3f0212f5a1c3b521d8a8eedeb5b7f5a0bc",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_bones_evil_fire_small.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""Python client for Google Calendar V3 REST API.
"""
__author__ = 'static@siftcal.com (Ashutosh Priyadarshy)'
from requests_oauthlib import OAuth2Session
class GoogleCalendarAPI(object):
def __init__(self, client_id=None, client_secret=None,
acc_token=None, ref_token=None, expires_in=None,
token_updater=None):
"""Construct a new authenticated instance of GoogleCalendarAPI V3.
:param client_id: Client Id obtained in application creation.
:param client_secret: Client Secret obtained in application creation.
:param access_token: Token obtained via standard OAuth2 flow.
:param refresh_token: Additional token obtained via standard OAuth2 flow.
:param expires_in: Time until access_token expires.
:param auto_refresh_url: HTTP endpoint to request new access token on refresh.
:param token_updater: Method with one argument, token, to be used to update
your token database on automatic token refresh. If not
set a TokenUpdated warning will be raised when a token
has been refreshed. This warning will carry the token
in its token argument.
:param kwargs: Arguments to pass to the Session (requests.session) constructor.
"""
self.refresh_url = u'https://accounts.google.com/o/oauth2/token'
self.base_url = u'https://www.googleapis.com/calendar/v3/'
self.client_id = client_id
self.client_secret = client_secret
self.acc_token = acc_token
self.ref_token = ref_token
self.expires_in = expires_in
token_dict = self.__construct_token_dictionary()
refresh_dict = self.__construct_refresh_dictionary()
self.session = OAuth2Session(client_id, token=token_dict,
auto_refresh_url=self.refresh_url,
auto_refresh_kwargs=refresh_dict,
token_updater=token_updater)
def __repr__(self):
return u'<GoogleCalendarAPI Instance>'
# Parameter constructors.
def __construct_token_dictionary(self):
return {u'access_token': self.acc_token,
u'refresh_token': self.ref_token,
u'token_type': u'Bearer',
u'expires_in': self.expires_in}
def __construct_refresh_dictionary(self):
return {u'client_id':self.client_id,
u'client_secret':self.client_secret}
# URL Construction helpers.
def __events_exturl_calendar_id(self, calendar_id):
return self.base_url + u'calendars/{calendarId}/events/'.format(calendarId=calendar_id)
def __events_exturl_calendar_id_event_id(self, calendar_id, event_id):
return self.__events_exturl_calendar_id(calendar_id) + u'{eventId}/'.format(eventId=event_id)
def __calendar_list_base_url(self):
return self.base_url + u'users/me/calendarList/'
def __calendar_list_ext_url_calendar_id(self, calendar_id):
return self.base_url + u'users/me/calendarList/{calendarId}/'.format(calendarId=calendar_id)
def __calendars_base_url(self):
return self.base_url + u'calendars/'
def __calendars_ext_url_calendar_id(self, calendar_id):
self.__calendars_base_url() + u'{calendarId}/'.format(calendarId=calendar_id)
def __settings_base_url(self):
return self.base_url + u'users/me/settings/'
def __acl_base_url(self, calendar_id):
return self.base_url + u'calendars/{calendarId}/acl/'.format(calendarId=calendar_id)
def __acl_ext_url_rule_id(self, calendar_id, rule_id):
return __acl_base_url(calendar_id) + u'{ruleId}/'.format(ruleId=rule_id)
# Acl Resource Calls.
def acl_delete(self, calendar_id, rule_id):
url = self.__acl_ext_url_rule_id(calendar_id, rule_id)
return self.session.delete(url)
def acl_get(self, calendar_id, rule_id):
url = self.__acl_ext_url_rule_id(calendar_id, rule_id)
return self.session.get(url)
def acl_insert(self, calendar_id, body):
url = self.__acl_base_url(calendar_id)
return self.session.post(url, data=body)
def acl_list(self, calendar_id, **kwargs):
url = self.__acl_base_url(calendar_id)
return self.session.get(url, {u'params':kwargs})
def acl_patch(self, calendar_id, rule_id, body):
url = self.__acl_ext_url_rule_id(calendar_id, rule_id)
return self.session.patch(url, data=body)
def acl_update(self, calendar_id, rule_id, body):
url = self.__acl_ext_url_rule_id(calendar_id, rule_id)
return self.session.put(url, data=body)
def acl_watch(self, calendarId, body):
url = self.__acl_base_url(calendar_id) + u'watch/'
return self.session.post(url, data=body)
# CalendarList Resource Calls.
def calendar_list_delete(self, calendar_id):
url = __calendar_list_ext_url_calendar_id(calendar_id)
return self.session.delete(url)
def calendar_list_get(self, calendar_id):
url = self.__calendar_list_ext_url_calendar_id(calendar_id)
return self.session.get(url)
def calendar_list_insert(self, body, **kwargs):
url = self.__calendar_list_base_url()
return self.session.post(url, data=body, **{'params':kwargs})
def calendar_list_list(self, **kwargs):
url = self.__calendar_list_base_url()
return self.session.get(url, **{'params':kwargs})
def calendar_list_patch(self, body, **kwargs):
url =self.__calendar_list_ext_url_calendar_id(calendar_id)
return self.session.patch(url, data=body, **{'params':kwargs})
def calendar_list_update(self, body, **kwargs):
url = self.__calendar_list_ext_url_calendar_id(calendar_id)
return self.session.put(url, data=body, **{'params':kwargs})
def calendar_list_watch(self, body):
url = self.__calendar_list_base_url() + u'watch/'
return self.session.post(url, data=body)
# Calendars Resource Calls.
def calendars_clear(self, calendar_id):
url = self.__calendars_ext_url_calendar_id(calendar_id) + u'clear/'
return self.session.post(url)
def calendars_delete(self, calendar_id):
url = self.__calendars_ext_url_calendar_id(calendar_id)
return self.session.delete(url)
def calendars_get(self, calendar_id):
url = self.__calendars_ext_url_calendar_id(calendar_id)
return self.session.get(url)
def calendars_insert(self, body):
url = self.__calendars_base_url()
return self.session.post(url, data=body)
def calendars_patch(self, calendar_id, body):
url = self.__calendars_ext_url_calendar_id(calendar_id)
return self.session.patch(url, data=body)
def calendars_update(self, calendar_id, body):
url = self.__calendars_ext_url_calendar_id(calendar_id)
return self.session.put(url, data=body)
# Colors Resource Calls.
def colors_get(self):
url = self.base_url + u'colors/'
return self.session.get(url)
# Events Resource Calls.
def events_delete(self, calendar_id, event_id, **kwargs):
url = self. __events_exturl_calendar_id_event_id(calendar_id, event_id)
return self.session.delete(url, **{'params':kwargs})
def events_get(self, calendar_id, event_id, **kwargs):
url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id)
return self.session.get(url, **{'params':kwargs})
def events_import(self, calendar_id, body):
url = self.__events_exturl_calendar_id(calendar_id) + u'import/'
return self.session.post(url, data=body, **{'params':kwargs})
def events_insert(self, calendar_id, body, **kwargs):
url = self.__events_exturl_calendar_id(calendar_id)
return self.session.post(url, data=body, **{'params':kwargs})
def events_instances(self, calendar_id, event_id, **kwargs):
url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id) + u'instances/'
return self.session.get(url, **{'params':kwargs})
def events_list(self, calendar_id, **kwargs):
url = self.__events_exturl_calendar_id(calendar_id)
return self.session.get(url, **{'params':kwargs})
def events_move(self, calendar_id, event_id, destination, **kwargs):
url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id) + u'move/'
kwargs[u'destination'] = destination # Add
return self.session.post(url, data=body, **{'params':kwargs})
def events_patch(self, calendar_id, event_id, body, **kwargs):
url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id)
return self.session.patch(url, data=body, **{'params':kwargs})
def events_quick_add(self, calendar_id, text, **kwargs):
url = self.__events_exturl_calendar_id(calendar_id) + u'quickAdd/'
kwargs[u'text'] = text
return self.session.post(url, **{'params':kwargs})
def events_update(self, calendar_id, event_id, body, **kwargs):
url = self.__events_exturl_calendar_id_event_id(calendar_id, event_id)
return self.session.put(url, data=body, **{'params':kwargs})
def events_watch(self, calendar_id, body):
url = self.__events_exturl_calendar_id(calendar_id) + u'watch/'
return self.session.post(url, data=body)
# Freebusy Resource Calls.
def freebusy_query(self, body):
url = self.base_url + u'freeBusy/'
return self.session.post(url, data=body)
# Settings Resource Calls.
def settings_get(self, setting):
url = self.__settings_base_url() + u'{setting}/'.format(setting=setting)
return self.session.get(url)
def settings_list(self, **kwargs):
url = self.__settings_base_url()
return self.session.get(url, **{u'params':kwargs})
def settings_watch(self, body):
url = self.__settings_base_url + u'watch/'
return self.session.post(url, data=body)
# Channels Resource Calls.
def channels_stop(self, body):
url = self.base_url + u'channels/stop/'
return self.session.post(url, data=body)
| {
"content_hash": "dd61a32ab3f2fa09ab5b571f2ccef32e",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 101,
"avg_line_length": 40.16015625,
"alnum_prop": 0.6347631553350841,
"repo_name": "priyadarshy/google-calendar-v3",
"id": "d31f507034cc0712927b58ff4afa5d580ac0c8b8",
"size": "10360",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "GoogleCalendarV3/google_calendar_v3/google_calendar_v3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11087"
}
],
"symlink_target": ""
} |
import os
from digoie.conf.storage import __reverb_input_dir__, __elastic_search_dir__, REVERB_INPUT_EXT
from digoie.core.http.stream.httpclient import HTTPClient
from digoie.core.http.elasticsearch.query import *
from digoie.core.http.elasticsearch.url import NAMESTREAM_URL, SENTENCESTREAM_URL
from digoie.core.parser.elasticsearch import *
from StringIO import StringIO
from digoie.utils.symbols import do_newline_symbol
def stream_names():
    """Write one reverb-input file per fetched name, one sentence per line.

    Files are created under __reverb_input_dir__ with the REVERB_INPUT_EXT
    suffix; each line is a sentence followed by the newline symbol.
    """
    names = fetch_names()
    for name in names:
        filename = name + REVERB_INPUT_EXT
        path = os.path.join(__reverb_input_dir__, filename)
        # 'with' guarantees the handle is closed even if fetch_sentences()
        # raises mid-stream (the original leaked the file on error).
        with open(path, 'wb') as sentence_file:
            sentence_file.writelines(
                line + do_newline_symbol() for line in fetch_sentences(name))
def fetch_names():
    """Fetch all names from elasticsearch and return them as a list of str.

    Side effect: the list is also persisted to <__elastic_search_dir__>/names,
    one name per line, for later inspection.
    """
    query = es_names_query()
    buf = StringIO()
    HTTPClient().fetch2buf(NAMESTREAM_URL, query, buf)
    names_json = buf.getvalue()
    names = [str(name) for name in parse_name(names_json)]
    # write names; 'with' closes the handle even if a write fails
    # (the original leaked the file on error).
    path = os.path.join(__elastic_search_dir__, 'names')
    with open(path, 'wb') as names_file:
        names_file.writelines(line + do_newline_symbol() for line in names)
    return names
def fetch_sentences(name):
print 'fetch sentences for name ' + name
query = es_sentences_query(name)
buf = StringIO()
HTTPClient().fetch2buf(SENTENCESTREAM_URL, query, buf)
sentences_json = buf.getvalue()
return parse_sentence(sentences_json) | {
"content_hash": "5fc3e2d9f42a479decab65fa5b0cac79",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 94,
"avg_line_length": 35.26190476190476,
"alnum_prop": 0.700877785280216,
"repo_name": "ZwEin27/digoie-annotation",
"id": "4a2c38f7d8c25e2e4f8d9510a30853c76deb4251",
"size": "1482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digoie/core/http/stream/names.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68116"
}
],
"symlink_target": ""
} |
"""Datasets class to provide images and labels in tf batch.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import abc
import json
import os
import tensorflow.compat.v1 as tf
from tensorflow.contrib.data import AUTOTUNE
from tensorflow.contrib.lookup import index_table_from_tensor
from preprocess import get_preprocess_fn
FLAGS = flags.FLAGS
class AbstractDataset(object):
  """Base class for datasets using the simplified input pipeline.

  Sub-classes must implement `_parse_fn` (declared abstract below) and a
  `_decode_fn` (used by `input_fn` but not declared here — presumably
  mandatory for subclasses; see DatasetImagenet for an example).
  """

  def __init__(self,
               filenames,
               reader,
               num_epochs,
               shuffle,
               shuffle_buffer_size=10000,
               random_seed=None,
               filter_fn=None,
               num_reader_threads=64,
               drop_remainder=True):
    """Creates a new dataset. Sub-classes have to implement _parse_fn().
    Args:
      filenames: A list of filenames.
      reader: A dataset reader, e.g. `tf.data.TFRecordDataset` or
        `tf.data.TextLineDataset` (must accept a `buffer_size` keyword).
      num_epochs: An int, defaults to `None`. Number of epochs to cycle
        through the dataset before stopping. If set to `None` this will read
        samples indefinitely.
      shuffle: A boolean, defaults to `False`. Whether output data are
        shuffled.
      shuffle_buffer_size: `int`, number of examples in the buffer for
        shuffling.
      random_seed: Optional int. Random seed for shuffle operation.
      filter_fn: Optional function to use for filtering dataset.
      num_reader_threads: An int, defaults to None. Number of threads reading
        from files. When `shuffle` is False, number of threads is set to 1. When
        using default value, there is one thread per filenames.
      drop_remainder: If true, then the last incomplete batch is dropped.
    """
    self.filenames = filenames
    self.reader = reader
    self.num_reader_threads = num_reader_threads
    self.num_epochs = num_epochs
    self.shuffle = shuffle
    self.shuffle_buffer_size = shuffle_buffer_size
    self.random_seed = random_seed
    self.drop_remainder = drop_remainder
    self.filter_fn = filter_fn
    # Additional options for optimizing TPU input pipelines.
    self.num_parallel_batches = 8

  def _make_source_dataset(self):
    """Reads the files in self.filenames and returns a `tf.data.Dataset`.
    This does not parse the examples!
    Returns:
      `tf.data.Dataset` repeated for self.num_epochs and shuffled if
      self.shuffle is `True`. Files are always read in parallel and sloppy.
    """
    # Shuffle the filenames to ensure better randomization.
    dataset = tf.data.Dataset.list_files(self.filenames, shuffle=self.shuffle,
                                         seed=self.random_seed)

    def fetch_dataset(filename):
      buffer_size = 8 * 1024 * 1024  # 8 MiB per file
      # Fix: honor the `reader` passed to the constructor. The original
      # hardcoded tf.data.TFRecordDataset here, silently ignoring the
      # `reader` argument it stored. (Assumes the reader accepts
      # `buffer_size`, true for TFRecordDataset and TextLineDataset.)
      dataset = self.reader(filename, buffer_size=buffer_size)
      return dataset

    # Read the data from disk in parallel
    dataset = dataset.apply(
        tf.data.experimental.parallel_interleave(
            fetch_dataset,
            cycle_length=self.num_reader_threads,
            sloppy=self.shuffle and self.random_seed is None))
    return dataset

  @abc.abstractmethod
  def _parse_fn(self, value):
    """Parses an image and its label from a serialized TFExample.
    Args:
      value: serialized string containing an TFExample.
    Returns:
      Returns a tuple of (image, label) from the TFExample.
    """
    raise NotImplementedError

  def input_fn(self, batch_size):
    """Input function which provides a single batch for train or eval.
    Args:
      batch_size: the batch size for the current shard. The # of shards is
        computed according to the input pipeline deployment. See
        tf.contrib.tpu.RunConfig for details.
    Returns:
      A `tf.data.Dataset` object.
    """
    dataset = self._make_source_dataset()
    # Use the fused map-and-batch operation.
    #
    # For XLA, we must used fixed shapes. Because we repeat the source training
    # dataset indefinitely, we can use `drop_remainder=True` to get fixed-size
    # batches without dropping any training examples.
    #
    # When evaluating, `drop_remainder=True` prevents accidentally evaluating
    # the same image twice by dropping the final batch if it is less than a full
    # batch size. As long as this validation is done with consistent batch size,
    # exactly the same images will be used.
    dataset = dataset.map(self._parse_fn,
                          num_parallel_calls=self.num_parallel_batches)
    # Possibly filter out data.
    if self.filter_fn is not None:
      dataset = dataset.filter(self.filter_fn)
    if FLAGS.cache_dataset:
      dataset = dataset.cache()
    dataset = dataset.repeat(self.num_epochs)
    if self.shuffle:
      dataset = dataset.shuffle(self.shuffle_buffer_size, seed=self.random_seed)
    # `_decode_fn` must be provided by the subclass (not declared abstract).
    dataset = dataset.map(self._decode_fn,
                          num_parallel_calls=self.num_parallel_batches)
    dataset = dataset.batch(batch_size=batch_size,
                            drop_remainder=self.drop_remainder)
    # Prefetch overlaps in-feed with training
    dataset = dataset.prefetch(AUTOTUNE)
    return dataset
def generate_sharded_filenames(filename):
  """Expands 'base@N' into the N shard names 'base-00000-of-0000N', ..."""
  base, shard_count = filename.split('@')
  shard_count = int(shard_count)
  pattern = '{}-{:05d}-of-{:05d}'
  return [pattern.format(base, shard, shard_count)
          for shard in range(shard_count)]
class DatasetImagenet(AbstractDataset):
  """Provides train/val/trainval/test splits for Imagenet data.
  -> trainval split represents official Imagenet train split.
  -> train split is derived by taking the first 984 of 1024 shards of
     the official training data.
  -> val split is derived by taking the last 40 shards of the official
     training data.
  -> test split represents official Imagenet test split.
  """
  # Examples per split; train + val = trainval.
  COUNTS = {'train': 1231121,
            'val': 50046,
            'trainval': 1281167,
            'test': 50000}
  NUM_CLASSES = 1000
  # TFExample feature keys.
  IMAGE_KEY = 'image/encoded'
  LABEL_KEY = 'image/class/label'
  FLAG_KEY = 'image/class/label_flag'
  FILENAME_KEY = 'image/filename'
  FEATURE_MAP = {
      IMAGE_KEY: tf.FixedLenFeature(shape=[], dtype=tf.string),
      LABEL_KEY: tf.FixedLenFeature(shape=[], dtype=tf.int64),
      FILENAME_KEY: tf.FixedLenFeature(shape=[], dtype=tf.string),
  }
  # Stored labels are 1-based; subtracted in _decode_fn to get [0, 1000).
  LABEL_OFFSET = 1
  def __init__(self,
               split_name,
               preprocess_fn,
               num_epochs,
               shuffle,
               random_seed=None,
               filter_filename=None,
               drop_remainder=True):
    """Initialize the dataset object.
    Args:
      split_name: A string split name, to load from the dataset.
      preprocess_fn: Preprocess a single example. The example is already
        parsed into a dictionary.
      num_epochs: An int, defaults to `None`. Number of epochs to cycle
        through the dataset before stopping. If set to `None` this will read
        samples indefinitely.
      shuffle: A boolean, defaults to `False`. Whether output data are
        shuffled.
      random_seed: Optional int. Random seed for shuffle operation.
      filter_filename: Optional filename to use for filtering.
      drop_remainder: If true, then the last incomplete batch is dropped.
    """
    # This is an instance-variable instead of a class-variable because it
    # depends on FLAGS, which is not parsed yet at class-parse-time.
    files = os.path.join(os.path.expanduser(FLAGS.dataset_dir),
                         'image_imagenet-%s@%i')
    # train/val are carved out of the 1024 official training shards;
    # the last 40 shards become the validation split.
    filenames = {
        'train': generate_sharded_filenames(files % ('train', 1024))[:-40],
        'val': generate_sharded_filenames(files % ('train', 1024))[-40:],
        'trainval': generate_sharded_filenames(files % ('train', 1024)),
        'test': generate_sharded_filenames(files % ('dev', 128))
    }
    super(DatasetImagenet, self).__init__(
        filenames=filenames[split_name],
        reader=tf.data.TFRecordDataset,
        num_epochs=num_epochs,
        shuffle=shuffle,
        random_seed=random_seed,
        filter_fn=self.get_filter() if filter_filename is not None else None,
        drop_remainder=drop_remainder)
    self.split_name = split_name
    self.preprocess_fn = preprocess_fn
    self.filename_list = None
    if filter_filename is not None:
      # Build a lookup table mapping whitelisted filenames -> index;
      # default_value=-1 marks filenames to be filtered out (see get_filter).
      with tf.gfile.Open(filter_filename, 'r') as f:
        filename_list = json.load(f)
      filename_list = tf.constant(filename_list['values'])
      filename_list = index_table_from_tensor(
          mapping=filename_list, num_oov_buckets=0, default_value=-1)
      self.filename_list = filename_list
  def _parse_fn(self, value):
    """Parses an image and its label from a serialized TFExample.
    Args:
      value: serialized string containing an TFExample.
    Returns:
      Returns a tuple of (image, label) from the TFExample.
    """
    if FLAGS.get_flag_value('pseudo_label_key', None):
      self.ORIGINAL_LABEL_KEY = FLAGS.get_flag_value(
          'original_label_key', None)
      assert self.ORIGINAL_LABEL_KEY is not None, (
          'You must set original_label_key for pseudo labeling.')
      # Replace original label_key with pseudo_label_key.
      # NOTE(review): `self.LABEL_KEY = ...` shadows the class attribute per
      # instance, but `self.FEATURE_MAP.update(...)` mutates the CLASS-level
      # dict shared by all instances of DatasetImagenet.
      self.LABEL_KEY = FLAGS.get_flag_value('pseudo_label_key', None)
      self.FEATURE_MAP.update({
          self.LABEL_KEY: tf.FixedLenFeature(shape=[], dtype=tf.int64),
          self.ORIGINAL_LABEL_KEY: tf.FixedLenFeature(
              shape=[], dtype=tf.int64),
          self.FLAG_KEY: tf.FixedLenFeature(shape=[], dtype=tf.int64),
      })
    return tf.parse_single_example(value, self.FEATURE_MAP)
  def _decode_fn(self, example):
    """Decodes the JPEG image and shifts the label into [0, NUM_CLASSES)."""
    image = tf.image.decode_jpeg(example[self.IMAGE_KEY], channels=3)
    # Subtract LABEL_OFFSET so that labels are in [0, 1000).
    label = tf.cast(example[self.LABEL_KEY], tf.int32) - self.LABEL_OFFSET
    if FLAGS.get_flag_value('pseudo_label_key', None):
      # Always use original label for val / test set.
      label_original = tf.cast(example[self.ORIGINAL_LABEL_KEY],
                               tf.int32) - self.LABEL_OFFSET
      if self.split_name in ['val', 'test']:
        label = label_original
      elif self.split_name in ['train', 'trainval']:
        # label_flag == 1 selects the original label over the pseudo label.
        label_flag = tf.cast(example[self.FLAG_KEY], tf.int32)
        label = tf.cond(
            tf.math.equal(label_flag, tf.constant(1, dtype=tf.int32)),
            true_fn=lambda: label_original,
            false_fn=lambda: label)
      else:
        raise ValueError('Unkown split{}'.format(self.split_name))
    return self.preprocess_fn({'image': image, 'label': label})
  def get_filter(self):  # pylint: disable=missing-docstring
    # Returns a predicate keeping only examples whose filename appears in
    # the lookup table built from filter_filename (index >= 0).
    def _filter_fn(example):
      index = self.filename_list.lookup(example[self.FILENAME_KEY])
      return tf.math.greater_equal(index, 0)
    return _filter_fn
# Maps the --dataset flag value to its Dataset implementation class.
DATASET_MAP = {
    'imagenet': DatasetImagenet,
}
def get_data_batch(batch_size,  # pylint: disable=missing-docstring
                   split_name,
                   is_training,
                   preprocessing,
                   filename_list=None,
                   shuffle=True,
                   num_epochs=None,
                   drop_remainder=False):
  """Builds one batched `tf.data.Dataset` for the dataset in FLAGS.dataset.

  Instantiates DATASET_MAP[FLAGS.dataset] with the given split and
  preprocessing options and returns its `input_fn(batch_size)` result.
  """
  dataset = DATASET_MAP[FLAGS.dataset]
  preprocess_fn = get_preprocess_fn(preprocessing, is_training)
  return dataset(
      split_name=split_name,
      preprocess_fn=preprocess_fn,
      shuffle=shuffle,
      num_epochs=num_epochs,
      random_seed=FLAGS.random_seed,
      filter_filename=filename_list,
      drop_remainder=drop_remainder).input_fn(batch_size)
def get_data(params,
             split_name,
             is_training,
             shuffle=True,
             num_epochs=None,
             drop_remainder=False,
             preprocessing=None):
  """Produces image/label tensors for a given dataset.
  Args:
    params: dictionary with `batch_size` entry (thanks TPU...).
    split_name: data split, e.g. train, val, test
    is_training: whether to run pre-processing in train or test mode.
    shuffle: if True, shuffles the data
    num_epochs: number of epochs. If None, proceeds indefenitely
    drop_remainder: Drop remainings examples in the last dataset batch. It is
      useful for third party checkpoints with fixed batch size.
    preprocessing: a string that encodes preprocessing.
  Returns:
    image, label, example counts
  """
  batch_mult = FLAGS.unsup_batch_mult if is_training else 1
  filename_list = None
  # First (unsupervised) batch: always unfiltered (filename_list is None
  # here; it is only assigned below, for the supervised batch).
  data = get_data_batch(int(params['batch_size'] * batch_mult),
                        split_name, is_training,
                        preprocessing, filename_list,
                        shuffle, num_epochs,
                        drop_remainder)
  if is_training:
    if FLAGS.filename_list_template:
      # Explicitly filter labelled samples by specific filenames
      filename_list = FLAGS.filename_list_template.format(
          FLAGS.num_supervised_examples)
    preproc = FLAGS.sup_preprocessing
  else:
    preproc = FLAGS.get_flag_value('sup_preprocessing_eval',
                                   FLAGS.sup_preprocessing)
  # Second (supervised) batch: possibly filtered and with its own
  # preprocessing; zipped element-wise with the unsupervised batch.
  sup_data = get_data_batch(params['batch_size'],
                            split_name, is_training,
                            preproc, filename_list,
                            shuffle, num_epochs,
                            drop_remainder)
  data = tf.data.Dataset.zip((data, sup_data))
  # NOTE: y['label'] is not actually used, but it's required by
  # Tensorflow's tf.Estimator and tf. TPUEstimator API.
  return data.map(lambda x, y: ((x, y), y['label']))
def get_count(split_name):
  """Returns the number of examples in *split_name* for the active dataset."""
  dataset_cls = DATASET_MAP[FLAGS.dataset]
  return dataset_cls.COUNTS[split_name]
def get_num_classes():
  """Returns the class count of the dataset selected by FLAGS.dataset."""
  dataset_cls = DATASET_MAP[FLAGS.dataset]
  return dataset_cls.NUM_CLASSES
def get_auxiliary_num_classes():
  """Returns the auxiliary class count (same as the main class count here)."""
  dataset_cls = DATASET_MAP[FLAGS.dataset]
  return dataset_cls.NUM_CLASSES
| {
"content_hash": "0266af290aa513d31426f8ed088e5dad",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 80,
"avg_line_length": 36.467362924281986,
"alnum_prop": 0.6412257464022338,
"repo_name": "google-research/s4l",
"id": "61ebbf2a0705cc3457e2bdc56464df4b009e43cf",
"size": "14543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datasets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "211462"
},
{
"name": "Shell",
"bytes": "13700"
}
],
"symlink_target": ""
} |
class RawResult:
    """A scraped search result: a title, its authors, and a download URL."""

    def __init__(self, title, authors, url):
        self.title = title
        self.authors = authors
        self.url = url

    @property
    def can_download(self):
        """True when a non-empty download URL is known."""
        return True if self.url else False

    def __repr__(self):
        return '%s(title=%r, authors=%r, url=%r)' % (
            type(self).__name__, self.title, self.authors, self.url)
| {
"content_hash": "f2e7d616bf1794c5195e1cbab65c326f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 57,
"avg_line_length": 25.647058823529413,
"alnum_prop": 0.49311926605504586,
"repo_name": "dduong42/Booksee",
"id": "07bfb1d9d241f2f96d6a6846f76a9225b129b52d",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "booksee/parser/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11996"
}
],
"symlink_target": ""
} |
"Pythonic simple JSON RPC Client implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "0.05"
import urllib
from xmlrpclib import Transport, SafeTransport
from cStringIO import StringIO
import random
import sys
try:
import gluon.contrib.simplejson as json # try web2py json serializer
except ImportError:
try:
import json # try stdlib (py2.6)
except:
import simplejson as json # try external module
class JSONRPCError(RuntimeError):
    "Error object for remote procedure call fail"

    def __init__(self, code, message, data=None):
        # Bug fix: guard the join so the default ``data=None`` is usable —
        # the original did ``'\n'.join(data)`` and raised TypeError whenever
        # data was omitted.
        value = "%s: %s\n%s" % (code, message, '\n'.join(data or []))
        RuntimeError.__init__(self, value)
        self.code = code
        self.message = message
        self.data = data
class JSONDummyParser:
    "Adapter exposing xmlrpclib's parser interface over a plain text buffer."

    def __init__(self):
        self._chunks = []

    def feed(self, data):
        # Collect raw response fragments as they arrive.
        self._chunks.append(data)

    def close(self):
        # Hand back everything fed so far as one string.
        return ''.join(self._chunks)
class JSONTransportMixin:
    "Mixin that makes an xmlrpclib transport carry JSON payloads."

    def send_content(self, connection, request_body):
        # Emit headers (JSON content type), then the body if there is one.
        connection.putheader("Content-Type", "application/json")
        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders()
        if request_body:
            connection.send(request_body)
        # todo: add gzip compression

    def getparser(self):
        # One object plays both roles (parser and unmarshaller): feed()
        # buffers the response, close() returns the raw JSON text.
        collector = JSONDummyParser()
        return collector, collector
class JSONTransport(JSONTransportMixin, Transport):
    "Plain-HTTP transport combining the JSON mixin with xmlrpclib.Transport."
    pass
class JSONSafeTransport(JSONTransportMixin, SafeTransport):
    "HTTPS transport combining the JSON mixin with xmlrpclib.SafeTransport."
    pass
class ServerProxy(object):
    """JSON RPC Simple Client Service Proxy.

    Python 2 only (uses urllib.splittype/splithost and sys.maxint).
    Remote methods are invoked as attributes: ``proxy.method(args...)``.
    """

    def __init__(self, uri, transport=None, encoding=None, verbose=0):
        self.location = uri             # server location (url)
        self.trace = verbose            # show debug messages
        self.exceptions = True          # raise errors? (JSONRPCError)
        self.timeout = None             # stored but not used by call()
        self.json_request = self.json_response = ''
        type, uri = urllib.splittype(uri)
        if type not in ("http", "https"):
            raise IOError("unsupported JSON-RPC protocol")
        self.__host, self.__handler = urllib.splithost(uri)
        if transport is None:
            # Pick a default transport matching the URL scheme.
            if type == "https":
                transport = JSONSafeTransport()
            else:
                transport = JSONTransport()
        self.__transport = transport
        self.__encoding = encoding      # stored but not used by call()
        self.__verbose = verbose

    def __getattr__(self, attr):
        "pseudo method that can be called"
        # Any unknown attribute becomes a remote call of that name.
        return lambda *args: self.call(attr, *args)

    def call(self, method, *args):
        "JSON RPC communication (method invocation)"
        # build data sent to the service
        request_id = random.randint(0, sys.maxint)
        data = {'id': request_id, 'method': method, 'params': args, }
        request = json.dumps(data)
        # make HTTP request (retry if connection is lost)
        response = self.__transport.request(
            self.__host,
            self.__handler,
            request,
            verbose=self.__verbose
        )
        # store plain request and response for further debugging
        self.json_request = request
        self.json_response = response
        # parse json data coming from service
        # {'version': '1.1', 'id': id, 'result': result, 'error': None}
        response = json.loads(response)
        if response['id'] != request_id:
            raise JSONRPCError(0, "JSON Request ID != Response ID")
        # Keep the last error visible on the proxy even when not raising.
        self.error = response.get('error', {})
        if self.error and self.exceptions:
            raise JSONRPCError(self.error.get('code', 0),
                               self.error.get('message', ''),
                               self.error.get('data', None))
        return response.get('result')
# Alternate public name for ServerProxy.
ServiceProxy = ServerProxy
if __name__ == "__main__":
    # basic tests: calls a live demo endpoint, so this needs network access.
    location = "http://www.web2py.com.ar/webservices/sample/call/jsonrpc"
    client = ServerProxy(location, verbose='--verbose' in sys.argv,)
    print client.add(1, 2)
| {
"content_hash": "6ad56a250b7a279fddd5c2e541c41774",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 76,
"avg_line_length": 30.70921985815603,
"alnum_prop": 0.6036951501154735,
"repo_name": "jefftc/changlab",
"id": "5143cea7a1cb97554ee2372c61cb41467c00867d",
"size": "4857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web2py/gluon/contrib/simplejsonrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "116953"
},
{
"name": "CSS",
"bytes": "75418"
},
{
"name": "Groff",
"bytes": "10237"
},
{
"name": "HTML",
"bytes": "200459"
},
{
"name": "JavaScript",
"bytes": "159618"
},
{
"name": "Makefile",
"bytes": "11719"
},
{
"name": "Python",
"bytes": "9300228"
},
{
"name": "R",
"bytes": "94670"
},
{
"name": "Shell",
"bytes": "63514"
},
{
"name": "TeX",
"bytes": "64"
}
],
"symlink_target": ""
} |
import json
from udparsers.helpers.errludP_Helpers import hexDump, memConcat, hexConcat, intConcat
class errludP_expscom:
    """Parsers for Explorer (expscom) error-log user-detail sections.

    The parser methods take (subType, ver, data) and are fetched from the
    class object via ``getattr`` and called directly (see parseUDToJson),
    so they deliberately have no ``self`` parameter.  Each returns a JSON
    string describing the parsed section.
    """

    #Explorer Active (RAM) Log Data
    def UdParserExpActiveLog(subType, ver, data):
        """Parse an Explorer active (RAM) log section into JSON text."""
        # ***** Memory Layout *****
        # Header
        # 2 bytes : Ordering byte (0=first packet)
        # 4 bytes : Offset where data portion started in full explorer log
        # 2 bytes : Size of data portion following header
        #
        # N bytes : Error Data
        headerSize = 2 + 4 + 2
        d = dict()
        subd = dict()
        i = 0
        subd['Order Packet'], i = memConcat(data, i, i+2)
        subd['Data Starting Offset'], i = hexConcat(data, i, i+4)
        errorDataSize, i = intConcat(data, i, i+2)
        subd['Size Of Data Section'] = f'0x{errorDataSize:04x}'
        d['Explorer Active (RAM) Log Data'] = subd
        if errorDataSize <= (len(data) - headerSize):
            # NOTE(review): this unpacks hexDump() into (text, i) while the
            # saved-log parser below assigns its result directly; verify the
            # helper's return shape against errludP_Helpers.
            d['Error Data'], i = hexDump(data, i, i+errorDataSize)
        else:
            # Declared size exceeds what is present; dump everything instead.
            subd2 = dict()
            subd2['Expected Data Size'] = hex(len(data) - headerSize)
            subd2['Hex Dump'] = hexDump(data, 0, len(data))
            d['ERROR DATA MISSING - Printing entire section in Hex'] = subd2
        return json.dumps(d, indent=2)

    #Explorer Saved (SPI flash) Log Data
    def UdParserExpSavedLog(subType, ver, data, image):
        """Parse an Explorer saved (SPI flash) log section for image A or B."""
        # Same header layout as the active log above.
        headerSize = 2 + 4 + 2
        d = dict()
        subd = dict()
        i = 0
        subd['Order Packet'], i = memConcat(data, i, i+2)
        subd['Data Starting Offset'], i = hexConcat(data, i, i+4)
        errorDataSize, i = intConcat(data, i, i+2)
        subd['Size Of Data Section'] = f'0x{errorDataSize:04x}'
        # Bug fix: the original read ``if (image == "A")`` with no trailing
        # colon, which is a SyntaxError.
        if image == "A":
            d['Explorer Saved (SPI flash) Log Data Image A'] = subd
        else:
            d['Explorer Saved (SPI flash) Log Data Image B'] = subd
        if errorDataSize <= (len(data) - headerSize):
            d['Error Data'] = hexDump(data, i, i+errorDataSize)
        else:
            subd2 = dict()
            subd2['Expected Data Size'] = hex(len(data) - headerSize)
            subd2['Hex Dump'] = hexDump(data, 0, len(data))
            d['ERROR DATA MISSING - Printing entire section in Hex'] = subd2
        return json.dumps(d, indent=2)

    #Explorer Saved (SPI flash) Log Data Image A
    def UdParserExpSavedLogA(subType, ver, data):
        # Bug fix: qualify the sibling via the class — a bare
        # ``UdParserExpSavedLog`` is not in scope inside a method body and
        # raised NameError at call time.
        return errludP_expscom.UdParserExpSavedLog(subType, ver, data, "A")

    #Explorer Saved (SPI flash) Log Data Image B
    def UdParserExpSavedLogB(subType, ver, data):
        return errludP_expscom.UdParserExpSavedLog(subType, ver, data, "B")
#Dictionary with parser functions for each subtype
#Values are from UserDetailsTypes enum in src/include/usr/expscom/expscom_reasoncodes.H
# The names are resolved on errludP_expscom via getattr (see parseUDToJson).
UserDetailsTypes = { 1: "UdParserExpActiveLog",
                     2: "UdParserExpSavedLogA",
                     3: "UdParserExpSavedLogB"}
def parseUDToJson(subType, ver, data):
    """Dispatch the (subType, ver, data) triple to the matching parser."""
    parser = getattr(errludP_expscom, UserDetailsTypes[subType])
    return parser(subType, ver, data)
| {
"content_hash": "ba8b767c3e186d1bf07300c01fc7dc6b",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 87,
"avg_line_length": 35.53191489361702,
"alnum_prop": 0.5967065868263473,
"repo_name": "open-power/hostboot",
"id": "f307746f33b9629e63a428fcd76b4fede6d39023",
"size": "4157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master-p10",
"path": "src/usr/expaccess/plugins/ebmc/b3600.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "84276"
},
{
"name": "C",
"bytes": "29945981"
},
{
"name": "C++",
"bytes": "126613976"
},
{
"name": "CMake",
"bytes": "1852"
},
{
"name": "Lex",
"bytes": "8996"
},
{
"name": "M4",
"bytes": "5738"
},
{
"name": "Makefile",
"bytes": "772285"
},
{
"name": "Meson",
"bytes": "23911"
},
{
"name": "Perl",
"bytes": "2605582"
},
{
"name": "Python",
"bytes": "2602753"
},
{
"name": "Shell",
"bytes": "290164"
},
{
"name": "Tcl",
"bytes": "76031"
},
{
"name": "XSLT",
"bytes": "9553"
},
{
"name": "Yacc",
"bytes": "29440"
}
],
"symlink_target": ""
} |
from django.views.generic import TemplateView, FormView, DetailView, ListView
from django.shortcuts import render,render_to_response
from .models import Post
from django.core.urlresolvers import reverse_lazy
from django.template import RequestContext
from .forms import ConfessForm
# from uuslug import uuslug
# Import PILLOW to render and image of the text
# from PIL import Image,ImageDraw
from endless_pagination.decorators import page_template
class PostListView(ListView):
    """Class-based paginated list of Post objects (15 per page)."""
    model = Post
    template_name="post_list.html"
    paginate_by = 15
@page_template('post_page.html')  # just add this decorator
def postList(
        request, template='confess.html', extra_context=None):
    """Endlessly-paginated listing of every post."""
    context = dict(extra_context) if extra_context is not None else {}
    context['objects'] = Post.objects.all()
    return render_to_response(
        template, context, context_instance=RequestContext(request))
@page_template('post_page.html')  # just add this decorator
def popularList(
        request, template='confess.html', extra_context=None):
    """Endlessly-paginated listing of posts in the 'PO' (popular) category."""
    context = dict(extra_context) if extra_context is not None else {}
    context['objects'] = Post.objects.filter(category='PO')
    return render_to_response(
        template, context, context_instance=RequestContext(request))
@page_template('post_page.html')  # just add this decorator
def newList(
        request, template='confess.html', extra_context=None):
    """Endlessly-paginated listing of posts in the 'NE' (new) category."""
    context = dict(extra_context) if extra_context is not None else {}
    context['objects'] = Post.objects.filter(category='NE')
    return render_to_response(
        template, context, context_instance=RequestContext(request))
# NOTE(review): this view class shadows the imported forms.ConfessForm at
# module level once defined. ``form_class`` below is evaluated during the
# class body, so it still captures the imported form — but the collision is
# fragile; consider renaming the view (e.g. ConfessView).
class ConfessForm(FormView):
    """FormView that turns a valid submission into a new 'NE'-category Post."""
    template_name="post_form.html"
    form_class=ConfessForm
    success_url=reverse_lazy('list')
    def form_valid(self,form):
        category ='NE'
        # every post will be set to the new category,
        # the popular will be handpicked by admin
        post=Post()
        post.title=form.cleaned_data['title']
        post.text=form.cleaned_data['text']
        post.total_likes = 1
        post.category = category
        # This piece of code can turn any text to image
        # transform text to an image
        # img= Image.new('RGB',(500,500),(255,255,255))
        # d=ImageDraw.Draw(img)
        # d.text((20,20),post.text,fill=(255,0,0))
        # # save image to the uploads directory for media
        # img.save(settings.MEDIA_ROOT+"/uploads/"+post.text+".png","PNG")
        # # # save it to the model
        # post.image="/uploads/"+post.text+".png"
        post.save()
        return super(ConfessForm,self).form_valid(form)
class PostDetailView(DetailView):
    """Detail page for a single Post."""
    template_name = 'post_detail.html'
    model= Post
"content_hash": "a5aa7d6e27d99a6453fdac78591ef322",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 77,
"avg_line_length": 29.48913043478261,
"alnum_prop": 0.7025433099889421,
"repo_name": "amartinez1/confessions",
"id": "345df1455e0871a07a6e6e430ea4bdaa41c2ddab",
"size": "2741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "confess/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56859"
},
{
"name": "JavaScript",
"bytes": "91746"
},
{
"name": "Python",
"bytes": "16206"
}
],
"symlink_target": ""
} |
'''
NetNS
=====
A NetNS object is IPRoute-like. It runs in the main network
namespace, but also creates a proxy process running in
the required netns. All the netlink requests are done via
that proxy process.
NetNS supports standard IPRoute API, so can be used instead
of IPRoute, e.g., in IPDB::
# start the main network settings database:
ipdb_main = IPDB()
# start the same for a netns:
ipdb_test = IPDB(nl=NetNS('test'))
# create VETH
ipdb_main.create(ifname='v0p0', kind='veth', peer='v0p1').commit()
# move peer VETH into the netns
with ipdb_main.interfaces.v0p1 as veth:
veth.net_ns_fd = 'test'
# please keep in mind, that netns move clears all the settings
# on a VETH interface pair, so one should run netns assignment
# as a separate operation only
# assign addresses
# please notice, that `v0p1` is already in the `test` netns,
# so should be accessed via `ipdb_test`
with ipdb_main.interfaces.v0p0 as veth:
veth.add_ip('172.16.200.1/24')
veth.up()
with ipdb_test.interfaces.v0p1 as veth:
veth.add_ip('172.16.200.2/24')
veth.up()
Please review also the test code, under `tests/test_netns.py` for
more examples.
By default, NetNS creates requested netns, if it doesn't exist,
or uses existing one. To control this behaviour, one can use flags
as for `open(2)` system call::
# create a new netns or fail, if it already exists
    netns = NetNS('test', flags=os.O_CREAT | os.O_EXCL)
# create a new netns or use existing one
netns = NetNS('test', flags=os.O_CREAT)
# the same as above, the default behaviour
netns = NetNS('test')
To remove a network namespace::
from pyroute2 import NetNS
netns = NetNS('test')
netns.close()
netns.remove()
One should stop it first with `close()`, and only after that
run `remove()`.
'''
import os
import errno
import atexit
import select
import signal
import struct
import threading
import traceback
from socket import SOL_SOCKET
from socket import SO_RCVBUF
from pyroute2.config import MpPipe
from pyroute2.config import MpProcess
from pyroute2.iproute import IPRoute
from pyroute2.netlink.nlsocket import NetlinkMixin
from pyroute2.netlink.rtnl.iprsocket import MarshalRtnl
from pyroute2.iproute import IPRouteMixin
from pyroute2.netns import setns
from pyroute2.netns import remove
def NetNServer(netns, rcvch, cmdch, flags=os.O_CREAT):
    '''
    The netns server supposed to be started automatically by NetNS.
    It has two communication channels: one simplex to forward incoming
    netlink packets, `rcvch`, and other synchronous duplex to get
    commands and send back responses, `cmdch`.
    Channels should support standard socket API, should be compatible
    with poll/select and should be able to transparently pickle objects.
    NetNS uses `multiprocessing.Pipe` for this purpose, but it can be
    any other implementation with compatible API.
    The first parameter, `netns`, is a netns name. Depending on the
    `flags`, the netns can be created automatically. The `flags` semantics
    is exactly the same as for `open(2)` system call.
    ...
    The server workflow is simple. The startup sequence::
    1. Create or open a netns.
    2. Start `IPRoute` instance. It will be used only on the low level,
    the `IPRoute` will not parse any packet.
    3. Start poll/select loop on `cmdch` and `IPRoute`.
    On the startup, the server sends via `cmdch` the status packet. It can be
    `None` if all is OK, or some exception.
    Further data handling, depending on the channel, server side::
    1. `IPRoute`: read an incoming netlink packet and send it unmodified
    to the peer via `rcvch`. The peer, polling `rcvch`, can handle
    the packet on its side.
    2. `cmdch`: read tuple (cmd, argv, kwarg). If the `cmd` starts with
    "send", then take `argv[0]` as a packet buffer, treat it as one
    netlink packet and substitute PID field (offset 12, uint32) with
    its own. Strictly speaking, it is not mandatory for modern netlink
    implementations, but it is required by the protocol standard.
    '''
    # the server runs as a child process: ignore SIGINT so a Ctrl-C in the
    # parent does not kill the server mid-transaction
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        nsfd = setns(netns, flags)
    except OSError as e:
        # report the failure to the parent; exit status is the real errno
        cmdch.send(e)
        return e.errno
    except Exception as e:
        # wrap unexpected errors into an OSError the parent can re-raise
        cmdch.send(OSError(errno.ECOMM, str(e), netns))
        return 255
    #
    try:
        # low-level netlink socket inside the netns; packets are forwarded
        # raw and parsed on the parent side
        ipr = IPRoute()
        rcvch_lock = ipr._sproxy.lock
        ipr._s_channel = rcvch
        poll = select.poll()
        poll.register(ipr, select.POLLIN | select.POLLPRI)
        poll.register(cmdch, select.POLLIN | select.POLLPRI)
    except Exception as e:
        cmdch.send(e)
        return 255
    # all is OK so far
    cmdch.send(None)
    # 8<-------------------------------------------------------------
    while True:
        events = poll.poll()
        for (fd, event) in events:
            if fd == ipr.fileno():
                # forward incoming netlink data to the parent unmodified;
                # reads at most half the kernel rcv buffer — TODO confirm
                # the rationale for the // 2 headroom
                bufsize = ipr.getsockopt(SOL_SOCKET, SO_RCVBUF) // 2
                with rcvch_lock:
                    rcvch.send(ipr.recv(bufsize))
            elif fd == cmdch.fileno():
                try:
                    cmdline = cmdch.recv()
                    if cmdline is None:
                        # None is the shutdown sentinel: release resources
                        # and leave the loop
                        poll.unregister(ipr)
                        poll.unregister(cmdch)
                        ipr.close()
                        os.close(nsfd)
                        return
                    (cmd, argv, kwarg) = cmdline
                    if cmd[:4] == 'send':
                        # Achtung
                        #
                        # It's a hack, but we just have to do it: one
                        # must use actual pid in netlink messages
                        #
                        # FIXME: there can be several messages in one
                        # call buffer; but right now we can ignore it
                        msg = argv[0][:12]
                        msg += struct.pack("I", os.getpid())
                        msg += argv[0][16:]
                        argv = list(argv)
                        argv[0] = msg
                    # run the requested IPRoute method and ship the result back
                    cmdch.send(getattr(ipr, cmd)(*argv, **kwarg))
                except Exception as e:
                    # attach the traceback text so the parent can log it
                    e.tb = traceback.format_exc()
                    cmdch.send(e)
class NetNSProxy(object):
    # default netns name and open(2)-style flags; NetNS overrides these
    # per-instance before calling __init__
    netns = 'default'
    flags = os.O_CREAT
    def __init__(self, *argv, **kwarg):
        """Spawn the NetNServer child process and wait for its status."""
        self.cmdlock = threading.Lock()
        # simplex pipe: raw netlink data coming from the server
        self.rcvch, rcvch = MpPipe()
        # duplex pipe: commands to / responses from the server
        self.cmdch, cmdch = MpPipe()
        self.server = MpProcess(target=NetNServer,
                                args=(self.netns, rcvch, cmdch, self.flags))
        self.server.start()
        # the first message on cmdch is the startup status:
        # None on success, or the exception to re-raise here
        error = self.cmdch.recv()
        if error is not None:
            self.server.join()
            raise error
        else:
            atexit.register(self.close)
    def recv(self, bufsize, flags=0):
        # bufsize/flags are accepted for socket-API compatibility only;
        # the pipe delivers whole messages
        return self.rcvch.recv()
    def close(self):
        # None is the shutdown sentinel understood by NetNServer
        self.cmdch.send(None)
        self.server.join()
    def proxy(self, cmd, *argv, **kwarg):
        """Run one command on the server side and return its response."""
        with self.cmdlock:
            self.cmdch.send((cmd, argv, kwarg))
            response = self.cmdch.recv()
            if isinstance(response, Exception):
                raise response
            return response
    def fileno(self):
        # poll/select on the incoming netlink data channel
        return self.rcvch.fileno()
    def bind(self, *argv, **kwarg):
        if 'async' in kwarg:
            # async mode is forced off here — presumably the background I/O
            # thread cannot work through the pipe proxy; confirm upstream
            kwarg['async'] = False
        return self.proxy('bind', *argv, **kwarg)
    def send(self, *argv, **kwarg):
        return self.proxy('send', *argv, **kwarg)
    def sendto(self, *argv, **kwarg):
        return self.proxy('sendto', *argv, **kwarg)
    def getsockopt(self, *argv, **kwarg):
        return self.proxy('getsockopt', *argv, **kwarg)
    def setsockopt(self, *argv, **kwarg):
        return self.proxy('setsockopt', *argv, **kwarg)
class NetNSocket(NetlinkMixin, NetNSProxy):
    """Socket-like facade combining NetlinkMixin with the netns proxy.

    Each override dispatches explicitly to NetNSProxy (not via super()),
    so these operations always go through the proxy process.
    """

    def bind(self, *args, **kwargs):
        return NetNSProxy.bind(self, *args, **kwargs)

    def close(self):
        # shut down the proxy process rather than a local socket
        NetNSProxy.close(self)

    def _sendto(self, *args, **kwargs):
        return NetNSProxy.sendto(self, *args, **kwargs)

    def _recv(self, *args, **kwargs):
        return NetNSProxy.recv(self, *args, **kwargs)
class NetNS(IPRouteMixin, NetNSocket):
    '''
    NetNS is the IPRoute API with network namespace support.
    **Why not IPRoute?**
    The task to run netlink commands in some network namespace, being in
    another network namespace, requires the architecture, that differs
    too much from a simple Netlink socket.
    NetNS starts a proxy process in a network namespace and uses
    `multiprocessing` communication channels between the main and the proxy
    processes to route all `recv()` and `sendto()` requests/responses.
    **Any specific API calls?**
    Nope. `NetNS` supports all the same, that `IPRoute` does, in the same
    way. It provides full `socket`-compatible API and can be used in
    poll/select as well.
    The only difference is the `close()` call. In the case of `NetNS` it
    is **mandatory** to close the socket before exit.
    **NetNS and IPDB**
    It is possible to run IPDB with NetNS::
    from pyroute2 import NetNS
    from pyroute2 import IPDB
    ip = IPDB(nl=NetNS('somenetns'))
    ...
    ip.release()
    Do not forget to call `release()` when the work is done. It will shut
    down `NetNS` instance as well.
    '''
    def __init__(self, netns, flags=os.O_CREAT):
        # netns/flags must be set *before* super().__init__(), which spawns
        # the proxy process (via NetNSProxy) using these attributes
        self.netns = netns
        self.flags = flags
        super(NetNS, self).__init__()
        # the proxy delivers raw rtnetlink data; parse it on this side
        self.marshal = MarshalRtnl()
    def post_init(self):
        # the real socket lives in the proxy process; nothing to do locally
        pass
    def remove(self):
        '''
        Try to remove this network namespace from the system.
        '''
        remove(self.netns)
| {
"content_hash": "b75e92d6a9ff4025dd4d029229577d9a",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 77,
"avg_line_length": 31.813504823151124,
"alnum_prop": 0.6016777845158682,
"repo_name": "little-dude/pyroute2",
"id": "6fbd9928422d43c73c999cc80eee1a7e5876d892",
"size": "9894",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyroute2/netns/nslink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "413"
},
{
"name": "C",
"bytes": "4259"
},
{
"name": "Makefile",
"bytes": "4163"
},
{
"name": "Python",
"bytes": "638607"
},
{
"name": "Shell",
"bytes": "1914"
}
],
"symlink_target": ""
} |
import logging
import subprocess
import sys
import time
from pathlib import Path
# Command used to (re)build the index for each directory given on the CLI.
cmd_index = "python3 ./build.py index".split()
logging.basicConfig(
    format="%(asctime)s %(message)s", level=logging.INFO, stream=sys.stdout
)
# Each argument is a path; plain files are skipped, directories are indexed.
for path_str in sys.argv[1:]:
    path = Path(path_str)
    if path.is_file():
        continue
    logging.info(path.name)
    start = time.time()
    proc = subprocess.run(
        cmd_index + [str(path)], capture_output=True, text=True, check=False
    )
    if proc.stdout:
        print(proc.stdout)
    # Surface failures: previously stderr and the exit status were captured
    # and silently discarded.
    if proc.returncode != 0:
        logging.error(
            "%s failed (exit %d): %s",
            path.name, proc.returncode, proc.stderr.strip()
        )
    elapsed = time.time() - start
    logging.info("%s done: %.1f seconds", path.name, elapsed)
| {
"content_hash": "45efe161898e0ceab20ab933a2931e04",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 25.083333333333332,
"alnum_prop": 0.6611295681063123,
"repo_name": "johntellsall/shotglass",
"id": "9cdfc40173e7778a06ae22f17174205409e5023e",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dec/stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "503"
},
{
"name": "HTML",
"bytes": "132808"
},
{
"name": "Jupyter Notebook",
"bytes": "932983"
},
{
"name": "Makefile",
"bytes": "12518"
},
{
"name": "Python",
"bytes": "200303"
},
{
"name": "Shell",
"bytes": "3099"
}
],
"symlink_target": ""
} |
"""
views for the mysmeuh timeline application.
This module keeps growing and getting bloated; it needs a serious
refactoring soon. An abstraction over pages, with shared metadata for the
timeline, photos, tracks and blogs, is strongly needed.
"""
import datetime
from django.db.models import Q
from django.views.generic import TemplateView # , RedirectView
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
# model import
from microblogging.models import Tweet
from photos.models import Image
from audiotracks.models import get_track_model
Track = get_track_model()
from blog.models import Post
from threadedcomments.models import ThreadedComment
from friends.models import friend_set_for
from microblogging.models import get_following_followers_lists
from friends.forms import InviteFriendForm
from friends.models import FriendshipInvitation, Friendship
from microblogging.models import Following
from tagging.models import TaggedItem, Tag
from timeline.models import TimeLineItem
class TimeLineView(TemplateView):
    """Global activity timeline: merges tweets, blog posts, photos, audio
    tracks and comments from the last 30 days into one stream."""
    template_name = "timeline/index.html"
    def get_context_data(self, **kwargs):
        """Build `timelineitems` (grouped, date-ordered) plus `posts`."""
        context = super(TimeLineView, self).get_context_data(**kwargs)
        # TODO use a query parameter for the time delta. here is 3 months
        # NOTE(review): the delta below is 30 days, not 3 months — confirm.
        ago = datetime.datetime.now() - datetime.timedelta(30)
        tweets = [
            TimeLineItem(item, item.sent, item.sender, "timeline/_tweet.html")
            for item in Tweet.objects.all().filter(sent__gte=ago).order_by("-sent")
        ]
        posts = [
            TimeLineItem(item, item.updated_at, item.author, "timeline/_post.html")
            for item in Post.objects.all().filter(publish__gte=ago, status=2).order_by("-publish")
        ]
        # public images, plus own and friends' images for logged-in users
        image_filter = Q(is_public=True)
        if self.request.user.is_authenticated():
            image_filter = image_filter | Q(member=self.request.user) | Q(member__in=friend_set_for(self.request.user))
        images = [
            TimeLineItem(item, item.date_added, item.member, "timeline/_photo.html")
            for item in Image.objects.all().filter(image_filter).filter(date_added__gte=ago).order_by("-date_added")
        ]
        tracks = [
            TimeLineItem(item, item.updated_at, item.user, "timeline/_track.html")
            for item in Track.objects.all().filter(created_at__gte=ago).order_by("-created_at")
        ]
        comments = [
            TimeLineItem(item, item.date_submitted, item.user, "timeline/_comment.html")
            for item in ThreadedComment.objects.all().filter(date_submitted__gte=ago).order_by("-date_submitted")
        ]
        # merge the five date-descending streams on TimeLineItem's date field
        items = merge(tweets, images, posts, tracks, comments, field="date")
        for index, item in enumerate(items):
            item.index = index
        context['timelineitems'] = group_comments(items)
        context['posts'] = posts
        context['prefix_sender'] = True
        return context
def merge_lists(left, right, field=None):
    """Merge two lists sorted in descending order on attribute *field*.

    Both inputs are assumed already sorted descending on ``field``; the
    result is the descending merge. On equal keys, elements from *left*
    come first.
    """
    merged = []
    li, ri = 0, 0
    left_len, right_len = len(left), len(right)
    while li < left_len and ri < right_len:
        if getattr(left[li], field) >= getattr(right[ri], field):
            merged.append(left[li])
            li += 1
        else:
            merged.append(right[ri])
            ri += 1
    # one side is exhausted; the remainder of the other is already ordered
    merged.extend(left[li:])
    merged.extend(right[ri:])
    return merged
def merge(*querysets, **kwargs):
    """Merge any number of descending-sorted sequences on *field*.

    Raises TypeError when no ``field`` keyword is given. A single input
    is returned unchanged (not copied to a list).
    """
    field = kwargs.get('field')
    if field is None:
        raise TypeError('you need to provide a key to do comparisons on')
    if len(querysets) == 1:
        return querysets[0]
    # materialize each queryset once, then fold them pairwise
    pending = [list(q) for q in querysets]
    first, second = pending.pop(), pending.pop()
    merged = merge_lists(first, second, field)
    for remaining in pending:
        merged = merge_lists(merged, remaining, field)
    return merged
def group_comments(items):
    """Collapse consecutive ThreadedComment timeline items that target the
    same object (content_type_id, object_id) into one group item rendered
    with the _comment_group.html template. All other items pass through."""
    grouped = []
    for tlitem in items:
        item = tlitem.item
        if isinstance(item, ThreadedComment):
            # identity of the object being commented on
            key = (item.content_type_id, item.object_id)
            if grouped:
                prev = grouped[-1]
                if isinstance(prev.item, ThreadedComment) and key == (prev.item.content_type_id, prev.item.object_id):
                    if hasattr(prev, "comments"):
                        # prev is already a group item: just append
                        prev.comments.append(tlitem)
                    else:
                        # second comment on the same object: replace the
                        # single comment with a group holding both
                        prev = grouped.pop()
                        group_item = TimeLineItem(item, item.date_submitted, item.user, "timeline/_comment_group.html")
                        group_item.firstcomment = prev.item
                        group_item.comments = [ prev, group_item ]
                        grouped.append(group_item)
                else:
                    grouped.append(tlitem)
            else:
                grouped.append(tlitem)
        else:
            grouped.append(tlitem)
    return grouped
class HomePageView(TimeLineView):
    """Home page: a truncated timeline plus latest photos/blogs/tracks."""
    template_name = "timeline/homepage/homepage.html"
    def get_context_data(self, **kwargs):
        context = super(HomePageView, self).get_context_data(**kwargs)
        # reduce the timeline items
        # NOTE(review): timelineitems was already grouped by the parent
        # view; regrouping the truncated list looks redundant — confirm.
        context['timelineitems'] = group_comments(context['timelineitems'][:16])
        # public images, plus own and friends' images for logged-in users
        image_filter = Q(is_public=True)
        if self.request.user.is_authenticated():
            image_filter = image_filter | Q(member=self.request.user) | Q(member__in=friend_set_for(self.request.user))
        context['latest_photos'] = Image.objects.all().filter(image_filter).order_by("-date_added")[:16]
        context['latest_blogs'] = Post.objects.all().filter(status=2).order_by("-publish")[:10]
        context['latest_tracks'] = Track.objects.all().order_by("-created_at")[:6]
        return context
class FriendsPageView(TemplateView):
    """Timeline restricted to the logged-in user's friends (last 30 days).
    Wrapped with login_required at module level."""
    template_name = "timeline/friends.html"
    def get_context_data(self, **kwargs):
        context = super(FriendsPageView, self).get_context_data(**kwargs)
        # TODO use a query parameter for the time delta. here is 3 months
        # NOTE(review): the delta below is 30 days, not 3 months — confirm.
        ago = datetime.datetime.now() - datetime.timedelta(30)
        friends = friend_set_for(self.request.user)
        tweets = [
            TimeLineItem(item, item.sent, item.sender, "timeline/_tweet.html")
            for item in Tweet.objects.all().filter(sent__gte=ago, sender_id__in=[user.id for user in friends], sender_type__model="user").order_by("-sent")
        ]
        posts = [
            TimeLineItem(item, item.publish, item.author, "timeline/_post.html")
            for item in Post.objects.all().filter(publish__gte=ago, status=2, author__in=friends).order_by("-publish")
        ]
        images = [
            TimeLineItem(item, item.date_added, item.member, "timeline/_photo.html")
            for item in Image.objects.all().filter(Q(is_public=True) | Q(member__in=friend_set_for(self.request.user))).filter(date_added__gte=ago, member__in=friends).order_by("-date_added")
        ]
        tracks = [
            TimeLineItem(item, item.updated_at, item.user, "timeline/_track.html")
            for item in Track.objects.all().filter(created_at__gte=ago, user__in=friends).order_by("-created_at")
        ]
        comments = [
            TimeLineItem(item, item.date_submitted, item.user, "timeline/_comment.html")
            for item in ThreadedComment.objects.all().filter(date_submitted__gte=ago, user__in=friends).order_by("-date_submitted")
        ]
        # merge the five date-descending streams on TimeLineItem's date field
        items = merge(tweets, images, posts, tracks, comments, field="date")
        for index, item in enumerate(items):
            item.index = index + 1
        context['timelineitems'] = group_comments(items)
        context['prefix_sender'] = True
        return context
class FollowingPageView(TemplateView):
    """Timeline restricted to users the visitor follows (last 30 days).
    Wrapped with login_required at module level."""
    template_name = "timeline/following.html"
    def get_context_data(self, **kwargs):
        context = super(FollowingPageView, self).get_context_data(**kwargs)
        # TODO use a query parameter for the time delta. here is 3 months
        # NOTE(review): the delta below is 30 days, not 3 months — confirm.
        ago = datetime.datetime.now() - datetime.timedelta(30)
        following_list, followers_list = get_following_followers_lists(self.request.user)
        tweets = [
            TimeLineItem(item, item.sent, item.sender, "timeline/_tweet.html")
            for item in Tweet.objects.all().filter(sent__gte=ago, sender_id__in=[user.id for user in following_list], sender_type__model="user").order_by("-sent")
        ]
        posts = [
            TimeLineItem(item, item.updated_at, item.author, "timeline/_post.html")
            for item in Post.objects.all().filter(publish__gte=ago, status=2, author__in=following_list).order_by("-publish")
        ]
        images = [
            TimeLineItem(item, item.date_added, item.member, "timeline/_photo.html")
            for item in Image.objects.all().filter(Q(is_public=True) | Q(member__in=friend_set_for(self.request.user))).filter(date_added__gte=ago, member__in=following_list).order_by("-date_added")
        ]
        tracks = [
            TimeLineItem(item, item.updated_at, item.user, "timeline/_track.html")
            for item in Track.objects.all().filter(created_at__gte=ago, user__in=following_list).order_by("-created_at")
        ]
        comments = [
            TimeLineItem(item, item.date_submitted, item.user, "timeline/_comment.html")
            for item in ThreadedComment.objects.all().filter(date_submitted__gte=ago, user__in=following_list).order_by("-date_submitted")
        ]
        # merge the five date-descending streams on TimeLineItem's date field
        items = merge(tweets, images, posts, tracks, comments, field="date")
        for index, item in enumerate(items):
            item.index = index + 1
        context['timelineitems'] = group_comments(items)
        context['prefix_sender'] = True
        return context
class UserPageView(TemplateView):
    """Full timeline page for one user (or the visitor when no username
    is given in the URL), capped at 64 items per content type."""
    template_name = "timeline/user.html"
    def get_context_data(self, **kwargs):
        context = super(UserPageView, self).get_context_data(**kwargs)
        name = context.get('username', None)
        # per-stream cap on fetched objects
        limit = 64
        if name:
            user = other_user = get_object_or_404(User, username=name)
        else:
            # no username in the URL: show the visitor's own page
            user = other_user = self.request.user
        if self.request.user == other_user:
            context['is_me'] = True
            context['is_friend'] = False
        elif self.request.user.is_authenticated():
            context['is_friend'] = Friendship.objects.are_friends(self.request.user, other_user)
            context['is_following'] = Following.objects.is_following(self.request.user, other_user)
        context['other_friends'] = Friendship.objects.friends_for_user(other_user)
        context['other_user'] = other_user
        tweets = [
            TimeLineItem(item, item.sent, item.sender, "timeline/_tweet.html")
            for item in Tweet.objects.all().filter(sender_id=user.id, sender_type__model="user").order_by("-sent")[:limit]
        ]
        context['latest_blogs'] = Post.objects.all().filter(status=2, author=user).order_by("-publish")[:limit]
        posts = [
            TimeLineItem(item, item.updated_at, item.author, "timeline/_post.html")
            for item in context['latest_blogs']
        ]
        # the user's public images, plus private ones when the visitor is a friend
        image_filter = Q(is_public=True, member=user)
        if self.request.user.is_authenticated():
            image_filter = image_filter | Q(member=user, member__in=friend_set_for(self.request.user))
        context['latest_photos'] = Image.objects.all().filter(image_filter).order_by("-date_added")[:limit]
        images = [
            TimeLineItem(item, item.date_added, item.member, "timeline/_photo.html")
            for item in context['latest_photos']
        ]
        context['latest_tracks'] = Track.objects.all().filter(user=user).order_by("-created_at")[:limit]
        tracks = [
            TimeLineItem(item, item.updated_at, item.user, "timeline/_track.html")
            for item in context['latest_tracks']
        ]
        comments = [
            TimeLineItem(item, item.date_submitted, item.user, "timeline/_comment.html")
            for item in ThreadedComment.objects.all().filter(user=user).order_by("-date_submitted")[:limit]
        ]
        # merge the five date-descending streams on TimeLineItem's date field
        items = merge(tweets, images, posts, tracks, comments, field="date")
        for index, item in enumerate(items):
            item.index = index + 1
        context['timelineitems'] = group_comments(items)[:limit]
        context['prefix_sender'] = True
        return context
class UserHomePageView(TemplateView):
    """Profile home page for one user: their recent content plus the
    friendship/invitation state between them and the visitor."""
    template_name = "timeline/homepage/user.html"
    def get_context_data(self, **kwargs):
        """Collect the user's latest content and friendship context."""
        context = super(UserHomePageView, self).get_context_data(**kwargs)
        other_friends = None
        username = name = context.get('username', None)
        if name:
            user = other_user = get_object_or_404(User, username=name)
        else:
            # no username in the URL: show the visitor's own page
            user = other_user = self.request.user
        if self.request.user == other_user:
            context['is_me'] = True
            is_friend = False
        elif self.request.user.is_authenticated():
            is_friend = context['is_friend'] = Friendship.objects.are_friends(self.request.user, other_user)
            context['is_following'] = Following.objects.is_following(self.request.user, other_user)
        else:
            is_friend = False
        context['other_friends'] = Friendship.objects.friends_for_user(other_user)
        context['other_user'] = other_user
        tweets = [
            TimeLineItem(item, item.sent, item.sender, "timeline/_tweet.html")
            for item in Tweet.objects.all().filter(sender_id=user.id, sender_type__model="user").order_by("-sent")[:32]
        ]
        context['latest_blogs'] = Post.objects.all().filter(status=2, author=user).order_by("-publish")[:32]
        posts = [
            TimeLineItem(item, item.updated_at, item.author, "timeline/_post.html")
            for item in context['latest_blogs']
        ]
        # the user's public images, plus private ones when the visitor is a friend
        image_filter = Q(is_public=True, member=user)
        if self.request.user.is_authenticated():
            image_filter = image_filter | Q(member=user, member__in=friend_set_for(self.request.user))
        context['latest_photos'] = Image.objects.all().filter(image_filter).order_by("-date_added")[:32]
        images = [
            TimeLineItem(item, item.date_added, item.member, "timeline/_photo.html")
            for item in context['latest_photos']
        ]
        context['latest_tracks'] = Track.objects.all().filter(user=user).order_by("-created_at")[:32]
        tracks = [
            TimeLineItem(item, item.updated_at, item.user, "timeline/_track.html")
            for item in context['latest_tracks']
        ]
        comments = [
            TimeLineItem(item, item.date_submitted, item.user, "timeline/_comment.html")
            for item in ThreadedComment.objects.all().filter(user=user).order_by("-date_submitted")[:32]
        ]
        # merge the five date-descending streams, keep only the newest 16
        items = merge(tweets, images, posts, tracks, comments, field="date")[:16]
        for index, item in enumerate(items):
            item.index = index + 1
        context['timelineitems'] = group_comments(items)
        context['prefix_sender'] = True
        invite_form = None
        if is_friend:
            previous_invitations_to = None
            previous_invitations_from = None
            if self.request.method == "POST":
                if self.request.POST.get("action") == "remove":  # @@@ perhaps the form should just post to friends and be redirected here
                    Friendship.objects.remove(self.request.user, other_user)
                    messages.add_message(self.request, messages.SUCCESS,
                        ugettext("You have removed %(from_user)s from friends") % {
                            "from_user": other_user
                        }
                    )
                    is_friend = False
                    invite_form = InviteFriendForm(self.request.user, {
                        "to_user": username,
                        "message": ugettext("Let's be friends!"),
                    })
        else:
            if self.request.user.is_authenticated() and self.request.method == "POST":
                pass
            else:
                invite_form = InviteFriendForm(self.request.user, {
                    "to_user": username,
                    "message": ugettext("Let's be friends!"),
                })
            previous_invitations_to = None
            previous_invitations_from = None
        context['invite_form'] = invite_form
        context['previous_invitations_to'] = previous_invitations_to
        context['previous_invitations_from'] = previous_invitations_from
        context['other_friends'] = other_friends
        return context
    def post(self, *args, **kw):
        """Handle the friendship actions: invite, remove, accept, decline.
        Always redirects back to the profile detail page."""
        if self.request.POST.get("action") == "invite":
            username = self.request.POST.get("to_user")
            other_user = get_object_or_404(User, username=username)
            invite_form = InviteFriendForm(self.request.user, self.request.POST)
            if invite_form.is_valid():
                invite_form.save()
                messages.success(self.request, _("Friendship requested with %(username)s") % {
                    'username': invite_form.cleaned_data['to_user']
                })
        elif self.request.POST.get("action") == "remove":
            username = kw['username']
            other_user = get_object_or_404(User, username=username)
            Friendship.objects.remove(self.request.user, other_user)
            messages.add_message(self.request, messages.SUCCESS,
                ugettext("You have removed %(from_user)s from friends") % {
                    "from_user": other_user
                }
            )
        else:
            username = kw['username']
            other_user = get_object_or_404(User, username=username)
            invite_form = InviteFriendForm(self.request.user, {
                "to_user": username,
                "message": ugettext("Let's be friends!"),
            })
            invitation_id = self.request.POST.get("invitation", None)
            if self.request.POST.get("action") == "accept":
                try:
                    invitation = FriendshipInvitation.objects.get(id=invitation_id)
                    # BUG FIX: was `self.equest.user`, which raised
                    # AttributeError so invitations could never be accepted
                    if invitation.to_user == self.request.user:
                        invitation.accept()
                        messages.add_message(self.request, messages.SUCCESS,
                            ugettext("You have accepted the friendship request from %(from_user)s") % {
                                "from_user": invitation.from_user
                            }
                        )
                except FriendshipInvitation.DoesNotExist:
                    pass
            elif self.request.POST.get("action") == "decline":
                try:
                    invitation = FriendshipInvitation.objects.get(id=invitation_id)
                    if invitation.to_user == self.request.user:
                        invitation.decline()
                        messages.add_message(self.request, messages.SUCCESS,
                            ugettext("You have declined the friendship request from %(from_user)s") % {
                                "from_user": invitation.from_user
                            }
                        )
                except FriendshipInvitation.DoesNotExist:
                    pass
        return HttpResponseRedirect(reverse("profile_detail",
                                            kwargs={"username": username}))
class TagPageView(TemplateView):
    """Full timeline of all content carrying one tag (case-insensitive)."""
    template_name = "timeline/tag.html"
    def get_context_data(self, **kwargs):
        context = super(TagPageView, self).get_context_data(**kwargs)
        tag_instance = get_object_or_404(Tag, name__iexact=context.get("tagname"))
        # keep the canonical tag spelling for the template and the queries
        context['tag'] = tag = tag_instance.name
        # ago = datetime.datetime.now() - datetime.timedelta(30)
        # limit = 64
        tweets = [
            TimeLineItem(item, item.sent, item.sender, "timeline/_tweet.html")
            for item in TaggedItem.objects.get_by_model(Tweet, tag).order_by("-sent")  # [:limit]
        ]
        context['latest_blogs'] = TaggedItem.objects.get_by_model(Post, tag).filter(status=2).order_by("-publish")  # [:limit]
        posts = [
            TimeLineItem(item, item.updated_at, item.author, "timeline/_post.html")
            for item in context['latest_blogs']
        ]
        # public images, plus own and friends' images for logged-in users
        image_filter = Q(is_public=True)
        if self.request.user.is_authenticated():
            image_filter = image_filter | Q(member=self.request.user) | Q(member__in=friend_set_for(self.request.user))
        context['latest_photos'] = TaggedItem.objects.get_by_model(Image, tag).filter(image_filter).order_by("-date_added")  # [:limit]
        images = [
            TimeLineItem(item, item.date_added, item.member, "timeline/_photo.html")
            for item in context['latest_photos']
        ]
        context['latest_tracks'] = TaggedItem.objects.get_by_model(Track, tag).order_by("-created_at")
        tracks = [
            TimeLineItem(item, item.created_at, item.user, "timeline/_track.html")
            for item in context['latest_tracks']
        ]
        comments = [
            TimeLineItem(item, item.date_submitted, item.user, "timeline/_comment.html")
            for item in TaggedItem.objects.get_by_model(ThreadedComment, tag).order_by("-date_submitted")
        ]
        # no tag for comment yet. so : no comment :)
        # Tag.objects.get_for_object(self.obj.resolve(context))
        items = merge(tweets, images, posts, tracks, comments, field="date")
        for index, item in enumerate(items):
            item.index = index + 1
        context['timelineitems'] = group_comments(items)
        context['prefix_sender'] = True
        return context
class TagHomePageView(TemplateView):
    """Home-page variant of the tag timeline: truncated per-type lists."""
    template_name = "timeline/homepage/tag.html"
    def get_context_data(self, **kwargs):
        context = super(TagHomePageView, self).get_context_data(**kwargs)
        tag_instance = get_object_or_404(Tag, name__iexact=context.get("tagname"))
        # keep the canonical tag spelling for the template and the queries
        context['tag'] = tag = tag_instance.name
        # ago = datetime.datetime.now() - datetime.timedelta(30)
        tweets = [
            TimeLineItem(item, item.sent, item.sender, "timeline/_tweet.html")
            for item in TaggedItem.objects.get_by_model(Tweet, tag).order_by("-sent")[:16]
        ]
        context['latest_blogs'] = TaggedItem.objects.get_by_model(Post, tag).filter(status=2).order_by("-publish")[:10]
        posts = [
            TimeLineItem(item, item.publish, item.author, "timeline/_post.html")
            for item in context['latest_blogs']
        ]
        # public images, plus own and friends' images for logged-in users
        image_filter = Q(is_public=True)
        if self.request.user.is_authenticated():
            image_filter = image_filter | Q(member=self.request.user) | Q(member__in=friend_set_for(self.request.user))
        context['latest_photos'] = TaggedItem.objects.get_by_model(Image, tag).filter(image_filter).order_by("-date_added")[:16]
        images = [
            TimeLineItem(item, item.date_added, item.member, "timeline/_photo.html")
            for item in context['latest_photos']
        ]
        context['latest_tracks'] = TaggedItem.objects.get_by_model(Track, tag).order_by("-created_at")
        tracks = [
            TimeLineItem(item, item.updated_at, item.user, "timeline/_track.html")
            for item in context['latest_tracks']
        ]
        comments = [
            TimeLineItem(item, item.date_submitted, item.user, "timeline/_comment.html")
            for item in TaggedItem.objects.get_by_model(ThreadedComment, tag).order_by("-date_submitted")
        ]
        # no tag for comment yet. so : no comment :)
        # Tag.objects.get_for_object(self.obj.resolve(context))
        items = merge(tweets, images, posts, tracks, comments, field="date")[:16]
        for index, item in enumerate(items):
            item.index = index + 1
        context['timelineitems'] = group_comments(items)
        context['prefix_sender'] = True
        return context
# old stuff extracted from the main urls.py file, to run the 5 column home page
class LegacyHomePageView(TemplateView):
    """Old 5-column home page. Context values are zero-argument lambdas,
    so the querysets are only evaluated if the template uses them."""
    template_name = "timeline/homepage/legacy.html"
    def get_context_data(self, **kwargs):
        context = super(LegacyHomePageView, self).get_context_data(**kwargs)
        context['latest_tweets'] = lambda: Tweet.objects.all().order_by(
            "-sent")[:12]
        context['latest_blogs'] = lambda: Post.objects.filter(
            status=2).order_by("-publish")[:10]
        # public images, plus own and friends' images for logged-in users
        image_filter = Q(is_public=True)
        if self.request.user.is_authenticated():
            image_filter = image_filter | Q(member=self.request.user) | Q(member__in=friend_set_for(self.request.user))
        context['latest_photos'] = lambda: Image.objects.all().filter(image_filter).order_by(
            "-date_added")[:18]
        context['latest_tracks'] = lambda: Track.objects.all().order_by(
            "-created_at")[:6]
        context['prefix_sender'] = True
        return context
# URLconf entry points; the friends/following pages require login.
timeline = TimeLineView.as_view()
home = HomePageView.as_view()
legacy = LegacyHomePageView.as_view()
friends = login_required(FriendsPageView.as_view())
following = login_required(FollowingPageView.as_view())
user_timeline = UserPageView.as_view()
user_home = UserHomePageView.as_view()
tag_timeline = TagPageView.as_view()
tag_home = TagHomePageView.as_view()
| {
"content_hash": "c0a12224832d4c0d59466c17e9fe7b1c",
"timestamp": "",
"source": "github",
"line_count": 633,
"max_line_length": 198,
"avg_line_length": 41.467614533965246,
"alnum_prop": 0.5926321002704865,
"repo_name": "amarandon/smeuhsocial",
"id": "fbd679154ae25e1244dbf6e091a3f54b14f1e701",
"size": "26249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/timeline/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "72383"
},
{
"name": "HTML",
"bytes": "205774"
},
{
"name": "JavaScript",
"bytes": "18844"
},
{
"name": "Makefile",
"bytes": "781"
},
{
"name": "Python",
"bytes": "604547"
}
],
"symlink_target": ""
} |
"""Tests of grpc.channel_ready_future."""
import threading
import unittest
import logging
import grpc
from tests.unit.framework.common import test_constants
from tests.unit import thread_pool
class _Callback(object):
    """Records a value delivered by a future's done-callback and lets
    another thread block until that value arrives."""

    def __init__(self):
        self._cv = threading.Condition()
        self._result = None

    def accept_value(self, value):
        """Store *value* and wake every waiter in block_until_called."""
        with self._cv:
            self._result = value
            self._cv.notify_all()

    def block_until_called(self):
        """Wait for accept_value and return its value.

        NOTE: a delivered value of None is indistinguishable from
        "not yet called", so this would block forever on None.
        """
        with self._cv:
            while self._result is None:
                self._cv.wait()
            return self._result
class ChannelReadyFutureTest(unittest.TestCase):
    """Tests of grpc.channel_ready_future against an unreachable channel
    and an immediately reachable one."""
    def test_lonely_channel_connectivity(self):
        # channel to a port with no server: readiness never completes
        channel = grpc.insecure_channel('localhost:12345')
        callback = _Callback()
        ready_future = grpc.channel_ready_future(channel)
        ready_future.add_done_callback(callback.accept_value)
        with self.assertRaises(grpc.FutureTimeoutError):
            ready_future.result(timeout=test_constants.SHORT_TIMEOUT)
        self.assertFalse(ready_future.cancelled())
        self.assertFalse(ready_future.done())
        self.assertTrue(ready_future.running())
        # cancellation resolves the future and fires the done-callback
        ready_future.cancel()
        value_passed_to_callback = callback.block_until_called()
        self.assertIs(ready_future, value_passed_to_callback)
        self.assertTrue(ready_future.cancelled())
        self.assertTrue(ready_future.done())
        self.assertFalse(ready_future.running())
        channel.close()
    def test_immediately_connectable_channel_connectivity(self):
        # run a real local server so the channel becomes ready promptly
        recording_thread_pool = thread_pool.RecordingThreadPool(
            max_workers=None)
        server = grpc.server(
            recording_thread_pool, options=(('grpc.so_reuseport', 0),))
        port = server.add_insecure_port('[::]:0')
        server.start()
        channel = grpc.insecure_channel('localhost:{}'.format(port))
        callback = _Callback()
        ready_future = grpc.channel_ready_future(channel)
        ready_future.add_done_callback(callback.accept_value)
        self.assertIsNone(
            ready_future.result(timeout=test_constants.LONG_TIMEOUT))
        value_passed_to_callback = callback.block_until_called()
        self.assertIs(ready_future, value_passed_to_callback)
        self.assertFalse(ready_future.cancelled())
        self.assertTrue(ready_future.done())
        self.assertFalse(ready_future.running())
        # Cancellation after maturity has no effect.
        ready_future.cancel()
        self.assertFalse(ready_future.cancelled())
        self.assertTrue(ready_future.done())
        self.assertFalse(ready_future.running())
        # the readiness machinery must not have used the server's thread pool
        self.assertFalse(recording_thread_pool.was_used())
        channel.close()
        server.stop(None)
# Script entry point: configure logging, then run the test suite.
if __name__ == '__main__':
    logging.basicConfig()
    unittest.main(verbosity=2)
| {
"content_hash": "16831e9f68b32a3289d6e9470e7a1d7c",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 71,
"avg_line_length": 34.44047619047619,
"alnum_prop": 0.6543380573798825,
"repo_name": "sreecha/grpc",
"id": "cda157d5c5692e18275f4fa1231d0f59f7ad3359",
"size": "3470",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34038"
},
{
"name": "C",
"bytes": "2461432"
},
{
"name": "C#",
"bytes": "2017000"
},
{
"name": "C++",
"bytes": "31371388"
},
{
"name": "CMake",
"bytes": "653774"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "156890"
},
{
"name": "Go",
"bytes": "34791"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6907"
},
{
"name": "JavaScript",
"bytes": "56944"
},
{
"name": "M4",
"bytes": "47783"
},
{
"name": "Makefile",
"bytes": "1005023"
},
{
"name": "Mako",
"bytes": "6211"
},
{
"name": "Objective-C",
"bytes": "561529"
},
{
"name": "Objective-C++",
"bytes": "50927"
},
{
"name": "PHP",
"bytes": "474349"
},
{
"name": "PowerShell",
"bytes": "621"
},
{
"name": "Python",
"bytes": "2838169"
},
{
"name": "Ruby",
"bytes": "1007743"
},
{
"name": "Shell",
"bytes": "472679"
},
{
"name": "Swift",
"bytes": "3516"
},
{
"name": "TSQL",
"bytes": "4901"
},
{
"name": "XSLT",
"bytes": "9673"
}
],
"symlink_target": ""
} |
from pandac.PandaModules import URLSpec
class InstalledHostData:
    """ A list of instances of this class is returned by
    AppRunner.scanInstalledPackages().  Each of these corresponds to a
    particular host that has provided packages that have been
    installed on the local client. """

    def __init__(self, host, dirnode):
        self.host = host
        self.pathname = dirnode.pathname
        self.totalSize = dirnode.getTotalSize()
        self.packages = []

        if not self.host:
            # No host record at all: fall back to placeholder values.
            self.hostUrl = 'unknown'
            self.descriptiveName = 'unknown'
            return

        self.hostUrl = self.host.hostUrl
        # Prefer the host's own descriptive name; otherwise derive one
        # from the server portion of its URL.
        self.descriptiveName = (
            self.host.descriptiveName
            or URLSpec(self.hostUrl).getServer())
| {
"content_hash": "35f2059a58ab531dda683297cc48e2dd",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 37.22727272727273,
"alnum_prop": 0.6422466422466423,
"repo_name": "toontownfunserver/Panda3D-1.9.0",
"id": "470c300da32627924269053828883bf7804abf4a",
"size": "819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "direct/p3d/InstalledHostData.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1186"
},
{
"name": "C",
"bytes": "1824481"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5471478"
},
{
"name": "Emacs Lisp",
"bytes": "147093"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009441"
},
{
"name": "Objective-C",
"bytes": "15934"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30052"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "17733821"
},
{
"name": "Shell",
"bytes": "12056"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
def format_track_data(track):
    """Return a human-readable 'artist - title' string for *track*.

    :param track: Object with ``artists`` (iterable of strings) and
        ``name`` attributes, or a falsy value when nothing is playing.
    :return: 'artist1, artist2 - title', '<Nothing playing>' for a falsy
        track, or an error placeholder if formatting fails.
    """
    try:
        if track:
            artist = ', '.join(track.artists)
            title = track.name
            # Bug fix: the original used '%u - %u'; %u only accepts
            # numbers, so formatting string fields always raised and the
            # function returned the exception placeholder instead.
            return '%s - %s' % (artist, title)
        else:
            return '<Nothing playing>'
    except Exception:
        # Deliberate best-effort: never let display formatting crash the
        # caller (narrowed from a bare except).
        return '<(Exception in format_track_data)>'
def format_play_time(msecs=0):
    """Format a duration in milliseconds as e.g. '1m05s'.

    :param msecs: Duration in milliseconds.  0 and 0xFFFFFFFF (presumably
        the device's 'not available' sentinel — TODO confirm) map to
        '<n/a>'.
    :return: Formatted string, or an error placeholder if formatting fails.
    """
    try:
        if not msecs or msecs == 0xFFFFFFFF:
            return '<n/a>'
        # Floor division keeps the arithmetic exact on both Python 2 and
        # Python 3 (true division would yield a float on Python 3).
        secs = msecs // 1000
        return '%dm%02ds' % (secs // 60, secs % 60)
    except Exception:
        # Deliberate best-effort: display code must not crash the caller
        # (narrowed from a bare except).
        return '<(Exception in format_play_time)>'
| {
"content_hash": "a29b6b1160854b8ae0b671044e46c2f3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 58,
"avg_line_length": 25.92,
"alnum_prop": 0.5169753086419753,
"repo_name": "ismailof/mopidy-btsource",
"id": "a9e1717d7fc3e4d26dd3f334bf39d721faa2800c",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mopidy_btsource/helper.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "27546"
}
],
"symlink_target": ""
} |
from model.contact import Contact
def test_add_new_contact(app, db, json_contact):
    """Adding a contact should extend the DB list by exactly that contact."""
    new_contact = json_contact
    contacts_before = db.get_contacts_list()
    app.contact.create(new_contact)
    # assert len(contacts_before) + 1 == app.contact.count()
    contacts_after = db.get_contacts_list()
    expected = contacts_before + [new_contact]
    assert sorted(expected, key=Contact.id_or_max) == \
        sorted(contacts_after, key=Contact.id_or_max)
| {
"content_hash": "c818b589632edcd28e55c11ef40fdb5e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 111,
"avg_line_length": 41,
"alnum_prop": 0.7095343680709535,
"repo_name": "tkapriyan/python_training",
"id": "932f892a069b38e5d66f243cbad555c85876cf11",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_addnewcontact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29309"
}
],
"symlink_target": ""
} |
"""Provide the CGI graphing interface to the sniffer data."""
import cgi
import os
# Directory holding the pre-rendered graph images served by this CGI.
GRAPHS = '/var/www/graphs/graph-data'

# A CGI script must emit the HTTP header before any body output.
print 'Content-Type: text/html\n'
def IsValidImage(imgfname):
    """Return True iff imgfname is a bare filename that exists under GRAPHS."""
    # Reject path separators (directory traversal) before touching the FS.
    if '/' in imgfname:
        return False
    return os.path.exists(os.path.join(GRAPHS, imgfname))
def GetMatchingImg(files, must_include, closest=None, next_img=False,
                   exact=False):
    """Finds a filename that contains every substring in must_include.

    :param files: Iterable of candidate filenames.
    :param must_include: Substrings that must all appear in the result.
    :param closest: If given, pick the match adjacent (in sorted order) to
        this name rather than the first match found.
    :param next_img: With closest, return the match *after* it instead of
        the one before.
    :param exact: With closest, return None when closest itself is at the
        boundary (no strict neighbor exists) instead of falling back.
    :return: A matching filename, or None.
    """
    possible_match = []
    for fname in files:
        # Keep only names containing every required substring.
        found_all = True
        for must in must_include:
            if must not in fname:
                found_all = False
                break
        if found_all:
            if not closest:
                # No anchor requested: first match wins.
                return fname
            possible_match.append(fname)
    if len(possible_match) == 1:
        return possible_match[0]
    if not possible_match:
        return None
    if closest not in possible_match:
        # Insert the anchor so its neighbors can be located by sort order.
        possible_match.append(closest)
    possible_match.sort()
    return_next = False
    for index, fname in enumerate(possible_match):
        if return_next:
            return fname
        if fname == closest:
            if next_img:
                # return the next one
                return_next = True
            elif index == 0:
                # Anchor sorts first: there is no "previous" match.
                if exact:
                    return None
                return_next = True
            else:
                return possible_match[index-1]
    if return_next and exact:
        # Anchor sorted last: no strict "next" match exists.
        return None
    return possible_match[-2]
def GetDefaultImg(files):
    """Pick the image shown when none was requested (sorts *files* in place)."""
    # Reverse-sort so the newest daily all-networks stacked-pps graph is
    # the first candidate GetMatchingImg sees.
    files.sort(reverse=True)
    fname = GetMatchingImg(files, ('-daily-', '-all-', 'pps-stacked'))
    if fname:
        return fname
    return files[-1]
def MakeLink(fname, title, is_link=True):
    """Return an HTML anchor to the graph page for fname, or the bare title."""
    # Fall back to plain text when linking is disabled or there is no target.
    if not is_link or fname is None:
        return title
    return '<a href="/graphs/?img=%s">%s</a>' % (fname, title)
def PrintHeader(files, img):
    """Prints the navigation link bar for the currently displayed image.

    Each row of links switches one "axis" of the image name (view type,
    packets/bits, period, network, prev/next, latest) while keeping the
    other '-'-separated components of *img* fixed.

    :param files: List of available image filenames.
    :param img: Filename of the image currently shown.
    """
    # NOTE(review): image names appear to be '-'-separated fields; the
    # exact field layout is inferred from the slices below — confirm.
    links = []
    img_group = img.split('-')
    matching = img_group[:-1]
    # Row 1: switch between log-scale and stacked views.
    img_file = GetMatchingImg(files, matching + ['-log'], closest=img)
    links.append(MakeLink(img_file, 'Logview', is_link=bool('-log' not in img)))
    img_file = GetMatchingImg(files, matching + ['-stacked'], closest=img)
    links.append(
        MakeLink(img_file, 'Stacked', is_link=bool('-stacked' not in img)))
    print '<font size=-2>[ %s] </font> ' % (' | '.join(links))
    links = []
    # Row 2: switch between packets-per-second and bits-per-second.
    matching = img_group[:-2] + [(img_group[-1])]
    img_file = GetMatchingImg(files, matching + ['-pps'], closest=img)
    links.append(MakeLink(img_file, 'Packets', is_link=bool('-pps' not in img)))
    img_file = GetMatchingImg(files, matching + ['-bps'], closest=img)
    links.append(MakeLink(img_file, 'Bits', is_link=bool('-bps' not in img)))
    print '<font size=-2>[ %s] </font> ' % (' | '.join(links))
    links = []
    # Row 3: switch the aggregation period (daily/weekly/monthly).
    matching = img_group[3:]
    img_file = GetMatchingImg(files, matching + [('-daily')], closest=img)
    links.append(
        MakeLink(img_file, 'Daily',
                 is_link=bool('-daily' not in img and img_file != img)))
    img_file = GetMatchingImg(files, matching + [('-weekly')], closest=img)
    links.append(
        MakeLink(img_file, 'Weekly',
                 is_link=bool('-weekly' not in img and img_file != img)))
    img_file = GetMatchingImg(files, matching + [('-monthly')], closest=img)
    links.append(
        MakeLink(img_file, 'Monthly',
                 is_link=bool('-monthly' not in img and img_file != img)))
    print '<font size=-2>[ %s ]</font> ' % (' | '.join(links))
    links = []
    # Row 4: switch the network/subnet being graphed.
    matching = [img_group[0], img_group[1], img_group[2],
                img_group[-2], img_group[-1]]
    img_file = GetMatchingImg(files, matching + [('-all-')], closest=img)
    links.append(MakeLink(img_file, 'All', is_link=bool('-all-' not in img)))
    img_file = GetMatchingImg(files, matching + [('-1.1.1.0-')], closest=img)
    links.append(
        MakeLink(img_file, '1.1.1.0',
                 is_link=bool('-1.1.1.0-' not in img and img_file != img)))
    img_file = GetMatchingImg(files, matching + [('-1.2.3.0-')], closest=img)
    links.append(
        MakeLink(img_file, '1.2.3.0',
                 is_link=bool('-1.2.3.0-' not in img and img_file != img)))
    img_file = GetMatchingImg(files, matching + [('-1.0.0.0-')], closest=img)
    links.append(
        MakeLink(img_file, '1.0.0.0',
                 is_link=bool('-1.0.0.0-' not in img and img_file != img)))
    print '<font size=-2>[ %s ]</font> ' % (' | '.join(links))
    links = []
    # Row 5: previous/next image of the same kind (strict neighbors only).
    matching = img_group[2:]
    img_file = GetMatchingImg(files, matching, closest=img, next_img=False,
                              exact=True)
    links.append(MakeLink(img_file, '<< Prev',
                          is_link=bool(img_file != img)))
    img_file = GetMatchingImg(files, matching, closest=img,
                              next_img=True, exact=True)
    links.append(MakeLink(img_file, 'Next >>',
                          is_link=bool(img_file != img)))
    print '<font size=-2>[ %s ]</font>' % (' | '.join(links))
    links = []
    # Row 6: jump to the newest image of the same kind.  The synthetic
    # anchor 'onenet-99999999' sorts after any real date, so the closest
    # match below it is the latest image.
    matching = img_group[2:]
    img_file = GetMatchingImg(files, matching, closest='onenet-99999999',
                              next_img=False)
    links.append(MakeLink(img_file, 'Latest'))
    print '<font size=-2>[ %s ]</font>' % (' | '.join(links))
    links = []
    # Row 7: newest hourly image.
    matching = img_group[3:]
    img_file = GetMatchingImg(files, matching + [('-hourly')], next_img=False)
    links.append(MakeLink(img_file, 'Latest (hourly)'))
    print '<font size=-2>[ %s ]</font>' % (' | '.join(links))
    links = []
    print '<br>'
def PrintImageLink(img):
    """Emits HTML that displays *img*, deferring the load on desktop Chrome.

    :param img: Image filename under /graphs/graph-data/.
    """
    ua = os.environ.get('HTTP_USER_AGENT', '')
    if ('Chrome' in ua and
        ('MOBILE' not in ua.upper() and 'ANDROID' not in ua.upper())):
        # Desktop Chrome: let the page's onload hook set the src, so the
        # page renders before the (large) image downloads.
        print ('<script>function runLoad() { img.setAttribute("src",'
               '"/graphs/graph-data/%s"); } </script>' % img)
    else:
        # Other browsers get a plain <img> tag and a no-op onload hook.
        print ('<script>function runLoad() { }</script> <img src="'
               '/graphs/graph-data/%s">' % img)
def main():
    """Renders the full page: head template, nav links, image, tail template."""
    form = cgi.FieldStorage()
    files = os.listdir(GRAPHS)
    fh = open('/var/www/graphs/graph.head.html')
    print fh.read()
    fh.close()
    img = form.getfirst('img')
    if not img or not IsValidImage(img):
        # Missing or unsafe ?img= parameter: fall back to the default view.
        img = GetDefaultImg(files)
    files.sort()
    PrintHeader(files, img)
    PrintImageLink(img)
    fh = open('/var/www/graphs/graph.tail.html')
    print fh.read()
    fh.close()
# Swallow Ctrl-C so an interrupted run exits quietly.
try:
    main()
except KeyboardInterrupt:
    pass
| {
"content_hash": "62a6d813e9963f7b00ecd69235d94c75",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 78,
"avg_line_length": 32.47089947089947,
"alnum_prop": 0.5994785725924718,
"repo_name": "steve-goog/apcas",
"id": "a10fdf87a2e90dc470f2d3e3eb943b933bb09144",
"size": "6752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graph_cgi.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12255"
},
{
"name": "HTML",
"bytes": "4118"
},
{
"name": "Python",
"bytes": "53235"
}
],
"symlink_target": ""
} |
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class DomainConfigurationTestJSON(base.BaseIdentityV3AdminTest):
    """Tests of the Keystone v3 domain-configuration admin API."""

    # Sample configuration used by the create/update/delete tests.  The
    # API only accepts the 'identity' and 'ldap' groups.
    custom_config = {
        "identity": {
            "driver": "ldap"
        },
        "ldap": {
            "url": "ldap://myldap.com:389/",
            "user_tree_dn": "ou=Users,dc=my_new_root,dc=org"
        }
    }

    @classmethod
    def setup_clients(cls):
        super(DomainConfigurationTestJSON, cls).setup_clients()
        # Shorthand: every test below talks to the domain-config client.
        cls.client = cls.domain_config_client

    @classmethod
    def resource_setup(cls):
        super(DomainConfigurationTestJSON, cls).resource_setup()
        cls.group = cls.groups_client.create_group(
            name=data_utils.rand_name('group'),
            description=data_utils.rand_name('group-desc'))['group']

    @classmethod
    def resource_cleanup(cls):
        cls.groups_client.delete_group(cls.group['id'])
        super(DomainConfigurationTestJSON, cls).resource_cleanup()

    def _create_domain_and_config(self, config):
        """Creates a test domain with *config*; returns (domain, config).

        Registers a cleanup that deletes the config, ignoring NotFound so
        tests that delete it themselves still pass.
        """
        domain = self.setup_test_domain()
        config = self.client.create_domain_config(domain['id'], **config)[
            'config']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.client.delete_domain_config, domain['id'])
        return domain, config

    @decorators.idempotent_id('11a02bf0-6f94-4380-b3b0-c8dc18fc0d22')
    def test_show_default_group_config_and_options(self):
        """Checks the default config exposes the identity and ldap groups."""
        # The API supports only the identity and ldap groups. For the ldap
        # group, a valid value is url or user_tree_dn. For the identity group,
        # a valid value is driver.
        # Check that the default config has the identity and ldap groups.
        config = self.client.show_default_config_settings()['config']
        self.assertIsInstance(config, dict)
        self.assertIn('identity', config)
        self.assertIn('ldap', config)

        # Check that the identity group is correct.
        identity_config = self.client.show_default_group_config('identity')[
            'config']

        self.assertIsInstance(identity_config, dict)
        self.assertIn('identity', identity_config)
        self.assertIn('driver', identity_config['identity'])
        self.assertIn('list_limit', identity_config['identity'])

        # Show each option for the default domain and identity group.
        for config_opt_name in ['driver', 'list_limit']:
            retrieved_config_opt = self.client.show_default_group_option(
                'identity', config_opt_name)['config']
            self.assertIn(config_opt_name, retrieved_config_opt)

        # Check that the ldap group is correct.
        ldap_config = self.client.show_default_group_config('ldap')['config']

        self.assertIsInstance(ldap_config, dict)
        self.assertIn('ldap', ldap_config)

        # Several valid options exist for ldap group.
        valid_options = ldap_config['ldap'].keys()

        # Show each option for the default domain and ldap group.
        for config_opt_name in valid_options:
            retrieved_config_opt = self.client.show_default_group_option(
                'ldap', config_opt_name)['config']
            self.assertIn(config_opt_name, retrieved_config_opt)

    @decorators.idempotent_id('9e3ff13c-f597-4f01-9377-d6c06c2a1477')
    def test_create_domain_config_and_show_config_groups_and_options(self):
        """Creates a domain config and reads it back whole/by group/by option."""
        domain, created_config = self._create_domain_and_config(
            self.custom_config)

        # Check that the entire configuration is correct.
        self.assertEqual(self.custom_config, created_config)

        # Check that each configuration group is correct.
        for group_name in self.custom_config.keys():
            group_cfg = self.client.show_domain_group_config(
                domain['id'], group_name)['config']
            self.assertIn(group_name, group_cfg)
            self.assertEqual(self.custom_config[group_name],
                             group_cfg[group_name])

            # Check that each configuration option is correct.
            for opt_name in self.custom_config[group_name].keys():
                group_opt = self.client.show_domain_group_option_config(
                    domain['id'], group_name, opt_name)['config']
                self.assertIn(opt_name, group_opt)
                self.assertEqual(self.custom_config[group_name][opt_name],
                                 group_opt[opt_name])

    @decorators.idempotent_id('7161023e-5dd0-4612-9da0-1bac6ac30b63')
    def test_create_update_and_delete_domain_config(self):
        """Updates then deletes a whole domain config."""
        domain, created_config = self._create_domain_and_config(
            self.custom_config)

        new_config = created_config
        new_config['ldap']['url'] = data_utils.rand_url()

        # Check that the altered configuration is reflected in updated_config.
        updated_config = self.client.update_domain_config(
            domain['id'], **new_config)['config']
        self.assertEqual(new_config, updated_config)

        # Check that showing the domain config shows the altered configuration.
        retrieved_config = self.client.show_domain_config(domain['id'])[
            'config']
        self.assertEqual(new_config, retrieved_config)

        # Check that deleting a configuration works.
        self.client.delete_domain_config(domain['id'])
        self.assertRaises(lib_exc.NotFound, self.client.show_domain_config,
                          domain['id'])

    @decorators.idempotent_id('c7510fa2-6661-4170-9c6b-4783a80651e9')
    def test_create_update_and_delete_domain_config_groups_and_opts(self):
        """Updates and deletes config at group and single-option granularity."""
        domain, _ = self._create_domain_and_config(self.custom_config)

        # Check that updating configuration groups work.
        new_driver = data_utils.rand_name('driver')
        new_limit = data_utils.rand_int_id(0, 100)
        new_group_config = {'identity': {'driver': new_driver,
                                         'list_limit': new_limit}}

        updated_config = self.client.update_domain_group_config(
            domain['id'], 'identity', **new_group_config)['config']

        self.assertEqual(new_driver, updated_config['identity']['driver'])
        self.assertEqual(new_limit, updated_config['identity']['list_limit'])

        # Check that updating individual configuration group options work.
        new_driver = data_utils.rand_name('driver')

        updated_config = self.client.update_domain_group_option_config(
            domain['id'], 'identity', 'driver', driver=new_driver)['config']

        self.assertEqual(new_driver, updated_config['identity']['driver'])

        # Check that deleting individual configuration group options work.
        self.client.delete_domain_group_option_config(
            domain['id'], 'identity', 'driver')
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_domain_group_option_config,
                          domain['id'], 'identity', 'driver')

        # Check that deleting configuration groups work.
        self.client.delete_domain_group_config(domain['id'], 'identity')
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_domain_group_config,
                          domain['id'], 'identity')
| {
"content_hash": "3d01fbd012489a5922ac28844c855589",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 79,
"avg_line_length": 43.857988165680474,
"alnum_prop": 0.6314085267134377,
"repo_name": "Juniper/tempest",
"id": "f7316970b0437dcd982b168d5fcd16d064c8cb16",
"size": "8044",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/identity/admin/v3/test_domain_configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4194970"
},
{
"name": "Shell",
"bytes": "19343"
}
],
"symlink_target": ""
} |
import re
from optparse import OptionParser
from ..old_extractors import features
from ..feature_extractors import tokenizer
from ..util import file_handling as fh, defines
def main():
    """Extracts ngram tokens from every raw-label CSV and saves them as JSON."""
    # Handle input options and arguments
    usage = "%prog"
    parser = OptionParser(usage=usage)
    #parser.add_option('-d', dest='dataset', default='',
    #                  help='Dataset to process; (if not specified, all files in raw data directory will be used)')
    parser.add_option('-n', dest='ngrams', default=1,
                      help='n for ngrams; default=%default')

    (options, args) = parser.parse_args()

    #if options.dataset != '':
    #    input_dir = defines.data_raw_labels_dir
    #    filename = fh.make_filename(input_dir, options.dataset, 'csv')
    #    files = [filename]
    #else:
    input_dir = defines.data_raw_labels_dir
    files = fh.ls(input_dir, '*.csv')
    n = int(options.ngrams)

    print "Extracting ngram tokens:"
    data = {}
    # Accumulate tokens for every label file into one dict keyed by
    # response id.
    for f in files:
        print f
        extract_ngram_tokens(f, n, data)

    output_dir = fh.makedirs(defines.data_token_dir)
    output_filename = fh.make_filename(output_dir, get_feature_name(n), 'json')
    fh.write_to_json(data, output_filename)

    # write default function definition
    features.make_feature_definition(get_feature_name(n), get_prefix(n), filename=get_feature_name(n)+'_default',
                                     min_doc_threshold=1, binarize=True, feature_type='tokens')
def get_feature_name(n):
    """Name of the order-n ngram feature, e.g. 'ngrams_2'."""
    return 'ngrams_{0}'.format(n)
def get_prefix(n):
    """Prefix attached to every order-n ngram token, e.g. '_n2_'."""
    return '_n{0}_'.format(n)
def extract_ngram_tokens(input_filename, n, data, prefix='', add_paragraph_num=False):
    """Tokenizes every response labeled in input_filename into prefixed ngrams.

    :param input_filename: CSV of labels; its index supplies response ids.
    :param n: Ngram order (1 = unigrams, 2 = bigrams, ...).
    :param data: Dict updated in place, mapping response id -> token list.
    :param prefix: String prepended to every token; defaults to get_prefix(n).
    :param add_paragraph_num: If True, append '_<paragraph index>' to tokens.
    """
    Y = fh.read_csv(input_filename)
    rids = Y.index
    responses = fh.read_json(defines.data_raw_text_file)
    if prefix == '':
        prefix = get_prefix(n)
    for rid in rids:
        # Normalization: lowercase, trim, drop angle brackets, and turn
        # dashes/underscores into separate ' - ' tokens.
        text = responses[rid].lower()
        text = text.lstrip()
        text = text.rstrip()
        text = text.lstrip('/')
        text = re.sub('<', '', text)
        text = re.sub('>', '', text)
        text = re.sub('-', ' - ', text)
        text = re.sub('_', ' - ', text)
        tokens = []
        # Slashes and backslashes delimit paragraphs in the raw text.
        paragraphs = re.split('[/\\\\]', text)
        paragraphs = [p for p in paragraphs if p != '']
        count = 0
        for p in paragraphs:
            count += 1
            sentences = tokenizer.split_sentences(p)
            for s in sentences:
                sent_tokens = tokenizer.make_ngrams(s, n)
                # Strip stray quotes/backticks from word tokens only
                # (tokens with no lowercase letter are left untouched).
                sent_tokens = [t.rstrip('`"\'') if re.search('[a-z]', t) else t for t in sent_tokens]
                sent_tokens = [t.lstrip('`"\'') if re.search('[a-z]', t) else t for t in sent_tokens]
                if add_paragraph_num:
                    sent_tokens = [t + '_' + str(count) for t in sent_tokens]
                tokens = tokens + sent_tokens
        tokens = [prefix + t for t in tokens]
        data[rid] = tokens
# Run as a script: parse options and extract the tokens.
if __name__ == '__main__':
    main()
| {
"content_hash": "cf4dea1a2f6f6d3107ed8e6e5d0d34c5",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 115,
"avg_line_length": 32.922222222222224,
"alnum_prop": 0.567667904151198,
"repo_name": "dallascard/guac",
"id": "4b2b0a3f9ae6a4049e9c041e86e93b3c3c70d28a",
"size": "2963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/old_extractors/extract_ngram_tokens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "997183"
},
{
"name": "Shell",
"bytes": "7969"
}
],
"symlink_target": ""
} |
import csv
def unicode_csv_reader(filename, **kw):
    """Yield CSV rows with every cell decoded from UTF-8 (Python 2 only)."""
    reader = csv.reader(filename, **kw)
    for row in reader:
        yield [unicode(cell, 'utf-8') for cell in row]
| {
"content_hash": "a3499123c161a8141670532644eec516",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 49,
"avg_line_length": 24.5,
"alnum_prop": 0.6326530612244898,
"repo_name": "openvenues/address_normalizer",
"id": "004c0f3ee655bb92aba7ad17c0b6f660be303dc4",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "address_normalizer/utils/unicode_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5835606"
},
{
"name": "Makefile",
"bytes": "206"
},
{
"name": "Python",
"bytes": "59154"
}
],
"symlink_target": ""
} |
"""Makes figure with gradient-weighted class-activation maps (Grad-CAM)."""
import os
import pickle
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from PIL import Image
from gewittergefahr.gg_utils import general_utils
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import monte_carlo
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import gradcam
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import cam_plotting
from gewittergefahr.plotting import significance_plotting
from gewittergefahr.plotting import imagemagick_utils
from gewittergefahr.scripts import plot_input_examples as plot_examples
# Composite names meaning "no Monte Carlo file for this composite".
NONE_STRINGS = ['None', 'none']

# Radar heights/fields kept in the figure (a subset of what the CNN used;
# enforced by _read_one_composite).
RADAR_HEIGHTS_M_AGL = numpy.array([2000, 6000, 10000], dtype=int)
RADAR_FIELD_NAMES = [radar_utils.REFL_NAME, radar_utils.VORTICITY_NAME]

# Layout constants for panels and colour bars.
COLOUR_BAR_LENGTH = 0.25
PANEL_NAME_FONT_SIZE = 30
COLOUR_BAR_FONT_SIZE = 25

# ImageMagick settings used to annotate figure titles.
CONVERT_EXE_NAME = '/usr/bin/convert'
TITLE_FONT_SIZE = 150
TITLE_FONT_NAME = 'DejaVu-Sans-Bold'

FIGURE_RESOLUTION_DPI = 300
CONCAT_FIGURE_SIZE_PX = int(1e7)

# Command-line argument names.
GRADCAM_FILES_ARG_NAME = 'input_gradcam_file_names'
MC_FILES_ARG_NAME = 'input_monte_carlo_file_names'
COMPOSITE_NAMES_ARG_NAME = 'composite_names'
COLOUR_MAP_ARG_NAME = 'colour_map_name'
MIN_VALUES_ARG_NAME = 'min_colour_values'
MAX_VALUES_ARG_NAME = 'max_colour_values'
NUM_CONTOURS_ARG_NAME = 'num_contours'
SMOOTHING_RADIUS_ARG_NAME = 'smoothing_radius_grid_cells'
MAX_FDR_ARG_NAME = 'monte_carlo_max_fdr'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'

# Help strings for the arguments above.
GRADCAM_FILES_HELP_STRING = (
    'List of Grad-CAM files (each will be read by `gradcam.read_file`).'
)
MC_FILES_HELP_STRING = (
    'List of files with Monte Carlo significance (one per saliency file). Each'
    ' will be read by `_read_monte_carlo_test`. If you do not want to plot '
    'significance for the [i]th composite, make the [i]th list element "None".'
)
COMPOSITE_NAMES_HELP_STRING = (
    'List of composite names (one for each Grad-CAM file). This list must be '
    'space-separated, but after reading the list, underscores within each item '
    'will be replaced by spaces.'
)
COLOUR_MAP_HELP_STRING = (
    'Name of colour map. Class activation for each predictor will be plotted '
    'with the same colour map. For example, if name is "Greys", the colour map'
    ' used will be `pyplot.cm.Greys`. This argument supports only pyplot '
    'colour maps.'
)
MIN_VALUES_HELP_STRING = (
    'Minimum class activation in each colour scheme (one per file). Use '
    'negative values to let these be determined automatically.'
)
MAX_VALUES_HELP_STRING = 'Same as `{0:s}` but for max values.'.format(
    MIN_VALUES_ARG_NAME
)
NUM_CONTOURS_HELP_STRING = 'Number of contours for class activation.'
SMOOTHING_RADIUS_HELP_STRING = (
    'e-folding radius for Gaussian smoother (num grid cells). If you do not '
    'want to smooth CAMs, make this non-positive.'
)
MAX_FDR_HELP_STRING = (
    'Max FDR (false-discovery rate) for field-based version of Monte Carlo '
    'significance test. If you do not want to use field-based version, leave '
    'this argument alone.'
)
OUTPUT_DIR_HELP_STRING = (
    'Name of output directory (figures will be saved here).'
)

# Command-line interface definition.
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
    '--' + GRADCAM_FILES_ARG_NAME, type=str, nargs='+', required=True,
    help=GRADCAM_FILES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + MC_FILES_ARG_NAME, type=str, nargs='+', required=True,
    help=MC_FILES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + COMPOSITE_NAMES_ARG_NAME, type=str, nargs='+', required=True,
    help=COMPOSITE_NAMES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + COLOUR_MAP_ARG_NAME, type=str, required=False, default='binary',
    help=COLOUR_MAP_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + MIN_VALUES_ARG_NAME, type=float, nargs='+', required=True,
    help=MIN_VALUES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + MAX_VALUES_ARG_NAME, type=float, nargs='+', required=True,
    help=MAX_VALUES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + NUM_CONTOURS_ARG_NAME, type=int, required=False,
    default=15, help=NUM_CONTOURS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + SMOOTHING_RADIUS_ARG_NAME, type=float, required=False,
    default=1., help=SMOOTHING_RADIUS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + MAX_FDR_ARG_NAME, type=float, required=False, default=-1.,
    help=MAX_FDR_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
    help=OUTPUT_DIR_HELP_STRING
)
def _read_one_composite(gradcam_file_name, smoothing_radius_grid_cells,
                        monte_carlo_file_name, monte_carlo_max_fdr):
    """Reads class-activation map for one composite.

    E = number of examples
    M = number of rows in grid
    N = number of columns in grid
    H = number of heights in grid
    F = number of radar fields

    :param gradcam_file_name: Path to input file (will be read by
        `gradcam.read_file`).
    :param smoothing_radius_grid_cells: Radius for Gaussian smoother, used only
        for class-activation map.
    :param monte_carlo_file_name: Path to Monte Carlo file (will be read by
        `_read_monte_carlo_file`), or None for no significance.
    :param monte_carlo_max_fdr: See documentation at top of file.
    :return: mean_radar_matrix: E-by-M-by-N-by-H-by-F numpy array with mean
        radar fields.
    :return: mean_class_activn_matrix: E-by-M-by-N-by-H numpy array with mean
        class-activation fields.
    :return: significance_matrix: E-by-M-by-N-by-H numpy array of Boolean
        flags.
    :return: model_metadata_dict: Dictionary returned by
        `cnn.read_model_metadata`.
    """

    print('Reading CAMs from: "{0:s}"...'.format(gradcam_file_name))
    gradcam_dict = gradcam.read_file(gradcam_file_name)[0]

    # Prepend a singleton example axis so downstream code sees E = 1.
    mean_radar_matrix = numpy.expand_dims(
        gradcam_dict[gradcam.MEAN_PREDICTOR_MATRICES_KEY][0], axis=0
    )
    mean_class_activn_matrix = numpy.expand_dims(
        gradcam_dict[gradcam.MEAN_CAM_MATRICES_KEY][0], axis=0
    )

    if smoothing_radius_grid_cells is not None:
        print((
            'Smoothing class-activation maps with Gaussian filter (e-folding '
            'radius of {0:.1f} grid cells)...'
        ).format(
            smoothing_radius_grid_cells
        ))

        mean_class_activn_matrix[0, ...] = general_utils.apply_gaussian_filter(
            input_matrix=mean_class_activn_matrix[0, ...],
            e_folding_radius_grid_cells=smoothing_radius_grid_cells
        )

    model_file_name = gradcam_dict[gradcam.MODEL_FILE_KEY]
    model_metafile_name = cnn.find_metafile(model_file_name)

    if monte_carlo_file_name is None:
        # No significance test: mark every grid point insignificant.
        significance_matrix = numpy.full(
            mean_class_activn_matrix.shape, False, dtype=bool
        )
    else:
        print('Reading Monte Carlo test from: "{0:s}"...'.format(
            monte_carlo_file_name
        ))

        this_file_handle = open(monte_carlo_file_name, 'rb')
        monte_carlo_dict = pickle.load(this_file_handle)
        this_file_handle.close()

        p_value_matrix = monte_carlo_dict[monte_carlo.P_VALUE_MATRICES_KEY][0]

        if monte_carlo_max_fdr is None:
            # Pointwise test at the conventional 0.05 level.
            significance_matrix = p_value_matrix <= 0.05
        else:
            # Field-based test with false-discovery-rate control.
            significance_matrix = monte_carlo.find_sig_grid_points(
                p_value_matrix=p_value_matrix,
                max_false_discovery_rate=monte_carlo_max_fdr
            )

        significance_matrix = numpy.expand_dims(significance_matrix, axis=0)

    print('Fraction of significant differences: {0:.4f}'.format(
        numpy.mean(significance_matrix.astype(float))
    ))

    print('Reading CNN metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    # Subset heights to RADAR_HEIGHTS_M_AGL (axis -2 of the radar matrix).
    good_indices = numpy.array([
        numpy.where(
            training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] == h
        )[0][0]
        for h in RADAR_HEIGHTS_M_AGL
    ], dtype=int)

    mean_radar_matrix = mean_radar_matrix[..., good_indices, :]
    mean_class_activn_matrix = mean_class_activn_matrix[..., good_indices]
    significance_matrix = significance_matrix[..., good_indices]

    # Subset fields to RADAR_FIELD_NAMES (last axis of the radar matrix).
    good_indices = numpy.array([
        training_option_dict[trainval_io.RADAR_FIELDS_KEY].index(f)
        for f in RADAR_FIELD_NAMES
    ], dtype=int)

    mean_radar_matrix = mean_radar_matrix[..., good_indices]

    # Record the subsetting in the metadata so plotting code agrees.
    training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = RADAR_HEIGHTS_M_AGL
    training_option_dict[trainval_io.RADAR_FIELDS_KEY] = RADAR_FIELD_NAMES
    training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = None
    model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY] = training_option_dict

    return (
        mean_radar_matrix, mean_class_activn_matrix, significance_matrix,
        model_metadata_dict
    )
def _overlay_text(
        image_file_name, x_offset_from_center_px, y_offset_from_top_px,
        text_string):
    """Overlays text on an image in place, via ImageMagick's `convert`.

    :param image_file_name: Path to image file (modified in place).
    :param x_offset_from_center_px: Center-relative x-coordinate (pixels).
    :param y_offset_from_top_px: Top-relative y-coordinate (pixels).
    :param text_string: String to overlay.
    :raises: ValueError: if the ImageMagick command (ultimately a Unix
        command) fails.
    """

    # TODO(thunderhoser): Put this method somewhere more general.
    command_string = (
        '"{0:s}" "{1:s}" -gravity north -pointsize {2:d} -font "{3:s}" '
        '-fill "rgb(0, 0, 0)" -annotate {4:+d}{5:+d} "{6:s}" "{1:s}"'
    ).format(
        CONVERT_EXE_NAME, image_file_name, TITLE_FONT_SIZE, TITLE_FONT_NAME,
        x_offset_from_center_px, y_offset_from_top_px, text_string
    )

    # Guard-clause style: raise on any non-zero exit status.
    exit_code = os.system(command_string)
    if exit_code != 0:
        raise ValueError(imagemagick_utils.ERROR_STRING)
def _plot_one_composite(
        gradcam_file_name, monte_carlo_file_name, composite_name_abbrev,
        composite_name_verbose, colour_map_object, min_colour_value,
        max_colour_value, num_contours, smoothing_radius_grid_cells,
        monte_carlo_max_fdr, output_dir_name):
    """Plots class-activation map for one composite.

    :param gradcam_file_name: Path to input file (will be read by
        `gradcam.read_file`).
    :param monte_carlo_file_name: Path to Monte Carlo file (will be read by
        `_read_monte_carlo_file`).
    :param composite_name_abbrev: Abbrev composite name (will be used in file
        names).
    :param composite_name_verbose: Verbose composite name (will be used in
        figure title).
    :param colour_map_object: See documentation at top of file.
    :param min_colour_value: Minimum value in colour bar (may be NaN).
    :param max_colour_value: Max value in colour bar (may be NaN).
    :param num_contours: See documentation at top of file.
    :param smoothing_radius_grid_cells: Same.
    :param monte_carlo_max_fdr: Same.
    :param output_dir_name: Name of output directory (figures will be saved
        here).
    :return: main_figure_file_name: Path to main image file created by this
        method.
    :return: min_colour_value: Same as input but cannot be None.
    :return: max_colour_value: Same as input but cannot be None.
    """

    (
        mean_radar_matrix, mean_class_activn_matrix, significance_matrix,
        model_metadata_dict
    ) = _read_one_composite(
        gradcam_file_name=gradcam_file_name,
        smoothing_radius_grid_cells=smoothing_radius_grid_cells,
        monte_carlo_file_name=monte_carlo_file_name,
        monte_carlo_max_fdr=monte_carlo_max_fdr
    )

    # If either colour bound is NaN, derive both bounds from the 1st and 99th
    # percentiles of the composite activations.  Bounds are clamped in log10
    # space: minimum to [-2, 1], maximum to [-1, 2].
    if numpy.isnan(min_colour_value) or numpy.isnan(max_colour_value):
        min_colour_value_log10 = numpy.log10(
            numpy.percentile(mean_class_activn_matrix, 1.)
        )
        max_colour_value_log10 = numpy.log10(
            numpy.percentile(mean_class_activn_matrix, 99.)
        )

        min_colour_value_log10 = max([min_colour_value_log10, -2.])
        max_colour_value_log10 = max([max_colour_value_log10, -1.])
        min_colour_value_log10 = min([min_colour_value_log10, 1.])
        max_colour_value_log10 = min([max_colour_value_log10, 2.])

        min_colour_value = 10 ** min_colour_value_log10
        max_colour_value = 10 ** max_colour_value_log10
    else:
        min_colour_value_log10 = numpy.log10(min_colour_value)
        max_colour_value_log10 = numpy.log10(max_colour_value)

    # Contour levels are evenly spaced in log10 space.
    contour_interval_log10 = (
        (max_colour_value_log10 - min_colour_value_log10) /
        (num_contours - 1)
    )

    mean_activn_matrix_log10 = numpy.log10(mean_class_activn_matrix)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    field_names = training_option_dict[trainval_io.RADAR_FIELDS_KEY]

    num_fields = mean_radar_matrix.shape[-1]
    num_heights = mean_radar_matrix.shape[-2]

    # Plot composite radar fields (one multi-panel figure per field).
    handle_dict = plot_examples.plot_one_example(
        list_of_predictor_matrices=[mean_radar_matrix],
        model_metadata_dict=model_metadata_dict, pmm_flag=True,
        allow_whitespace=True, plot_panel_names=True,
        panel_name_font_size=PANEL_NAME_FONT_SIZE,
        add_titles=False, label_colour_bars=True,
        colour_bar_length=COLOUR_BAR_LENGTH,
        colour_bar_font_size=COLOUR_BAR_FONT_SIZE,
        num_panel_rows=num_heights
    )

    figure_objects = handle_dict[plot_examples.RADAR_FIGURES_KEY]
    axes_object_matrices = handle_dict[plot_examples.RADAR_AXES_KEY]

    # Overlay the same class-activation contours (and Monte Carlo significance
    # stippling) on every field's figure.  NOTE(review): the height axis is
    # flipped (numpy.flip along axis 0) — presumably to match the top-down
    # panel ordering used by `plot_examples.plot_one_example`; confirm.
    for k in range(num_fields):
        cam_plotting.plot_many_2d_grids(
            class_activation_matrix_3d=numpy.flip(
                mean_activn_matrix_log10[0, ...], axis=0
            ),
            axes_object_matrix=axes_object_matrices[k],
            colour_map_object=colour_map_object,
            min_contour_level=min_colour_value_log10,
            max_contour_level=max_colour_value_log10,
            contour_interval=contour_interval_log10
        )
        significance_plotting.plot_many_2d_grids_without_coords(
            significance_matrix=numpy.flip(
                significance_matrix[0, ...], axis=0
            ),
            axes_object_matrix=axes_object_matrices[k]
        )

    # Save one panel image per radar field.
    panel_file_names = [None] * num_fields

    for k in range(num_fields):
        panel_file_names[k] = '{0:s}/{1:s}_{2:s}.jpg'.format(
            output_dir_name, composite_name_abbrev,
            field_names[k].replace('_', '-')
        )
        print('Saving figure to: "{0:s}"...'.format(panel_file_names[k]))
        figure_objects[k].savefig(
            panel_file_names[k], dpi=FIGURE_RESOLUTION_DPI,
            pad_inches=0, bbox_inches='tight'
        )
        pyplot.close(figure_objects[k])

    # Concatenate the per-field panels into one row, then resize.
    main_figure_file_name = '{0:s}/{1:s}_gradcam.jpg'.format(
        output_dir_name, composite_name_abbrev
    )
    print('Concatenating panels to: "{0:s}"...'.format(main_figure_file_name))
    imagemagick_utils.concatenate_images(
        input_file_names=panel_file_names,
        output_file_name=main_figure_file_name,
        num_panel_rows=1, num_panel_columns=num_fields, border_width_pixels=50
    )
    imagemagick_utils.resize_image(
        input_file_name=main_figure_file_name,
        output_file_name=main_figure_file_name,
        output_size_pixels=CONCAT_FIGURE_SIZE_PX
    )

    # Leave extra whitespace at the top for the title, overlay the title, then
    # trim back down to a thin border.
    imagemagick_utils.trim_whitespace(
        input_file_name=main_figure_file_name,
        output_file_name=main_figure_file_name,
        border_width_pixels=TITLE_FONT_SIZE + 25
    )
    _overlay_text(
        image_file_name=main_figure_file_name,
        x_offset_from_center_px=0, y_offset_from_top_px=0,
        text_string=composite_name_verbose
    )
    imagemagick_utils.trim_whitespace(
        input_file_name=main_figure_file_name,
        output_file_name=main_figure_file_name,
        border_width_pixels=10
    )

    return main_figure_file_name, min_colour_value, max_colour_value
def _add_colour_bar(figure_file_name, colour_map_object, min_colour_value,
                    max_colour_value, temporary_dir_name):
    """Adds colour bar to saved image file.

    :param figure_file_name: Path to saved image file.  Colour bar will be added
        to this image.
    :param colour_map_object: Colour scheme (instance of `matplotlib.pyplot.cm`
        or similar).
    :param min_colour_value: Minimum value in colour scheme.
    :param max_colour_value: Max value in colour scheme.
    :param temporary_dir_name: Name of temporary output directory.
    """

    # Size the new figure to match the saved image, so the colour bar scales
    # correctly when the two images are concatenated below.
    this_image_matrix = Image.open(figure_file_name)
    figure_width_px, figure_height_px = this_image_matrix.size
    figure_width_inches = float(figure_width_px) / FIGURE_RESOLUTION_DPI
    figure_height_inches = float(figure_height_px) / FIGURE_RESOLUTION_DPI

    extra_figure_object, extra_axes_object = pyplot.subplots(
        1, 1, figsize=(figure_width_inches, figure_height_inches)
    )
    extra_axes_object.axis('off')

    # The colour bar spans the log10 range; tick labels (set below) show the
    # corresponding linear values.
    dummy_values = numpy.array([min_colour_value, max_colour_value])

    colour_bar_object = plotting_utils.plot_linear_colour_bar(
        axes_object_or_matrix=extra_axes_object, data_matrix=dummy_values,
        colour_map_object=colour_map_object,
        min_value=numpy.log10(min_colour_value),
        max_value=numpy.log10(max_colour_value),
        orientation_string='vertical', fraction_of_axis_length=1.25,
        extend_min=False, extend_max=True, font_size=COLOUR_BAR_FONT_SIZE,
        aspect_ratio=50.
    )

    # Convert each tick value from log10 back to linear for display.
    tick_values = colour_bar_object.get_ticks()
    tick_strings = [
        '{0:.2f}'.format(10 ** v) for v in tick_values
    ]

    # Shorten labels: if the decimal point falls within the first three
    # characters (value < 100), keep at most one or two decimals; otherwise
    # keep only the integer part.
    for i in range(len(tick_strings)):
        if '.' in tick_strings[i][:3]:
            tick_strings[i] = tick_strings[i][:4]
        else:
            tick_strings[i] = tick_strings[i].split('.')[0]

    colour_bar_object.set_ticks(tick_values)
    colour_bar_object.set_ticklabels(tick_strings)

    extra_file_name = '{0:s}/gradcam_colour-bar.jpg'.format(temporary_dir_name)
    print('Saving colour bar to: "{0:s}"...'.format(extra_file_name))

    extra_figure_object.savefig(
        extra_file_name, dpi=FIGURE_RESOLUTION_DPI,
        pad_inches=0, bbox_inches='tight'
    )
    pyplot.close(extra_figure_object)

    # Append the colour bar to the right of the main image, then delete the
    # temporary colour-bar file.
    print('Concatenating colour bar to: "{0:s}"...'.format(figure_file_name))
    imagemagick_utils.concatenate_images(
        input_file_names=[figure_file_name, extra_file_name],
        output_file_name=figure_file_name,
        num_panel_rows=1, num_panel_columns=2,
        extra_args_string='-gravity Center'
    )
    os.remove(extra_file_name)
    imagemagick_utils.trim_whitespace(
        input_file_name=figure_file_name, output_file_name=figure_file_name
    )
def _run(gradcam_file_names, monte_carlo_file_names, composite_names,
         colour_map_name, min_colour_values, max_colour_values, num_contours,
         smoothing_radius_grid_cells, monte_carlo_max_fdr, output_dir_name):
    """Makes figure with gradient-weighted class-activation maps (Grad-CAM).

    This is effectively the main method.

    :param gradcam_file_names: See documentation at top of file.
    :param monte_carlo_file_names: Same.
    :param composite_names: Same.
    :param colour_map_name: Same.
    :param min_colour_values: Same.
    :param max_colour_values: Same.
    :param num_contours: Same.
    :param smoothing_radius_grid_cells: Same.
    :param monte_carlo_max_fdr: Same.
    :param output_dir_name: Same.
    """

    # Non-positive values are command-line sentinels meaning "disabled".
    if smoothing_radius_grid_cells <= 0:
        smoothing_radius_grid_cells = None
    if monte_carlo_max_fdr <= 0:
        monte_carlo_max_fdr = None

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name
    )
    colour_map_object = pyplot.cm.get_cmap(colour_map_name)
    error_checking.assert_is_geq(num_contours, 10)

    num_composites = len(gradcam_file_names)
    expected_dim = numpy.array([num_composites], dtype=int)

    # All per-composite input lists must have the same length.
    error_checking.assert_is_numpy_array(
        numpy.array(composite_names), exact_dimensions=expected_dim
    )
    error_checking.assert_is_numpy_array(
        numpy.array(monte_carlo_file_names), exact_dimensions=expected_dim
    )

    # A "None"-like string disables the Monte Carlo overlay for that composite.
    monte_carlo_file_names = [
        None if f in NONE_STRINGS else f for f in monte_carlo_file_names
    ]

    # Negative colour bounds are sentinels meaning "choose automatically";
    # replace them with NaN so `_plot_one_composite` computes the bounds.
    nan_indices = numpy.where(numpy.logical_or(
        max_colour_values < 0, min_colour_values < 0
    ))[0]
    min_colour_values[nan_indices] = numpy.nan
    max_colour_values[nan_indices] = numpy.nan

    error_checking.assert_is_numpy_array(
        min_colour_values, exact_dimensions=expected_dim
    )
    error_checking.assert_is_numpy_array(
        max_colour_values, exact_dimensions=expected_dim
    )

    # NaN comparisons are False, so auto-chosen bounds pass this check.
    assert not numpy.any(max_colour_values <= min_colour_values)

    composite_names_abbrev = [
        n.replace('_', '-').lower() for n in composite_names
    ]

    # Verbose names are prefixed "(a) ", "(b) ", ... for panel labelling.
    composite_names_verbose = [
        '({0:s}) {1:s}'.format(
            chr(ord('a') + i), composite_names[i].replace('_', ' ')
        )
        for i in range(num_composites)
    ]

    panel_file_names = [None] * num_composites

    for i in range(num_composites):
        (
            panel_file_names[i], min_colour_values[i], max_colour_values[i]
        ) = _plot_one_composite(
            gradcam_file_name=gradcam_file_names[i],
            monte_carlo_file_name=monte_carlo_file_names[i],
            composite_name_abbrev=composite_names_abbrev[i],
            composite_name_verbose=composite_names_verbose[i],
            colour_map_object=colour_map_object,
            min_colour_value=min_colour_values[i],
            max_colour_value=max_colour_values[i],
            num_contours=num_contours,
            smoothing_radius_grid_cells=smoothing_radius_grid_cells,
            monte_carlo_max_fdr=monte_carlo_max_fdr,
            output_dir_name=output_dir_name
        )

        _add_colour_bar(
            figure_file_name=panel_file_names[i],
            colour_map_object=colour_map_object,
            min_colour_value=min_colour_values[i],
            max_colour_value=max_colour_values[i],
            temporary_dir_name=output_dir_name
        )
        print('\n')

    figure_file_name = '{0:s}/gradcam_concat.jpg'.format(output_dir_name)
    print('Concatenating panels to: "{0:s}"...'.format(figure_file_name))

    num_panel_rows = int(numpy.ceil(
        numpy.sqrt(num_composites)
    ))

    # BUG FIX: this previously used numpy.floor, which under-allocates grid
    # cells whenever num_composites is not a multiple of num_panel_rows
    # (e.g., 3 composites -> 2 rows but only 1 column = 2 slots), silently
    # dropping panels.  Ceiling guarantees rows * columns >= num_composites.
    num_panel_columns = int(numpy.ceil(
        float(num_composites) / num_panel_rows
    ))

    imagemagick_utils.concatenate_images(
        input_file_names=panel_file_names,
        output_file_name=figure_file_name, border_width_pixels=25,
        num_panel_rows=num_panel_rows, num_panel_columns=num_panel_columns
    )
    imagemagick_utils.trim_whitespace(
        input_file_name=figure_file_name, output_file_name=figure_file_name,
        border_width_pixels=10
    )
    imagemagick_utils.resize_image(
        input_file_name=figure_file_name, output_file_name=figure_file_name,
        output_size_pixels=CONCAT_FIGURE_SIZE_PX
    )
if __name__ == '__main__':
    # Parse command-line arguments and hand everything to _run.  Colour
    # bounds are cast to float arrays so negative sentinels can later be
    # replaced with NaN.
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()

    _run(
        gradcam_file_names=getattr(INPUT_ARG_OBJECT, GRADCAM_FILES_ARG_NAME),
        monte_carlo_file_names=getattr(INPUT_ARG_OBJECT, MC_FILES_ARG_NAME),
        composite_names=getattr(INPUT_ARG_OBJECT, COMPOSITE_NAMES_ARG_NAME),
        colour_map_name=getattr(INPUT_ARG_OBJECT, COLOUR_MAP_ARG_NAME),
        min_colour_values=numpy.array(
            getattr(INPUT_ARG_OBJECT, MIN_VALUES_ARG_NAME), dtype=float
        ),
        max_colour_values=numpy.array(
            getattr(INPUT_ARG_OBJECT, MAX_VALUES_ARG_NAME), dtype=float
        ),
        num_contours=getattr(INPUT_ARG_OBJECT, NUM_CONTOURS_ARG_NAME),
        smoothing_radius_grid_cells=getattr(
            INPUT_ARG_OBJECT, SMOOTHING_RADIUS_ARG_NAME
        ),
        monte_carlo_max_fdr=getattr(INPUT_ARG_OBJECT, MAX_FDR_ARG_NAME),
        output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
    )
| {
"content_hash": "e78b61d6efe8e3fd035561e8d2766ba3",
"timestamp": "",
"source": "github",
"line_count": 647,
"max_line_length": 80,
"avg_line_length": 37.67078825347759,
"alnum_prop": 0.6611414269888811,
"repo_name": "thunderhoser/GewitterGefahr",
"id": "3c23b3414c4ee06e52e8dbbb271fa0736661f288",
"size": "24373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gewittergefahr/interpretation_paper_2019/make_gradcam_figure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "31275"
},
{
"name": "Python",
"bytes": "5661041"
}
],
"symlink_target": ""
} |
"""
Django settings for vip project.
Merged with 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
from __future__ import absolute_import
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os import path, environ as env
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# GeoDjango native-library locations, supplied via environment variables.
GEOS_LIBRARY_PATH=env['VIP_DJANGO_GEOS_LIBRARY_PATH']
GDAL_LIBRARY_PATH=env['VIP_DJANGO_GDAL_LIBRARY_PATH']
#This should work in windows too?

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in source; for production it should come
# from the environment like the other settings here -- confirm deployment
# practice before reusing this file.
SECRET_KEY = '20hiyp8-=0+oan+sa(r$xz#j83jr5*13*(j_(a)9q234cynf+&'

# SECURITY WARNING: don't run with debug turned on in production!
# Debug flags are the string '1' in the environment when enabled.
DEBUG = env['VIP_DJANGO_DEBUG']=='1'
TEMPLATE_DEBUG = env['VIP_DJANGO_TEMPLATE_DEBUG']=='1'

# NOTE(review): Django expects ALLOWED_HOSTS to be a list of host patterns;
# this assigns the raw environment string.  Iterating a string yields single
# characters, so host validation may not behave as intended -- verify how the
# env var is formatted.
ALLOWED_HOSTS = env['VIP_DJANGO_ALLOWED_HOSTS']

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'rest_framework',
    'django.contrib.gis',
    'voxel_globe.meta',
    'voxel_globe.world',
    'voxel_globe.main',
    'voxel_globe.tiepoint',
    'voxel_globe.ingest',
    'voxel_globe.task',
    'voxel_globe.voxel_viewer',
    'voxel_globe.order.visualsfm',
    'voxel_globe.order.build_voxel_world',
    'voxel_globe.order.error_point_cloud',
    'voxel_globe.order.threshold_point_cloud',
    'voxel_globe.generate_point_cloud',
    'voxel_globe.arducopter',
    'voxel_globe.angelfire',
    'voxel_globe.clif',
    'voxel_globe.no_metadata',
    'voxel_globe.jpg_exif',
    'voxel_globe.ingest.metadata',
    'voxel_globe.ingest.payload',
    'voxel_globe.visualsfm',
    'voxel_globe.build_voxel_world',
    'voxel_globe.tests',
    'voxel_globe.quick',
    'django.contrib.staticfiles',
) #Staticfiles MUST come last, or else it might skip some files
  #at collectstatic deploy time!!!! This is one of the rare times
  #order matters

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'voxel_globe.vip.middleware.RequireLoginMiddleware',
)

# Browser-side hardening headers (SecurityMiddleware).
SECURE_BROWSER_XSS_FILTER=True
SECURE_CONTENT_TYPE_NOSNIFF=True
#SECURE_SSL_REDIRECT=True

ROOT_URLCONF = 'voxel_globe.vip.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'voxel_globe.vip.wsgi.application'

# Custom GeoJSON serializer registered under the name 'geojson'.
SERIALIZATION_MODULES = { 'geojson' : 'voxel_globe.serializers.geojson' }

REST_FRAMEWORK = {
  'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticated',),
  #Eventually, 'DjangoModelPermissions' may be good?
#  'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',), I set this in the viewSet instead
#  'PAGINATE_BY': 10,  Leave default as get all
  'PAGINATE_BY_PARAM': 'page_size',
}

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': 'geodjango',
        'USER': env['VIP_POSTGRESQL_USER'],
        'PASSWORD': '',
        'HOST': '127.0.0.1',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

###STATICFILES_DIRS = [os.path.join(env['VIP_PYTHON_DIR'], 'lib', 'site-packages', '']
STATICFILES_DIRS = [env['VIP_DJANGO_STATIC_COMMON']]
STATIC_URL = '/'+env['VIP_DJANGO_STATIC_URL_PATH']+'/'
STATIC_ROOT = env['VIP_DJANGO_STATIC_ROOT']
MEDIA_ROOT = env['VIP_DJANGO_MEDIA_ROOT']

# RequireLoginMiddleware: everything requires login except the patterns below.
LOGIN_REQUIRED_URLS = (r'/(.*)$',)
LOGIN_REQUIRED_URLS_EXCEPTIONS = (
    r'/login.html(.*)$',
    r'/admin(.*)$', #Admin already does its own thing, leave it alone, even though I don't have to
    r'/login(.*)$',
    r'/logout(.*)$',
)
LOGIN_URL = '/login'

# Task modules that register image-ingest handlers.
INGEST_TASKS = ['voxel_globe.arducopter.tasks',
                'voxel_globe.jpg_exif.tasks',
                'voxel_globe.no_metadata.tasks']

# Celery configuration.  Workers recycle after every task (max 1 task/child).
CELERYD_MAX_TASKS_PER_CHILD = 1
# NOTE(review): environment values are strings; Celery expects an integer
# concurrency -- confirm that this env var is parsed somewhere upstream.
CELERYD_CONCURRENCY = env['VIP_NUMBER_CORES'] #default is #num of cores
CELERYD_LOG_COLOR = True
BROKER_URL = 'amqp://guest@localhost:5672//'
CELERY_RESULT_BACKEND = 'amqp://'
CELERY_TASK_SERIALIZER='json'
CELERY_ACCEPT_CONTENT=['json']  # Ignore other content
CELERY_RESULT_SERIALIZER='json'
CELERY_SEND_EVENTS=True
CELERY_DISABLE_RATE_LIMITS = True
CELERY_TRACK_STARTED = True

#Add every task in voxel_globe.quick.tasks to the route table
from celery.local import Proxy
import voxel_globe.quick.tasks as quick_tasks
CELERY_ROUTES = {}
for fun in [ x for x in dir(quick_tasks)
             if isinstance(getattr(quick_tasks, x), Proxy)]:
  CELERY_ROUTES['voxel_globe.quick.tasks.'+fun] = {'queue': 'vxl_quick'}
del Proxy, quick_tasks, fun | {
"content_hash": "7edc1d20c82d3220a415d317d002c6a8",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 114,
"avg_line_length": 30.434343434343436,
"alnum_prop": 0.6886823763690674,
"repo_name": "andyneff/voxel-globe",
"id": "edccd6ca5907592e777b15721441bfcd50bcab3e",
"size": "6026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voxel_globe/vip/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "21280"
},
{
"name": "Batchfile",
"bytes": "35781"
},
{
"name": "CSS",
"bytes": "1855"
},
{
"name": "HTML",
"bytes": "90597"
},
{
"name": "JavaScript",
"bytes": "131377"
},
{
"name": "Python",
"bytes": "302839"
},
{
"name": "Shell",
"bytes": "17009"
}
],
"symlink_target": ""
} |
import mock
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from neutron.common import constants as n_const
from neutron import context
from neutron.ipam.drivers.neutrondb_ipam import db_models
from neutron.ipam.drivers.neutrondb_ipam import driver
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
from neutron import manager
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin
from neutron.tests.unit import testlib_api
def convert_firstip_to_ipaddress(range_item):
    """Return the record's 'first_ip' field as a netaddr.IPAddress."""
    first_ip = range_item['first_ip']
    return netaddr.IPAddress(first_ip)
class TestNeutronDbIpamMixin(object):
    """Helpers shared by the IPAM driver test cases in this module."""

    def _create_network(self, plugin, ctx, shared=False):
        """Create a test network; return (network_dict, network_id)."""
        attrs = {
            'name': 'net',
            'shared': shared,
            'admin_state_up': True,
            'tenant_id': self._tenant_id,
        }
        created_network = plugin.create_network(ctx, {'network': attrs})
        return created_network, created_network['id']

    def _create_subnet(self, plugin, ctx, network_id, cidr, ip_version=4,
                       v6_address_mode=constants.ATTR_NOT_SPECIFIED,
                       allocation_pools=constants.ATTR_NOT_SPECIFIED):
        """Create a test subnet on *network_id*; return the subnet dict."""
        attrs = {
            'name': 'sub',
            'cidr': cidr,
            'ip_version': ip_version,
            'gateway_ip': constants.ATTR_NOT_SPECIFIED,
            'allocation_pools': allocation_pools,
            'enable_dhcp': True,
            'dns_nameservers': constants.ATTR_NOT_SPECIFIED,
            'host_routes': constants.ATTR_NOT_SPECIFIED,
            'ipv6_address_mode': v6_address_mode,
            'ipv6_ra_mode': constants.ATTR_NOT_SPECIFIED,
            'network_id': network_id,
            'tenant_id': self._tenant_id,
        }
        return plugin.create_subnet(ctx, {'subnet': attrs})
class TestNeutronDbIpamPool(testlib_api.SqlTestCase,
                            TestNeutronDbIpamMixin):
    """Test case for the Neutron's DB IPAM driver subnet pool interface."""

    def setUp(self):
        super(TestNeutronDbIpamPool, self).setUp()
        self._tenant_id = 'test-tenant'

        # Configure plugin for tests
        self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS)

        # Prepare environment for tests
        self.plugin = manager.NeutronManager.get_plugin()
        self.ctx = context.get_admin_context()
        self.network, self.net_id = self._create_network(self.plugin,
                                                         self.ctx)

        # Allocate IPAM driver
        self.ipam_pool = driver.NeutronDbPool(None, self.ctx)

    def _verify_ipam_subnet_details(self, ipam_subnet,
                                    cidr=None,
                                    tenant_id=None,
                                    gateway_ip=None,
                                    allocation_pools=None):
        # Helper: assert the IPAM subnet's details match the expected values.
        # None arguments assert that the corresponding detail is also None.
        ipam_subnet_details = ipam_subnet.get_details()
        gateway_ip_address = None
        cidr_ip_network = None
        if gateway_ip:
            gateway_ip_address = netaddr.IPAddress(gateway_ip)
        if cidr:
            cidr_ip_network = netaddr.IPNetwork(cidr)
        self.assertEqual(tenant_id, ipam_subnet_details.tenant_id)
        self.assertEqual(gateway_ip_address, ipam_subnet_details.gateway_ip)
        self.assertEqual(cidr_ip_network, ipam_subnet_details.subnet_cidr)
        self.assertEqual(allocation_pools,
                         ipam_subnet_details.allocation_pools)

    def test_allocate_ipam_subnet_no_neutron_subnet_id(self):
        # An IPAM subnet can be allocated without a neutron subnet ID.
        cidr = '10.0.0.0/24'
        allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'),
                            netaddr.IPRange('10.0.0.200', '10.0.0.250')]
        subnet_req = ipam_req.SpecificSubnetRequest(
            self._tenant_id,
            None,
            cidr,
            allocation_pools=allocation_pools,
            gateway_ip='10.0.0.101')
        ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req)
        self._verify_ipam_subnet_details(ipam_subnet,
                                         cidr,
                                         self._tenant_id,
                                         '10.0.0.101',
                                         allocation_pools)

    def _prepare_specific_subnet_request(self, cidr):
        # Helper: create a neutron subnet and build a matching subnet request.
        subnet = self._create_subnet(
            self.plugin, self.ctx, self.net_id, cidr)
        subnet_req = ipam_req.SpecificSubnetRequest(
            self._tenant_id,
            subnet['id'],
            cidr,
            gateway_ip=subnet['gateway_ip'])
        return subnet, subnet_req

    def test_allocate_ipam_subnet_with_neutron_subnet_id(self):
        cidr = '10.0.0.0/24'
        subnet, subnet_req = self._prepare_specific_subnet_request(cidr)
        ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req)
        # Default pool for a /24 excludes network, gateway (.1), broadcast.
        self._verify_ipam_subnet_details(
            ipam_subnet,
            cidr, self._tenant_id, subnet['gateway_ip'],
            [netaddr.IPRange('10.0.0.2', '10.0.0.254')])

    def test_allocate_any_subnet_fails(self):
        # The reference driver only supports specific-subnet requests.
        self.assertRaises(
            ipam_exc.InvalidSubnetRequestType,
            self.ipam_pool.allocate_subnet,
            ipam_req.AnySubnetRequest(self._tenant_id, 'meh',
                                      constants.IPv4, 24))

    def _test_update_subnet_pools(self, allocation_pools, expected_pools=None):
        # Helper: allocate a subnet, update its pools, verify the result.
        if expected_pools is None:
            expected_pools = allocation_pools
        cidr = '10.0.0.0/24'
        subnet, subnet_req = self._prepare_specific_subnet_request(cidr)
        self.ipam_pool.allocate_subnet(subnet_req)
        update_subnet_req = ipam_req.SpecificSubnetRequest(
            self._tenant_id,
            subnet['id'],
            cidr,
            gateway_ip=subnet['gateway_ip'],
            allocation_pools=allocation_pools)
        self.ipam_pool.update_subnet(update_subnet_req)
        ipam_subnet = self.ipam_pool.get_subnet(subnet['id'])
        self._verify_ipam_subnet_details(
            ipam_subnet,
            cidr, self._tenant_id, subnet['gateway_ip'], expected_pools)

    def test_update_subnet_pools(self):
        allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'),
                            netaddr.IPRange('10.0.0.200', '10.0.0.250')]
        self._test_update_subnet_pools(allocation_pools)

    def test_update_subnet_pools_with_blank_pools(self):
        allocation_pools = []
        self._test_update_subnet_pools(allocation_pools)

    def test_update_subnet_pools_with_none_pools(self):
        allocation_pools = None
        expected_pools = [netaddr.IPRange('10.0.0.2', '10.0.0.254')]
        # Pools should not be changed on update
        self._test_update_subnet_pools(allocation_pools,
                                       expected_pools=expected_pools)

    def test_get_subnet(self):
        cidr = '10.0.0.0/24'
        subnet, subnet_req = self._prepare_specific_subnet_request(cidr)
        self.ipam_pool.allocate_subnet(subnet_req)
        # Retrieve the subnet
        ipam_subnet = self.ipam_pool.get_subnet(subnet['id'])
        self._verify_ipam_subnet_details(
            ipam_subnet,
            cidr, self._tenant_id, subnet['gateway_ip'],
            [netaddr.IPRange('10.0.0.2', '10.0.0.254')])

    def test_get_non_existing_subnet_fails(self):
        self.assertRaises(n_exc.SubnetNotFound,
                          self.ipam_pool.get_subnet,
                          'boo')

    def test_remove_ipam_subnet(self):
        cidr = '10.0.0.0/24'
        subnet, subnet_req = self._prepare_specific_subnet_request(cidr)
        self.ipam_pool.allocate_subnet(subnet_req)
        # Remove ipam subnet by neutron subnet id
        self.ipam_pool.remove_subnet(subnet['id'])

    def test_remove_non_existent_subnet_fails(self):
        self.assertRaises(n_exc.SubnetNotFound,
                          self.ipam_pool.remove_subnet,
                          'non-existent-id')

    def test_get_details_for_invalid_subnet_id_fails(self):
        cidr = '10.0.0.0/24'
        subnet_req = ipam_req.SpecificSubnetRequest(
            self._tenant_id,
            'non-existent-id',
            cidr)
        self.ipam_pool.allocate_subnet(subnet_req)
        # Neutron subnet does not exist, so get_subnet should fail
        self.assertRaises(n_exc.SubnetNotFound,
                          self.ipam_pool.get_subnet,
                          'non-existent-id')
class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
TestNeutronDbIpamMixin):
"""Test case for Subnet interface for Neutron's DB IPAM driver.
This test case exercises the reference IPAM driver.
Even if it loads a plugin, the unit tests in this class do not exercise
it at all; they simply perform white box testing on the IPAM driver.
The plugin is exclusively used to create the neutron objects on which
the IPAM driver will operate.
"""
def _create_and_allocate_ipam_subnet(
self, cidr, allocation_pools=constants.ATTR_NOT_SPECIFIED,
ip_version=4, v6_auto_address=False, tenant_id=None):
v6_address_mode = constants.ATTR_NOT_SPECIFIED
if v6_auto_address:
# set ip version to 6 regardless of what's been passed to the
# method
ip_version = 6
v6_address_mode = n_const.IPV6_SLAAC
subnet = self._create_subnet(
self.plugin, self.ctx, self.net_id, cidr,
ip_version=ip_version,
allocation_pools=allocation_pools,
v6_address_mode=v6_address_mode)
# Build netaddr.IPRanges from allocation pools since IPAM SubnetRequest
# objects are strongly typed
allocation_pool_ranges = [netaddr.IPRange(
pool['start'], pool['end']) for pool in
subnet['allocation_pools']]
subnet_req = ipam_req.SpecificSubnetRequest(
tenant_id,
subnet['id'],
cidr,
gateway_ip=subnet['gateway_ip'],
allocation_pools=allocation_pool_ranges)
ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req)
return ipam_subnet, subnet
def setUp(self):
super(TestNeutronDbIpamSubnet, self).setUp()
self._tenant_id = 'test-tenant'
# Configure plugin for tests
self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS)
# Prepare environment for tests
self.plugin = manager.NeutronManager.get_plugin()
self.ctx = context.get_admin_context()
self.network, self.net_id = self._create_network(self.plugin,
self.ctx)
# Allocate IPAM driver
self.ipam_pool = driver.NeutronDbPool(None, self.ctx)
def test__verify_ip_succeeds(self):
cidr = '10.0.0.0/24'
ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0]
ipam_subnet._verify_ip(self.ctx, '10.0.0.2')
def test__verify_ip_not_in_subnet_fails(self):
cidr = '10.0.0.0/24'
ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0]
self.assertRaises(ipam_exc.InvalidIpForSubnet,
ipam_subnet._verify_ip,
self.ctx,
'192.168.0.2')
def test__verify_ip_bcast_and_network_fail(self):
cidr = '10.0.0.0/24'
ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0]
self.assertRaises(ipam_exc.InvalidIpForSubnet,
ipam_subnet._verify_ip,
self.ctx,
'10.0.0.255')
self.assertRaises(ipam_exc.InvalidIpForSubnet,
ipam_subnet._verify_ip,
self.ctx,
'10.0.0.0')
def _allocate_address(self, cidr, ip_version, address_request):
ipam_subnet = self._create_and_allocate_ipam_subnet(
cidr, ip_version=ip_version)[0]
return ipam_subnet.allocate(address_request)
def test_allocate_any_v4_address_succeeds(self):
self._test_allocate_any_address_succeeds('10.0.0.0/24', 4)
def test_allocate_any_v6_address_succeeds(self):
self._test_allocate_any_address_succeeds('fde3:abcd:4321:1::/64', 6)
def _test_allocate_any_address_succeeds(self, subnet_cidr, ip_version):
ip_address = self._allocate_address(
subnet_cidr, ip_version, ipam_req.AnyAddressRequest)
self.assertIn(netaddr.IPAddress(ip_address),
netaddr.IPSet(netaddr.IPNetwork(subnet_cidr)))
def test_allocate_specific_v4_address_succeeds(self):
ip_address = self._allocate_address(
'10.0.0.0/24', 4, ipam_req.SpecificAddressRequest('10.0.0.33'))
self.assertEqual('10.0.0.33', ip_address)
def test_allocate_specific_v6_address_succeeds(self):
ip_address = self._allocate_address(
'fde3:abcd:4321:1::/64', 6,
ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33'))
self.assertEqual('fde3:abcd:4321:1::33', ip_address)
def test_allocate_specific_v4_address_out_of_range_fails(self):
self.assertRaises(ipam_exc.InvalidIpForSubnet,
self._allocate_address,
'10.0.0.0/24', 4,
ipam_req.SpecificAddressRequest('192.168.0.1'))
def test_allocate_specific_v6_address_out_of_range_fails(self):
self.assertRaises(ipam_exc.InvalidIpForSubnet,
self._allocate_address,
'fde3:abcd:4321:1::/64', 6,
ipam_req.SpecificAddressRequest(
'fde3:abcd:eeee:1::33'))
def test_allocate_specific_address_in_use_fails(self):
ipam_subnet = self._create_and_allocate_ipam_subnet(
'fde3:abcd:4321:1::/64', ip_version=6)[0]
addr_req = ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33')
ipam_subnet.allocate(addr_req)
self.assertRaises(ipam_exc.IpAddressAlreadyAllocated,
ipam_subnet.allocate,
addr_req)
def test_allocate_any_address_exhausted_pools_fails(self):
# Same as above, the ranges will be recalculated always
ipam_subnet = self._create_and_allocate_ipam_subnet(
'192.168.0.0/30', ip_version=4)[0]
ipam_subnet.allocate(ipam_req.AnyAddressRequest)
# The second address generation request on a /30 for v4 net must fail
self.assertRaises(ipam_exc.IpAddressGenerationFailure,
ipam_subnet.allocate,
ipam_req.AnyAddressRequest)
def _test_deallocate_address(self, cidr, ip_version):
ipam_subnet = self._create_and_allocate_ipam_subnet(
cidr, ip_version=ip_version)[0]
ip_address = ipam_subnet.allocate(ipam_req.AnyAddressRequest)
ipam_subnet.deallocate(ip_address)
def test_deallocate_v4_address(self):
self._test_deallocate_address('10.0.0.0/24', 4)
def test_deallocate_v6_address(self):
# This test does not really exercise any different code path wrt
# test_deallocate_v4_address. It is provided for completeness and for
# future proofing in case v6-specific logic will be added.
self._test_deallocate_address('fde3:abcd:4321:1::/64', 6)
def test_allocate_unallocated_address_fails(self):
ipam_subnet = self._create_and_allocate_ipam_subnet(
'10.0.0.0/24', ip_version=4)[0]
self.assertRaises(ipam_exc.IpAddressAllocationNotFound,
ipam_subnet.deallocate, '10.0.0.2')
def test_allocate_all_pool_addresses_triggers_range_recalculation(self):
# This test instead might be made to pass, but for the wrong reasons!
pass
def test_allocate_subnet_for_non_existent_subnet_pass(self):
# This test should pass because ipam subnet is no longer
# have foreign key relationship with neutron subnet.
# Creating ipam subnet before neutron subnet is a valid case.
subnet_req = ipam_req.SpecificSubnetRequest(
'tenant_id', 'meh', '192.168.0.0/24')
self.ipam_pool.allocate_subnet(subnet_req)
def test_update_allocation_pools_with_no_pool_change(self):
cidr = '10.0.0.0/24'
ipam_subnet = self._create_and_allocate_ipam_subnet(
cidr)[0]
ipam_subnet.subnet_manager.delete_allocation_pools = mock.Mock()
ipam_subnet.create_allocation_pools = mock.Mock()
alloc_pools = [netaddr.IPRange('10.0.0.2', '10.0.0.254')]
# Make sure allocation pools recreation does not happen in case of
# unchanged allocation pools
ipam_subnet.update_allocation_pools(alloc_pools, cidr)
self.assertFalse(
ipam_subnet.subnet_manager.delete_allocation_pools.called)
self.assertFalse(ipam_subnet.create_allocation_pools.called)
def _test__no_pool_changes(self, new_pools):
    """Return the driver's _no_pool_changes() verdict for *new_pools*.

    Stubs the subnet manager so the "stored" pools are always
    192.168.10.20-41 and 192.168.10.50-60.
    """
    # Renamed from 'id' to avoid shadowing the builtin.
    subnet_id = 'some-id'
    ipam_subnet = driver.NeutronDbSubnet(subnet_id, self.ctx)
    pools = [db_models.IpamAllocationPool(ipam_subnet_id=subnet_id,
                                          first_ip='192.168.10.20',
                                          last_ip='192.168.10.41'),
             db_models.IpamAllocationPool(ipam_subnet_id=subnet_id,
                                          first_ip='192.168.10.50',
                                          last_ip='192.168.10.60')]
    ipam_subnet.subnet_manager.list_pools = mock.Mock(return_value=pools)
    return ipam_subnet._no_pool_changes(self.ctx, new_pools)
def test__no_pool_changes_negative(self):
    """Pool sets differing from the stored ones must report a change."""
    candidate_pool_sets = [
        [netaddr.IPRange('192.168.10.2', '192.168.10.254')],
        [netaddr.IPRange('192.168.10.20', '192.168.10.41')],
        [netaddr.IPRange('192.168.10.20', '192.168.10.41'),
         netaddr.IPRange('192.168.10.51', '192.168.10.60')],
    ]
    for candidate in candidate_pool_sets:
        self.assertFalse(self._test__no_pool_changes(candidate))
def test__no_pool_changes_positive(self):
    """Pools identical to the stored ones must report "no change"."""
    pools = [netaddr.IPRange('192.168.10.20', '192.168.10.41'),
             netaddr.IPRange('192.168.10.50', '192.168.10.60')]
    self.assertTrue(self._test__no_pool_changes(pools))
| {
"content_hash": "d988214343c821b81f87b400f657310c",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 79,
"avg_line_length": 44.72355769230769,
"alnum_prop": 0.5860790110185434,
"repo_name": "cloudbase/neutron",
"id": "4183f990dff4ef55428710ba1e3cd90424e664fe",
"size": "19242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9942988"
},
{
"name": "Shell",
"bytes": "14325"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import webnotes
@webnotes.whitelist()
def update(ml):
    """Persist the hidden-modules list and notify the user.

    :param ml: value stored under the 'hidden_modules' global.
        NOTE(review): presumably a serialized list of module names sent by
        the client page -- confirm against the caller.
    """
    webnotes.conn.set_global('hidden_modules', ml)
    webnotes.msgprint('Updated')
webnotes.clear_cache() | {
"content_hash": "55f2287b872ddd2ff68f1851d1c1d1f1",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 47,
"avg_line_length": 24.22222222222222,
"alnum_prop": 0.7477064220183486,
"repo_name": "rohitw1991/innoworth-lib",
"id": "5e4d6e0261037de5978c75be07a5b21d4a5de2c5",
"size": "305",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "core/page/modules_setup/modules_setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "102097"
},
{
"name": "JavaScript",
"bytes": "1442740"
},
{
"name": "Python",
"bytes": "576012"
}
],
"symlink_target": ""
} |
"""Provide trimming of input reads from Fastq or BAM files.
"""
import os
import sys
from bcbio import utils
from bcbio.utils import (file_exists, append_stem, replace_directory)
from bcbio.log import logger
from bcbio.distributed import objectstore
from bcbio.provenance import do
from Bio.Seq import Seq
from bcbio.distributed.transaction import file_transaction
# Reads shorter than this after trimming are discarded by cutadapt
# (--minimum-length).
MINIMUM_LENGTH = 25

# Built-in adapter sequences selectable via the "adapters" algorithm key.
# NOTE(review): assumed to match the named chemistries -- confirm against
# the vendor adapter documentation before editing.
SUPPORTED_ADAPTERS = {
    "illumina": ["AACACTCTTTCCCT", "AGATCGGAAGAGCG"],
    "truseq": ["AGATCGGAAGAG"],
    "polya": ["AAAAAAAAAAAAA"],
    "nextera": ["AATGATACGGCGA", "CAAGCAGAAGACG"]}
def trim_adapters(fastq_files, out_dir, config):
    """
    Trim adapter sequences from fastq files with cutadapt.

    for small insert sizes, the read length can be longer than the insert
    resulting in the reverse complement of the 3' adapter being sequenced.
    this takes adapter sequences and trims the only the reverse complement
    of the adapter
    MYSEQUENCEAAAARETPADA -> MYSEQUENCEAAAA (no polyA trim)

    :param fastq_files: list of one (SE) or two (PE) fastq paths
    :param out_dir: directory receiving the ".trimmed" output files
    :param config: bcbio config dict (reads algorithm.quality_format,
        algorithm.adapters, algorithm.custom_trim)
    :returns: list of trimmed fastq paths, parallel to fastq_files
    """
    quality_format = _get_quality_format(config)
    to_trim = _get_sequences_to_trim(config, SUPPORTED_ADAPTERS)
    # Output names mirror the inputs with a ".trimmed" stem inside out_dir.
    out_files = replace_directory(append_stem(fastq_files, ".trimmed"), out_dir)
    out_files = _cutadapt_trim(fastq_files, quality_format, to_trim, out_files, config)
    return out_files
def _cutadapt_trim(fastq_files, quality_format, adapters, out_files, config):
    """Trimming with cutadapt, using version installed with bcbio-nextgen.

    Uses the system executable to find the version next to our Anaconda Python.
    TODO: Could we use cutadapt as a library to avoid this?

    IMPORTANT: ``cmd`` is a template string whose {placeholders} (of1_tx,
    of2_tx, tmp_fq1, tmp_fq2, ...) are resolved from THIS function's
    locals() at do.run() time -- the local variable names below are
    load-bearing and must match the placeholders built by
    _cutadapt_se_cmd/_cutadapt_pe_nosickle.
    """
    # Idempotence: skip the run entirely if every output already exists.
    if all([file_exists(x) for x in out_files]):
        return out_files
    cmd = _cutadapt_trim_cmd(fastq_files, quality_format, adapters, out_files)
    if len(fastq_files) == 1:
        of1 = out_files[0]
        message = "Trimming %s in single end mode with cutadapt." % (fastq_files[0])
        with file_transaction(config, of1) as of1_tx:
            do.run(cmd.format(**locals()), message)
    else:
        with file_transaction(config, out_files) as tx_out_files:
            of1_tx, of2_tx = tx_out_files
            # Intermediate names for the two-pass paired-end trim.
            tmp_fq1 = append_stem(of1_tx, ".tmp")
            tmp_fq2 = append_stem(of2_tx, ".tmp")
            # NOTE(review): singles_file looks unused by the nosickle
            # command template -- presumably a leftover from the sickle
            # workflow; confirm before removing.
            singles_file = of1_tx + ".single"
            message = "Trimming %s and %s in paired end mode with cutadapt." % (fastq_files[0],
                                                                                fastq_files[1])
            do.run(cmd.format(**locals()), message)
    return out_files
def _cutadapt_trim_cmd(fastq_files, quality_format, adapters, out_files):
    """Build the cutadapt command line as a format-template string.

    The returned string still contains {placeholders} (of1, of1_tx, tmp_fq1,
    ...) which the caller (_cutadapt_trim) fills in from its locals().

    :param fastq_files: one (SE) or two (PE) input fastq paths
    :param quality_format: "illumina" (phred+64) or anything else (phred+33)
    :param adapters: adapter sequences to pass as --adapter options
    :param out_files: output paths, used to pick SE vs PE command shape
    :returns: command template string

    NOTE: a previous version short-circuited with ``return out_files`` when
    all outputs existed -- the wrong type for a command builder (callers
    immediately .format() the result), and dead code anyway because
    _cutadapt_trim performs the same existence check first. Removed.
    """
    if quality_format == "illumina":
        quality_base = "64"
    else:
        quality_base = "33"
    # --times=2 tries twice remove adapters which will allow things like:
    # realsequenceAAAAAAadapter to remove both the poly-A and the adapter
    # this behavior might not be what we want; we could also do two or
    # more passes of cutadapt
    cutadapt = os.path.join(os.path.dirname(sys.executable), "cutadapt")
    adapter_cmd = " ".join(map(lambda x: "--adapter=" + x, adapters))
    base_cmd = ("{cutadapt} --times=2 --quality-base={quality_base} "
                "--quality-cutoff=5 --format=fastq "
                "{adapter_cmd} ").format(**locals())
    if len(fastq_files) == 1:
        return _cutadapt_se_cmd(fastq_files, out_files, base_cmd)
    else:
        return _cutadapt_pe_nosickle(fastq_files, out_files, quality_format, base_cmd)
def _cutadapt_se_cmd(fastq_files, out_files, base_cmd):
    """
    this has to use the -o option, not redirect to stdout in order for gzipping to be
    honored

    The "{of1}" placeholder is left unresolved for the caller to fill in.
    """
    cmd = base_cmd + " --minimum-length={min_length} ".format(
        min_length=MINIMUM_LENGTH)
    input_fq = objectstore.cl_input(fastq_files[0])
    of1 = out_files[0]
    cmd += " -o {of1} " + str(input_fq)
    return cmd
def _cutadapt_pe_nosickle(fastq_files, out_files, quality_format, base_cmd):
    """
    sickle has an issue with 0 length reads, here is the open issue for it:
    https://github.com/najoshi/sickle/issues/32
    until that is resolved, this is a workaround which avoids using sickle

    Runs cutadapt twice (R1-then-R2, then swapped) so both mates get
    trimmed symmetrically, then removes the intermediates. The
    {tmp_fq1}/{tmp_fq2}/{of1_tx}/{of2_tx} placeholders are resolved from
    the CALLER's locals() in _cutadapt_trim, not here.
    """
    fq1, fq2 = [objectstore.cl_input(x) for x in fastq_files]
    of1, of2 = out_files
    base_cmd += " --minimum-length={min_length} ".format(min_length=MINIMUM_LENGTH)
    # Pass 1: trim R1/R2 into temporary files.
    first_cmd = base_cmd + " -o {tmp_fq1} -p {tmp_fq2} " + fq1 + " " + fq2
    # Pass 2: swap mates so the second read gets the same treatment.
    second_cmd = base_cmd + " -o {of2_tx} -p {of1_tx} {tmp_fq2} {tmp_fq1}"
    return first_cmd + ";" + second_cmd + "; rm {tmp_fq1} {tmp_fq2} "
def _get_sequences_to_trim(config, builtin):
    """Collect every sequence cutadapt should trim: the user's custom
    sequences, poly-A (plus its reverse complement), and each configured
    built-in adapter in both orientations."""
    adapters = _get_builtin_adapters(config, builtin)
    polya = adapters.get("polya", [None])[0]
    # allow for trimming of custom sequences for advanced users
    custom_trim = config["algorithm"].get("custom_trim", [])
    adapters = {name: seqs for name, seqs in adapters.items() if
                name != "polya"}
    trim_sequences = custom_trim
    # for unstranded RNA-seq, libraries, both polyA and polyT can appear
    # at the 3' end as well
    if polya:
        trim_sequences += [polya, str(Seq(polya).reverse_complement())]
    # also trim the reverse complement of the adapters
    for seqs in adapters.values():
        trim_sequences += [str(Seq(sequence)) for sequence in seqs]
        trim_sequences += [str(Seq(sequence).reverse_complement()) for
                           sequence in seqs]
    return trim_sequences
def _get_quality_format(config):
SUPPORTED_FORMATS = ["illumina", "standard"]
quality_format = config["algorithm"].get("quality_format", "standard").lower()
if quality_format not in SUPPORTED_FORMATS:
logger.error("quality_format is set to an unsupported format. "
"Supported formats are %s."
% (", ".join(SUPPORTED_FORMATS)))
exit(1)
return quality_format
def _get_builtin_adapters(config, builtin):
chemistries = config["algorithm"].get("adapters", [])
adapters = {chemistry: builtin[chemistry] for
chemistry in chemistries if chemistry in builtin}
return adapters
| {
"content_hash": "a9e632fdd120bf71975fe06cc91f944c",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 95,
"avg_line_length": 42.90604026845637,
"alnum_prop": 0.645236977944627,
"repo_name": "elkingtonmcb/bcbio-nextgen",
"id": "628561175417a1fc63c00dd0839e43ce70d768b1",
"size": "6393",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bcbio/bam/trim.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1485192"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14156"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, Timer
from cocotb.regression import TestFactory
from cocotbext.axi import AxiStreamSource
from cocotbext.uart import UartSink
class TB:
    """Testbench wrapper for the uart_tx DUT.

    Drives the input with an AXI-Stream source and checks the serial
    output with a UART sink at the same baud rate.
    """

    def __init__(self, dut, baud=3e6):
        self.dut = dut

        self.log = logging.getLogger("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # 8 ns period -> 125 MHz core clock.
        cocotb.fork(Clock(dut.clk, 8, units="ns").start())

        self.source = AxiStreamSource(dut, "s_axis", dut.clk, dut.rst)
        # Data width of the sink follows the DUT's s_axis_tdata width.
        self.sink = UartSink(dut.txd, baud=baud, bits=len(dut.s_axis_tdata), stop_bits=1)

        # prescale = clk_freq / (baud * 8): the DUT oversamples each bit
        # 8 times. NOTE(review): inferred from the 1/8e-9 clock term --
        # confirm against the uart_tx RTL.
        dut.prescale.setimmediatevalue(int(1/8e-9/baud/8))

    async def reset(self):
        """Pulse dut.rst for two clock cycles, with settle cycles around it."""
        self.dut.rst.setimmediatevalue(0)
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst <= 1
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst <= 0
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
async def run_test(dut, payload_lengths=None, payload_data=None):
    """Stream each generated payload into the DUT and verify the bytes
    that come out of the UART sink match what went in byte-for-byte.

    payload_lengths/payload_data are factory-injected callables (see the
    TestFactory registration below).
    """
    tb = TB(dut)

    await tb.reset()

    for test_data in [payload_data(x) for x in payload_lengths()]:
        await tb.source.write(test_data)

        # Drain the sink until the full payload has been received.
        rx_data = bytearray()
        while len(rx_data) < len(test_data):
            rx_data.extend(await tb.sink.read())

        tb.log.info("Read data: %s", rx_data)

        # NOTE(review): rx_data is logged but not compared to test_data
        # here -- presumably the UartSink framing check is the implicit
        # assertion; confirm intent.
        assert tb.sink.empty()

    # Let any trailing activity settle before the test ends.
    await Timer(2, 'us')
    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
def prbs31(state=0x7fffffff):
    """Infinite generator of PRBS-31 bytes.

    Advances the 31-bit LFSR (taps at bits 27 and 30) eight shifts per
    yielded value and emits the low byte of the state.
    """
    while True:
        for _ in range(8):
            feedback = bool(state & 0x08000000) ^ bool(state & 0x40000000)
            shifted = (state & 0x3fffffff) << 1
            state = (shifted | 1) if feedback else shifted
        yield state & 0xff
def size_list():
    """Payload sizes to test: every length 1..15 plus one 128-byte case."""
    return [*range(1, 16), 128]
def incrementing_payload(length):
    """Return *length* bytes counting 0..255 and wrapping around."""
    return bytearray(i % 256 for i in range(length))
def prbs_payload(length):
    """Return *length* pseudo-random bytes from a fresh PRBS-31 stream."""
    stream = prbs31()
    return bytearray(next(stream) for _ in range(length))
# When running inside a simulator, register the parametrized cocotb tests:
# every payload-length list crossed with every payload generator.
if cocotb.SIM_NAME:

    factory = TestFactory(run_test)
    factory.add_option("payload_lengths", [size_list])
    factory.add_option("payload_data", [incrementing_payload, prbs_payload])
    factory.generate_tests()
# cocotb-test
# Paths to this test directory and the RTL sources, resolved relative to
# this file so pytest can be launched from any working directory.
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
def test_uart_tx(request):
    """pytest entry point: compile uart_tx.v and run the cocotb tests above
    via cocotb-test, in a per-test sim_build directory."""
    dut = "uart_tx"
    toplevel = dut
    module = os.path.splitext(os.path.basename(__file__))[0]

    verilog_sources = [os.path.join(rtl_dir, f"{dut}.v")]

    parameters = {'DATA_WIDTH': 8}

    # Forward HDL parameters to the simulation environment.
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    # Sanitize the parametrized test name for use as a directory name.
    safe_name = request.node.name.replace('[', '-').replace(']', '')
    sim_build = os.path.join(tests_dir, "sim_build", safe_name)

    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
| {
"content_hash": "c171a882c1c13f49734be3313db70435",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 89,
"avg_line_length": 27.73548387096774,
"alnum_prop": 0.6662014421958595,
"repo_name": "alexforencich/xfcp",
"id": "647ea7fd2f508da1e8a44e0b5aab60e8d6a13a38",
"size": "4321",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/uart/tb/uart_tx/test_uart_tx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "259479"
},
{
"name": "Python",
"bytes": "3200270"
},
{
"name": "Shell",
"bytes": "14435"
},
{
"name": "Tcl",
"bytes": "29878"
},
{
"name": "Verilog",
"bytes": "4179456"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the MonCal charged heavy booster
    ship component (SWG template-factory convention: one create() per
    .iff object)."""
    result = Tangible()

    result.template = "object/tangible/ship/components/booster/shared_bst_moncal_charged_heavy.iff"
    result.attribute_template_id = 8
    result.stfName("space/space_item","bst_moncal_charged_heavy_n")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
return result | {
"content_hash": "0cb78cbf097e2f77957e1e043b116892",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 96,
"avg_line_length": 26.846153846153847,
"alnum_prop": 0.7163323782234957,
"repo_name": "obi-two/Rebelion",
"id": "a5cd1d903a0a58b37287b993a42ea2926e8030c6",
"size": "494",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/ship/components/booster/shared_bst_moncal_charged_heavy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import glob
import os
from resource_management.core import shell, sudo
from resource_management.core.logger import Logger
from resource_management.core.resources import Directory
from resource_management.core.resources.system import Execute, File
from resource_management.core.source import InlineTemplate
from resource_management.libraries import XmlConfig
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.check_process_status import \
check_process_status
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.stack_features import \
check_stack_feature
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.script.script import Script
class Master(Script):
def install(self, env):
    """Install Zeppelin packages and log which Spark versions are present.

    The detected spark/spark2 versions are appended to the Zeppelin log
    for later troubleshooting.
    """
    import params
    env.set_params(params)
    self.install_packages(env)
    self.create_zeppelin_log_dir(env)
    if params.spark_version:
        Execute('echo spark_version:' + str(params.spark_version) + ' detected for spark_home: '
                + params.spark_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
    if params.spark2_version:
        Execute('echo spark2_version:' + str(params.spark2_version) + ' detected for spark2_home: '
                + params.spark2_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
def create_zeppelin_dir(self, params):
    """Create Zeppelin's HDFS directories and upload the Spark
    interpreter dependency jar.

    Creates /user/{zeppelin_user}, /user/{zeppelin_user}/test and
    /apps/zeppelin, then copies the zeppelin-spark dependencies jar into
    params.spark_jar_dir (world-readable, overwriting any stale copy).
    All queued HdfsResource operations are flushed by the final
    action="execute" call.
    """
    params.HdfsResource(format("/user/{zeppelin_user}"),
                        type="directory",
                        action="create_on_execute",
                        owner=params.zeppelin_user,
                        recursive_chown=True,
                        recursive_chmod=True
                        )
    params.HdfsResource(format("/user/{zeppelin_user}/test"),
                        type="directory",
                        action="create_on_execute",
                        owner=params.zeppelin_user,
                        recursive_chown=True,
                        recursive_chmod=True
                        )
    params.HdfsResource(format("/apps/zeppelin"),
                        type="directory",
                        action="create_on_execute",
                        owner=params.zeppelin_user,
                        recursive_chown=True,
                        recursive_chmod=True
                        )

    spark_deps_full_path = self.get_zeppelin_spark_dependencies()[0]
    spark_dep_file_name = os.path.basename(spark_deps_full_path)

    params.HdfsResource(params.spark_jar_dir + "/" + spark_dep_file_name,
                        type="file",
                        action="create_on_execute",
                        source=spark_deps_full_path,
                        group=params.zeppelin_group,
                        owner=params.zeppelin_user,
                        mode=0444,
                        replace_existing_files=True,
                        )

    params.HdfsResource(None, action="execute")
def create_zeppelin_log_dir(self, env):
    """Ensure the local Zeppelin log directory exists with 0755 and the
    zeppelin user/group as owner."""
    import params
    env.set_params(params)
    Directory([params.zeppelin_log_dir],
              owner=params.zeppelin_user,
              group=params.zeppelin_group,
              cd_access="a",
              create_parents=True,
              mode=0755
              )
def create_zeppelin_hdfs_conf_dir(self, env):
    """Ensure the local directory that mirrors Hadoop/HBase client configs
    (params.external_dependency_conf) exists with 0755."""
    import params
    env.set_params(params)
    Directory([params.external_dependency_conf],
              owner=params.zeppelin_user,
              group=params.zeppelin_group,
              cd_access="a",
              create_parents=True,
              mode=0755
              )
def chown_zeppelin_pid_dir(self, env):
    """Recursively chown the pid directory to zeppelin_user:zeppelin_group
    (runs via sudo since the agent may not own it)."""
    import params
    env.set_params(params)
    Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), params.zeppelin_pid_dir),
            sudo=True)
def configure(self, env):
    """Render all local Zeppelin configuration.

    Creates pid/install dirs, writes zeppelin-site.xml, zeppelin-env.sh,
    shiro.ini and log4j.properties, and -- when HBase is part of the
    cluster -- mirrors hbase/hdfs/core-site.xml into the external
    dependency conf dir so interpreters can reach those services.
    """
    import params
    import status_params
    env.set_params(params)
    env.set_params(status_params)
    self.create_zeppelin_log_dir(env)

    # create the pid and zeppelin dirs
    Directory([params.zeppelin_pid_dir, params.zeppelin_dir],
              owner=params.zeppelin_user,
              group=params.zeppelin_group,
              cd_access="a",
              create_parents=True,
              mode=0755
              )
    self.chown_zeppelin_pid_dir(env)

    # write out zeppelin-site.xml
    XmlConfig("zeppelin-site.xml",
              conf_dir=params.conf_dir,
              configurations=params.config['configurations']['zeppelin-config'],
              owner=params.zeppelin_user,
              group=params.zeppelin_group
              )
    # write out zeppelin-env.sh
    env_content = InlineTemplate(params.zeppelin_env_content)
    File(format("{params.conf_dir}/zeppelin-env.sh"), content=env_content,
         owner=params.zeppelin_user, group=params.zeppelin_group)

    # write out shiro.ini
    shiro_ini_content = InlineTemplate(params.shiro_ini_content)
    File(format("{params.conf_dir}/shiro.ini"), content=shiro_ini_content,
         owner=params.zeppelin_user, group=params.zeppelin_group)

    # write out log4j.properties
    File(format("{params.conf_dir}/log4j.properties"), content=params.log4j_properties_content,
         owner=params.zeppelin_user, group=params.zeppelin_group)

    self.create_zeppelin_hdfs_conf_dir(env)

    if len(params.hbase_master_hosts) > 0 and params.is_hbase_installed:
        # copy hbase-site.xml
        XmlConfig("hbase-site.xml",
                  conf_dir=params.external_dependency_conf,
                  configurations=params.config['configurations']['hbase-site'],
                  configuration_attributes=params.config['configuration_attributes']['hbase-site'],
                  owner=params.zeppelin_user,
                  group=params.zeppelin_group,
                  mode=0644)

        XmlConfig("hdfs-site.xml",
                  conf_dir=params.external_dependency_conf,
                  configurations=params.config['configurations']['hdfs-site'],
                  configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
                  owner=params.zeppelin_user,
                  group=params.zeppelin_group,
                  mode=0644)

        XmlConfig("core-site.xml",
                  conf_dir=params.external_dependency_conf,
                  configurations=params.config['configurations']['core-site'],
                  configuration_attributes=params.config['configuration_attributes']['core-site'],
                  owner=params.zeppelin_user,
                  group=params.zeppelin_group,
                  mode=0644)
def check_and_copy_notebook_in_hdfs(self, params):
    """Seed the HDFS notebook directory from the local install if absent.

    Relative zeppelin.notebook.dir values are rooted under the zeppelin
    user's HDFS home. If the target directory does not exist yet, it is
    created and populated from the packaged notebooks on local disk.
    """
    if params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir'].startswith("/"):
        notebook_directory = params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
    else:
        notebook_directory = "/user/" + format("{zeppelin_user}") + "/" + \
                             params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']

    if not self.is_directory_exists_in_HDFS(notebook_directory, params.zeppelin_user):
        # hdfs dfs -mkdir {notebook_directory}
        params.HdfsResource(format("{notebook_directory}"),
                            type="directory",
                            action="create_on_execute",
                            owner=params.zeppelin_user,
                            recursive_chown=True,
                            recursive_chmod=True
                            )

        # hdfs dfs -put /usr/hdp/current/zeppelin-server/notebook/ {notebook_directory}
        params.HdfsResource(format("{notebook_directory}"),
                            type="directory",
                            action="create_on_execute",
                            source=params.notebook_dir,
                            owner=params.zeppelin_user,
                            recursive_chown=True,
                            recursive_chmod=True
                            )
def stop(self, env, upgrade_type=None):
    """Stop the Zeppelin daemon via its control script, appending the
    script's output to the Zeppelin log."""
    import params
    self.create_zeppelin_log_dir(env)
    self.chown_zeppelin_pid_dir(env)
    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh stop >> ' + params.zeppelin_log_file,
            user=params.zeppelin_user)
def start(self, env, upgrade_type=None):
    """Configure and (re)start the Zeppelin daemon.

    Renders configuration, fixes ownership, kinits when kerberized, seeds
    HDFS notebook/conf storage as configured, ensures interpreter.json
    exists (optionally upgrading interpreter settings), then restarts the
    daemon and logs the resulting pid file.
    """
    import params
    import status_params
    self.configure(env)

    Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), "/etc/zeppelin"),
            sudo=True)
    Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"),
             os.path.join(params.zeppelin_dir, "notebook")), sudo=True)

    if params.security_enabled:
        zeppelin_kinit_cmd = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal}; ")
        Execute(zeppelin_kinit_cmd, user=params.zeppelin_user)

    # FileSystemNotebookRepo keeps notebooks in HDFS: seed them if needed.
    if 'zeppelin.notebook.storage' in params.config['configurations']['zeppelin-config'] \
            and params.config['configurations']['zeppelin-config']['zeppelin.notebook.storage'] == 'org.apache.zeppelin.notebook.repo.FileSystemNotebookRepo':
        self.check_and_copy_notebook_in_hdfs(params)

    zeppelin_spark_dependencies = self.get_zeppelin_spark_dependencies()
    if zeppelin_spark_dependencies and os.path.exists(zeppelin_spark_dependencies[0]):
        self.create_zeppelin_dir(params)

    if params.conf_stored_in_hdfs:
        if not self.is_directory_exists_in_HDFS(self.get_zeppelin_conf_FS_directory(params), params.zeppelin_user):
            # hdfs dfs -mkdir {zeppelin's conf directory}
            params.HdfsResource(self.get_zeppelin_conf_FS_directory(params),
                                type="directory",
                                action="create_on_execute",
                                owner=params.zeppelin_user,
                                recursive_chown=True,
                                recursive_chmod=True
                                )

    # if first_setup:
    if not glob.glob(params.conf_dir + "/interpreter.json") and \
            not os.path.exists(params.conf_dir + "/interpreter.json"):
        self.create_interpreter_json()

    if params.zeppelin_interpreter_config_upgrade == True:
        self.reset_interpreter_settings()
        self.update_zeppelin_interpreter()

    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh restart >> '
            + params.zeppelin_log_file, user=params.zeppelin_user)
    pidfile = glob.glob(os.path.join(status_params.zeppelin_pid_dir,
                                     'zeppelin-' + params.zeppelin_user + '*.pid'))[0]
    Logger.info(format("Pid file is: {pidfile}"))
def status(self, env):
    """Liveness check: delegate to check_process_status, which raises when
    the Zeppelin daemon pid file is absent or the process is gone."""
    import status_params
    env.set_params(status_params)

    pid_pattern = (status_params.zeppelin_pid_dir + '/zeppelin-' +
                   status_params.zeppelin_user + '*.pid')
    matches = glob.glob(pid_pattern)
    # Never started -> no pid file; hand an empty path to the checker so
    # it reports the component as stopped.
    pid_file = matches[0] if matches else ''
    check_process_status(pid_file)
def reset_interpreter_settings(self):
    """Merge stack-default interpreter settings into interpreter.json.

    For every interpreter in the bundled template: if it is missing from
    the live config it is added whole; otherwise any group / property /
    option present in the template but absent locally is added. Existing
    local values are never overwritten.
    """
    import json
    import interpreter_json_template
    # The module is immediately shadowed by its parsed 'interpreterSettings'
    # dict -- kept as-is to preserve the original names.
    interpreter_json_template = json.loads(interpreter_json_template.template)['interpreterSettings']
    config_data = self.get_interpreter_settings()
    interpreter_settings = config_data['interpreterSettings']

    for setting_key in interpreter_json_template.keys():
        if setting_key not in interpreter_settings:
            interpreter_settings[setting_key] = interpreter_json_template[
                setting_key]
        else:
            templateGroups = interpreter_json_template[setting_key]['interpreterGroup']
            groups = interpreter_settings[setting_key]['interpreterGroup']

            templateProperties = interpreter_json_template[setting_key]['properties']
            properties = interpreter_settings[setting_key]['properties']

            templateOptions = interpreter_json_template[setting_key]['option']
            options = interpreter_settings[setting_key]['option']

            # search for difference in groups from current interpreter and template interpreter
            # if any group exists in template but doesn't exist in current interpreter, it will be added
            group_names = []
            for group in groups:
                group_names.append(group['name'])

            for template_group in templateGroups:
                if not template_group['name'] in group_names:
                    groups.append(template_group)

            # search for difference in properties from current interpreter and template interpreter
            # if any property exists in template but doesn't exist in current interpreter, it will be added
            for template_property in templateProperties:
                if not template_property in properties:
                    properties[template_property] = templateProperties[template_property]

            # search for difference in options from current interpreter and template interpreter
            # if any option exists in template but doesn't exist in current interpreter, it will be added
            for template_option in templateOptions:
                if not template_option in options:
                    options[template_option] = templateOptions[template_option]

    self.set_interpreter_settings(config_data)
def pre_upgrade_restart(self, env, upgrade_type=None):
    """Before a rolling-upgrade restart, point the stack-select symlinks at
    the target version (only when the stack supports rolling upgrade)."""
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
        stack_select.select_packages(params.version)
def get_zeppelin_conf_FS_directory(self, params):
    """Return the filesystem directory that stores Zeppelin's interpreter
    configuration (zeppelin.config.fs.dir), absolutized if needed."""
    conf_fs_dir = params.config['configurations']['zeppelin-config']['zeppelin.config.fs.dir']

    # Anything that is neither absolute ("/...") nor a full URI
    # ("hdfs://", "file://", ...) is rooted under the zeppelin user's home.
    is_absolute = conf_fs_dir.startswith("/") or '://' in conf_fs_dir
    if not is_absolute:
        conf_fs_dir = "/user/" + format("{zeppelin_user}") + "/" + conf_fs_dir
    return conf_fs_dir
def get_zeppelin_conf_FS(self, params):
    """Full filesystem path of Zeppelin's interpreter.json in shared storage."""
    return self.get_zeppelin_conf_FS_directory(params) + "/interpreter.json"
def is_directory_exists_in_HDFS(self, path, as_user):
    """Return True iff *path* exists in HDFS and is a directory (checked
    with `hdfs dfs -test -d` as *as_user*, kinit'ing first if kerberized)."""
    kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
    kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")

    #-d: if the path is a directory, return 0.
    test_output = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -d {path};echo $?"),
                             user=as_user)[1]

    # Without kerberos the output may be prefixed by
    # "-bash: kinit: command not found"; the echoed exit status is always
    # the last line.
    if "\n" in test_output:
        test_output = test_output.split("\n").pop()

    # '0' means the directory exists; anything else ('1') means it doesn't.
    return test_output == '0'
def is_file_exists_in_HDFS(self, path, as_user):
    """Return True iff *path* exists in HDFS, is a regular file AND is
    non-empty (two `hdfs dfs -test` probes: -f then -z), run as *as_user*."""
    kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
    kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")

    #-f: if the path is a file, return 0.
    path_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -f {path};echo $?"),
                             user=as_user)[1]

    # if there is no kerberos setup then the string will contain "-bash: kinit: command not found"
    if "\n" in path_exists:
        path_exists = path_exists.split("\n").pop()

    # '1' means it does not exists
    if path_exists == '0':
        #-z: if the file is zero length, return 0.
        path_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -z {path};echo $?"),
                                 user=as_user)[1]
        if "\n" in path_exists:
            path_exists = path_exists.split("\n").pop()
        # Non-zero here means the file is NOT zero-length, i.e. usable.
        if path_exists != '0':
            return True

    return False
def copy_interpreter_from_HDFS_to_FS(self, params):
    """If interpreter.json is kept in HDFS and exists there, replace the
    local copy with it.

    :returns: True when the HDFS copy was pulled down, False otherwise.
    """
    if params.conf_stored_in_hdfs:
        zeppelin_conf_fs = self.get_zeppelin_conf_FS(params)

        if self.is_file_exists_in_HDFS(zeppelin_conf_fs, params.zeppelin_user):
            # copy from hdfs to /etc/zeppelin/conf/interpreter.json
            kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
            kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
            interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
            # Remove the local file first: `dfs -get` refuses to overwrite.
            shell.call(format("rm {interpreter_config};"
                              "{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -get {zeppelin_conf_fs} {interpreter_config}"),
                       user=params.zeppelin_user)
            return True
    return False
def get_interpreter_settings(self):
    """Load and return interpreter.json as a dict, refreshing the local
    copy from HDFS first when configuration is stored there."""
    import params
    import json

    self.copy_interpreter_from_HDFS_to_FS(params)
    interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
    config_content = sudo.read_file(interpreter_config)
    config_data = json.loads(config_content)
    return config_data
def set_interpreter_settings(self, config_data):
    """Write *config_data* to the local interpreter.json and, when conf is
    stored in HDFS, push the new file there (delete + recreate)."""
    import params
    import json

    interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
    File(interpreter_config,
         group=params.zeppelin_group,
         owner=params.zeppelin_user,
         mode=0644,
         content=json.dumps(config_data, indent=2))

    if params.conf_stored_in_hdfs:
        #delete file from HDFS, as the `replace_existing_files` logic checks length of file which can remain same.
        params.HdfsResource(self.get_zeppelin_conf_FS(params),
                            type="file",
                            action="delete_on_execute")

        #recreate file in HDFS from LocalFS
        params.HdfsResource(self.get_zeppelin_conf_FS(params),
                            type="file",
                            action="create_on_execute",
                            source=interpreter_config,
                            owner=params.zeppelin_user,
                            recursive_chown=True,
                            recursive_chmod=True,
                            replace_existing_files=True)
def update_kerberos_properties(self):
    """Sync kerberos principal/keytab settings into interpreter.json.

    For the livy, spark, jdbc and sh interpreter groups: when the cluster
    is kerberized (and principal+keytab are set) the kerberos properties
    are filled in; otherwise they are reset to empty/SIMPLE. For jdbc,
    additionally appends the HBase znode parent to phoenix.url if missing.
    """
    import params
    config_data = self.get_interpreter_settings()
    interpreter_settings = config_data['interpreterSettings']
    for interpreter_setting in interpreter_settings:
        interpreter = interpreter_settings[interpreter_setting]
        if interpreter['group'] == 'livy':
            if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
                interpreter['properties']['zeppelin.livy.principal'] = params.zeppelin_kerberos_principal
                interpreter['properties']['zeppelin.livy.keytab'] = params.zeppelin_kerberos_keytab
            else:
                interpreter['properties']['zeppelin.livy.principal'] = ""
                interpreter['properties']['zeppelin.livy.keytab'] = ""
        elif interpreter['group'] == 'spark':
            if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
                interpreter['properties']['spark.yarn.principal'] = params.zeppelin_kerberos_principal
                interpreter['properties']['spark.yarn.keytab'] = params.zeppelin_kerberos_keytab
            else:
                interpreter['properties']['spark.yarn.principal'] = ""
                interpreter['properties']['spark.yarn.keytab'] = ""
        elif interpreter['group'] == 'jdbc':
            if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
                interpreter['properties']['zeppelin.jdbc.auth.type'] = "KERBEROS"
                interpreter['properties']['zeppelin.jdbc.principal'] = params.zeppelin_kerberos_principal
                interpreter['properties']['zeppelin.jdbc.keytab.location'] = params.zeppelin_kerberos_keytab
                # Keep phoenix.url pointing at the secured HBase znode.
                if params.zookeeper_znode_parent \
                        and params.hbase_zookeeper_quorum \
                        and 'phoenix.url' in interpreter['properties'] \
                        and params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']:
                    interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
                                                               params.hbase_zookeeper_quorum + ':' + \
                                                               params.zookeeper_znode_parent
            else:
                interpreter['properties']['zeppelin.jdbc.auth.type'] = "SIMPLE"
                interpreter['properties']['zeppelin.jdbc.principal'] = ""
                interpreter['properties']['zeppelin.jdbc.keytab.location'] = ""
        elif interpreter['group'] == 'sh':
            if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
                interpreter['properties']['zeppelin.shell.auth.type'] = "KERBEROS"
                interpreter['properties']['zeppelin.shell.principal'] = params.zeppelin_kerberos_principal
                interpreter['properties']['zeppelin.shell.keytab.location'] = params.zeppelin_kerberos_keytab
            else:
                interpreter['properties']['zeppelin.shell.auth.type'] = ""
                interpreter['properties']['zeppelin.shell.principal'] = ""
                interpreter['properties']['zeppelin.shell.keytab.location'] = ""

    self.set_interpreter_settings(config_data)
def update_zeppelin_interpreter(self):
  """Rewrite Zeppelin's interpreter settings to match the cluster layout.

  Reads the current settings, drops interpreter groups that are not in
  the configured whitelist, fills in connection URLs and drivers for the
  jdbc / livy / spark interpreter groups, persists the result, then
  re-applies the kerberos properties.
  """
  import params
  config_data = self.get_interpreter_settings()
  interpreter_settings = config_data['interpreterSettings']
  if params.zeppelin_interpreter:
    # Keep only the interpreter groups listed in params.zeppelin_interpreter.
    settings_to_delete = []
    for settings_key, interpreter in interpreter_settings.items():
      if interpreter['group'] not in params.zeppelin_interpreter:
        settings_to_delete.append(settings_key)
    for key in settings_to_delete:
      del interpreter_settings[key]
  hive_interactive_properties_key = 'hive_interactive'
  # NOTE(review): entries are deleted from interpreter_settings while
  # iterating .keys(); this relies on Python 2 semantics (a py3 port would
  # need list(interpreter_settings.keys())).
  for setting_key in interpreter_settings.keys():
    interpreter = interpreter_settings[setting_key]
    if interpreter['group'] == 'jdbc' and interpreter['name'] == 'jdbc':
      interpreter['dependencies'] = []
      # When only HiveServer2 Interactive is deployed, expose it under the
      # plain 'hive' property prefix instead of 'hive_interactive'.
      if not params.hive_server_host and params.hive_server_interactive_hosts:
        hive_interactive_properties_key = 'hive'
      if params.hive_server_host:
        interpreter['properties']['hive.driver'] = 'org.apache.hive.jdbc.HiveDriver'
        interpreter['properties']['hive.user'] = 'hive'
        interpreter['properties']['hive.password'] = ''
        interpreter['properties']['hive.proxy.user.property'] = 'hive.server2.proxy.user'
        if params.hive_server2_support_dynamic_service_discovery:
          # Discover HS2 through ZooKeeper rather than a fixed host:port.
          interpreter['properties']['hive.url'] = 'jdbc:hive2://' + \
              params.hive_zookeeper_quorum + \
              '/;' + 'serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=' + \
              params.hive_zookeeper_namespace
        else:
          interpreter['properties']['hive.url'] = 'jdbc:hive2://' + \
              params.hive_server_host + \
              ':' + params.hive_server_port
        # Preserve an operator-set value; only default when absent.
        if 'hive.splitQueries' not in interpreter['properties']:
          interpreter['properties']["hive.splitQueries"] = "true"
      if params.hive_server_interactive_hosts:
        # Same block as above, but under the interactive property prefix.
        interpreter['properties'][hive_interactive_properties_key + '.driver'] = 'org.apache.hive.jdbc.HiveDriver'
        interpreter['properties'][hive_interactive_properties_key + '.user'] = 'hive'
        interpreter['properties'][hive_interactive_properties_key + '.password'] = ''
        interpreter['properties'][hive_interactive_properties_key + '.proxy.user.property'] = 'hive.server2.proxy.user'
        if params.hive_server2_support_dynamic_service_discovery:
          interpreter['properties'][hive_interactive_properties_key + '.url'] = 'jdbc:hive2://' + \
              params.hive_zookeeper_quorum + \
              '/;' + 'serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=' + \
              params.hive_interactive_zookeeper_namespace
        else:
          interpreter['properties'][hive_interactive_properties_key + '.url'] = 'jdbc:hive2://' + \
              params.hive_server_interactive_hosts + \
              ':' + params.hive_server_port
        if hive_interactive_properties_key + '.splitQueries' not in interpreter['properties']:
          interpreter['properties'][hive_interactive_properties_key + '.splitQueries'] = "true"
      if params.spark_thrift_server_hosts:
        interpreter['properties']['spark.driver'] = 'org.apache.hive.jdbc.HiveDriver'
        interpreter['properties']['spark.user'] = 'hive'
        interpreter['properties']['spark.password'] = ''
        interpreter['properties']['spark.proxy.user.property'] = 'hive.server2.proxy.user'
        interpreter['properties']['spark.url'] = 'jdbc:hive2://' + \
            params.spark_thrift_server_hosts + ':' + params.spark_hive_thrift_port + '/'
        if params.hive_principal:
          interpreter['properties']['spark.url'] += ';principal=' + params.hive_principal
        if params.hive_transport_mode:
          interpreter['properties']['spark.url'] += ';transportMode=' + params.hive_transport_mode
        if 'spark.splitQueries' not in interpreter['properties']:
          interpreter['properties']['spark.splitQueries'] = "true"
      if params.spark2_thrift_server_hosts:
        interpreter['properties']['spark2.driver'] = 'org.apache.hive.jdbc.HiveDriver'
        interpreter['properties']['spark2.user'] = 'hive'
        interpreter['properties']['spark2.password'] = ''
        interpreter['properties']['spark2.proxy.user.property'] = 'hive.server2.proxy.user'
        interpreter['properties']['spark2.url'] = 'jdbc:hive2://' + \
            params.spark2_thrift_server_hosts + ':' + params.spark2_hive_thrift_port + '/'
        if params.hive_principal:
          interpreter['properties']['spark2.url'] += ';principal=' + params.hive_principal
        if params.hive_transport_mode:
          interpreter['properties']['spark2.url'] += ';transportMode=' + params.hive_transport_mode
        if 'spark2.splitQueries' not in interpreter['properties']:
          interpreter['properties']['spark2.splitQueries'] = "true"
      if params.zookeeper_znode_parent \
              and params.hbase_zookeeper_quorum:
        # Phoenix over HBase; only the URL is always overwritten, the rest
        # keep operator-provided values.
        interpreter['properties']['phoenix.driver'] = 'org.apache.phoenix.jdbc.PhoenixDriver'
        if 'phoenix.hbase.client.retries.number' not in interpreter['properties']:
          interpreter['properties']['phoenix.hbase.client.retries.number'] = '1'
        if 'phoenix.phoenix.query.numberFormat' not in interpreter['properties']:
          interpreter['properties']['phoenix.phoenix.query.numberFormat'] = '#.#'
        if 'phoenix.user' not in interpreter['properties']:
          interpreter['properties']['phoenix.user'] = 'phoenixuser'
        if 'phoenix.password' not in interpreter['properties']:
          interpreter['properties']['phoenix.password'] = ''
        interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
            params.hbase_zookeeper_quorum + ':' + \
            params.zookeeper_znode_parent
        if 'phoenix.splitQueries' not in interpreter['properties']:
          interpreter['properties']['phoenix.splitQueries'] = "true"
    elif interpreter['group'] == 'livy' and interpreter['name'] == 'livy':
      if params.livy_livyserver_host:
        interpreter['properties']['zeppelin.livy.url'] = params.livy_livyserver_protocol + \
            "://" + params.livy_livyserver_host + \
            ":" + params.livy_livyserver_port
      else:
        # No livy server in the cluster: remove the interpreter entirely.
        del interpreter_settings[setting_key]
    elif interpreter['group'] == 'livy' and interpreter['name'] == 'livy2':
      if params.livy2_livyserver_host:
        interpreter['properties']['zeppelin.livy.url'] = params.livy2_livyserver_protocol + \
            "://" + params.livy2_livyserver_host + \
            ":" + params.livy2_livyserver_port
      else:
        del interpreter_settings[setting_key]
    elif interpreter['group'] == 'spark' and interpreter['name'] == 'spark':
      if 'spark-env' in params.config['configurations']:
        interpreter['properties']['master'] = "yarn-client"
        interpreter['properties']['SPARK_HOME'] = "/usr/hdp/current/spark-client/"
      else:
        del interpreter_settings[setting_key]
    elif interpreter['group'] == 'spark' and interpreter['name'] == 'spark2':
      if 'spark2-env' in params.config['configurations']:
        interpreter['properties']['master'] = "yarn-client"
        interpreter['properties']['SPARK_HOME'] = "/usr/hdp/current/spark2-client/"
      else:
        del interpreter_settings[setting_key]
  self.set_interpreter_settings(config_data)
  self.update_kerberos_properties()
def create_interpreter_json(self):
  """Seed interpreter.json from the bundled template if no copy exists.

  If a previously saved interpreter.json could not be fetched from HDFS,
  write the default template to the local conf dir and, when configs are
  kept in HDFS, upload the fresh file there as well.
  """
  import interpreter_json_template
  import params
  if not self.copy_interpreter_from_HDFS_to_FS(params):
    interpreter_json = interpreter_json_template.template
    File(format("{params.conf_dir}/interpreter.json"),
         content=interpreter_json,
         owner=params.zeppelin_user,
         group=params.zeppelin_group,
         # NOTE(review): 0664 is a Python 2 octal literal (0o664 in py3);
         # confirm the py2 agent runtime before any porting effort.
         mode=0664)
    if params.conf_stored_in_hdfs:
      params.HdfsResource(self.get_zeppelin_conf_FS(params),
                          type="file",
                          action="create_on_execute",
                          source=format("{params.conf_dir}/interpreter.json"),
                          owner=params.zeppelin_user,
                          recursive_chown=True,
                          recursive_chmod=True,
                          replace_existing_files=True)
def get_zeppelin_spark_dependencies(self):
  """Return paths of the bundled zeppelin-spark-dependencies jar files."""
  import params
  jar_pattern = (params.zeppelin_dir +
                 '/interpreter/spark/dep/zeppelin-spark-dependencies*.jar')
  return glob.glob(jar_pattern)
# Script entry point: dispatch the Ambari command (install/start/stop/...)
# to the Master service implementation.
if __name__ == "__main__":
  Master().execute()
| {
"content_hash": "8052aa25f75b5f58c8cb9810bf9baf64",
"timestamp": "",
"source": "github",
"line_count": 657,
"max_line_length": 154,
"avg_line_length": 48.336377473363775,
"alnum_prop": 0.6206505652297132,
"repo_name": "arenadata/ambari",
"id": "6607c4c31308475599ea9c32406b3b6ed2b62fb9",
"size": "31779",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
from django import forms
from django.db import models
from django.contrib.auth.models import User
from gym_app.models import User, Athlete, Tracker, Exercise, PersonalTrainer, BodyScreening
from django.db.models import Q
class UserForm(forms.ModelForm):
    """Registration form for the django ``User`` model.

    Every widget carries the bootstrap ``form-control`` CSS class.
    """
    username = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control"}))
    email = forms.CharField(widget=forms.EmailInput(attrs={'class': "form-control"}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class' : 'form-control'}))
    first_name = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control"}))
    last_name = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control"}))

    def __init__(self, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        # Hide django's default help text under these fields.
        for fieldname in ['username', 'password', 'email']:
            self.fields[fieldname].help_text = None

    class Meta:
        model = User
        fields = ('username','password', 'email', 'first_name', 'last_name')
class UserEditForm(forms.ModelForm):
    """Profile-editing form: lets a user change name and e-mail.

    The previous no-op ``__init__`` override (it only called ``super()``)
    was removed; ``ModelForm``'s own initializer is sufficient.
    """
    first_name = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control"}))
    last_name = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control"}))
    email = forms.CharField(widget=forms.EmailInput(attrs={'class': "form-control"}))

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')
class ChangePasswordForm(forms.ModelForm):
    """Single-field form for setting a new password on a ``User``."""
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': "form-control"}))

    class Meta:
        model = User
        fields = ('password', )
class AthleteForm(forms.ModelForm):
    """Athlete profile form; choice sets come from the ``Athlete`` model."""
    level = forms.ChoiceField(widget=forms.Select(attrs={'class': "form-control"}), choices=Athlete.LEVELS)
    training_period = forms.ChoiceField(widget=forms.Select(attrs={'class': "form-control"}), choices=Athlete.TRAINING_PERIOD)
    gender = forms.ChoiceField(widget=forms.Select(attrs={'class': "form-control"}), choices=Athlete.GENDERS)

    class Meta:
        model = Athlete
        fields = ('level', 'training_period', 'gender')
class PersonalTrainerForm(forms.ModelForm):
    """Personal-trainer profile form.

    NOTE(review): the gender choices are borrowed from ``Athlete.GENDERS``;
    presumably the two models share the same gender codes — confirm.
    """
    gender = forms.ChoiceField(widget=forms.Select(attrs={'class': "form-control"}), choices=Athlete.GENDERS)

    class Meta:
        model = PersonalTrainer
        fields = ('gender',)
class ExerciseForm(forms.ModelForm):
    """Form for recording one exercise entry (weight, reps, sets)."""
    weight = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control"}))
    repetition = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control"}))
    sets = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control"}))

    class Meta:
        model = Exercise
        fields = ('weight','repetition', 'sets')
class BodyScreeningForm(forms.ModelForm):
    """Body-measurement form; all widgets use the model's defaults."""
    class Meta:
        model = BodyScreening
        fields = ('triceps', 'biceps', 'subscapular','supraspinale','suprailic',
                  'abdominal','chest','thigh','calf','weight','feet', 'inches')
class AthleteSelectForm(forms.Form):
    """Dropdown of users in the 'premium' group, keyed by username."""
    athlete = forms.ModelChoiceField(queryset=User.objects.filter( Q(groups__name='premium')), empty_label='...', to_field_name='username', widget=forms.Select(attrs={'class': "form-control"}))
class AthleteWorkoutDaySelectForm(forms.Form):
    """Pick a premium athlete plus a workout day (1-7 or the whole week)."""
    athlete = forms.ModelChoiceField(queryset=User.objects.filter(Q(groups__name='premium')), empty_label='...', to_field_name='username', widget=forms.Select(attrs={'class': "form-control"}))
    # (submitted value, display label) pairs for the day selector.
    DAYS = (
        ('Whole week', 'Whole week'),
        ('1', '1'),
        ('2', '2'),
        ('3', '3'),
        ('4', '4'),
        ('5', '5'),
        ('6', '6'),
        ('7', '7'),
    )
    day = forms.ChoiceField(
        widget=forms.Select(attrs={'class': "form-control"}), choices=DAYS,
        required=True,
        initial='Whole week'
    )
class UserTypeForm(forms.Form):
    """Choose the account type at signup (athlete vs. personal trainer)."""
    # (group name stored in auth, human-readable label)
    GROUPS = (
        ('regular', 'Athlete'),
        ('personal_trainer', 'Personal Trainer'),
    )
    group = forms.ChoiceField(widget=forms.Select(attrs={'class': "form-control"}),choices=GROUPS, required=True, label='User Type')
class PaymentForm(forms.Form):
    """Debit/credit card payment form: holder, number, CCV and expiry.

    Fix: the December expiry-month choice was mislabelled 'Oct (12)';
    it now reads 'Dec (12)'. Submitted values are unchanged.
    """
    owner = forms.CharField(
        max_length=100,
        required=True,
        label='Name on Card',
        widget=forms.TextInput(attrs={'class': "form-control", 'placeholder' : "Card Holder's Name"})
    )
    number = forms.CharField(
        max_length=16,
        required=True,
        label='Card Number',
        widget=forms.TextInput(attrs={'class': "form-control", 'placeholder' : "Debit/Credit Card Number"})
    )
    ccv = forms.CharField(
        max_length=4,
        required=True,
        label='Card CCV',
        widget=forms.TextInput(attrs={'class': "form-control", 'placeholder' : "Security Code"})
    )
    # (submitted value, display label) pairs for the expiry month.
    MONTHS = (
        ('01', 'Jan (01)'),
        ('02', 'Feb (02)'),
        ('03', 'Mar (03)'),
        ('04', 'Apr (04)'),
        ('05', 'May (05)'),
        ('06', 'Jun (06)'),
        ('07', 'Jul (07)'),
        ('08', 'Aug (08)'),
        ('09', 'Sep (09)'),
        ('10', 'Oct (10)'),
        ('11', 'Nov (11)'),
        ('12', 'Dec (12)'),  # was mislabelled 'Oct (12)'
    )
    YEARS = (
        ('15','2015'),
        ('16','2016'),
        ('17','2017'),
        ('18','2018'),
        ('19','2019'),
        ('20','2020'),
        ('21','2021'),
        ('22','2022'),
        ('23','2023'),
        ('24','2024'),
        ('25','2025'),
        ('26','2026'),
    )
    month = forms.ChoiceField(
        choices=MONTHS,
        required=True,
        label='Month',
        widget=forms.Select(attrs={'class': "form-control col-sm-2"})
    )
    year = forms.ChoiceField(
        choices=YEARS,
        required=True,
        label='Year',
        widget=forms.Select(attrs={'class': "form-control col-sm-2"}),
    )
class UserGenderForm(forms.Form):
    """Standalone gender selector with 'F' preselected."""
    GENDERS = (
        ('F', 'Female'),
        ('M', 'Male'),
    )
    gender = forms.ChoiceField(
        widget=forms.Select(attrs={'class': "form-control"}), choices=GENDERS,
        required=True,
        initial='F'
    )
| {
"content_hash": "2d9d58017928e0bf856620384dd8c825",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 193,
"avg_line_length": 34.388888888888886,
"alnum_prop": 0.5870759289176091,
"repo_name": "brunoliveira8/managyment",
"id": "7523ee7530dcd222b9da73baf3134ef30be54283",
"size": "6190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/gym_app/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "222466"
},
{
"name": "HTML",
"bytes": "115210"
},
{
"name": "JavaScript",
"bytes": "286340"
},
{
"name": "Python",
"bytes": "54867"
}
],
"symlink_target": ""
} |
from cosmo_tester.framework import util
from cosmo_tester.framework.examples import get_example_deployment
def _infra(image_based_manager, ssh_key, logger, tenant, test_config):
    """Build the 'infra' example deployment from the fake_vm blueprint."""
    deployment = get_example_deployment(
        image_based_manager, ssh_key, logger, tenant, test_config,
        upload_plugin=False)
    deployment.blueprint_file = util.get_resource_path(
        'blueprints/service_composition/fake_vm.yaml'
    )
    deployment.blueprint_id = deployment.deployment_id = 'infra'
    deployment.inputs = {'agent_user': image_based_manager.username}
    return deployment
def _app(image_based_manager, ssh_key, logger, tenant, test_config,
         blueprint_name, app_name='app', client_ip=None,
         client_username='admin', client_password='admin', ca_cert_path=None):
    """Build an application example deployment from the given blueprint.

    The client_* inputs point the app at a manager REST endpoint; when
    client_ip is not supplied the manager's own private address is used.
    """
    if not client_ip:
        client_ip = image_based_manager.private_ip_address
    example = get_example_deployment(
        image_based_manager, ssh_key, logger, tenant, test_config,
        upload_plugin=False)
    example.blueprint_file = util.get_resource_path(
        'blueprints/service_composition/{0}.yaml'.format(blueprint_name)
    )
    example.blueprint_id = app_name
    example.deployment_id = app_name
    example.inputs['agent_user'] = image_based_manager.username
    example.inputs['client_ip'] = client_ip
    example.inputs['client_tenant'] = tenant
    example.inputs['client_username'] = client_username
    example.inputs['client_password'] = client_password
    # The CA cert is only needed for TLS-verified client connections.
    if ca_cert_path:
        example.inputs['ca_cert_path'] = ca_cert_path
    example.create_secret = False  # don't try to create it twice
    return example
def _check_custom_execute_operation(app, logger):
    """Run the maintenance.poll operation on 'app' and assert it terminates."""
    logger.info('Testing custom execute operation.')
    util.run_blocking_execution(
        app.manager.client, 'app', 'execute_operation', logger,
        params={'operation': 'maintenance.poll', 'node_ids': 'app'})
    # run_blocking_execution waits, so the first listed execution should
    # already be in a terminal state.
    assert app.manager.client.executions.list(
        workflow_id='execute_operation')[0].status == 'terminated'
def _verify_deployments_and_nodes(app, number_of_deployments):
    """Check deployment count, node spread and compute instance counts."""
    client = app.manager.client

    # Every expected deployment must exist.
    assert len(client.deployments.list()) == number_of_deployments

    # The nodes must span exactly that many deployments.
    nodes = client.nodes.list()
    deployment_ids = {node.deployment_id for node in nodes}
    assert len(deployment_ids) == number_of_deployments

    # Each compute node carries exactly one instance.
    compute_nodes = [n for n in nodes if n.type == 'cloudify.nodes.Compute']
    for compute_node in compute_nodes:
        assert compute_node.number_of_instances == 1
| {
"content_hash": "6295b457fa9049e368b21234785cf389",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 39.77777777777778,
"alnum_prop": 0.6887470071827614,
"repo_name": "cloudify-cosmo/cloudify-system-tests",
"id": "902dc27e9d80fd1cb0e843d12bb542c840628049",
"size": "2506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cosmo_tester/test_suites/service_composition/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "487590"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
"""Signature Version 4 test suite.
AWS provides a test suite for signature version 4:
https://github.com/awslabs/aws-c-auth/tree/v0.3.15/tests/aws-sig-v4-test-suite
This module contains logic to run these tests. The test files were
placed in ./aws4_testsuite, and we're using nose's test generators to
dynamically generate testcases based on these files.
"""
import os
import logging
import io
import datetime
import re
from botocore.compat import six, urlsplit, parse_qsl, HAS_CRT
import mock
import botocore.auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials
SECRET_KEY = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"
ACCESS_KEY = 'AKIDEXAMPLE'
DATE = datetime.datetime(2015, 8, 30, 12, 36, 0)
SERVICE = 'service'
REGION = 'us-east-1'
TESTSUITE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'aws4_testsuite')
# The following tests are not run. Each test has a comment as
# to why the test is being ignored.
TESTS_TO_IGNORE = [
# Bad request-line syntax, python's HTTP parser chokes on this.
'normalize-path/get-space',
# Multiple query params of the same key not supported by the SDKs.
'get-vanilla-query-order-key-case',
'get-vanilla-query-order-key',
'get-vanilla-query-order-value',
]
if not six.PY3:
TESTS_TO_IGNORE += [
# NO support
'get-header-key-duplicate',
'get-header-value-order',
]
log = logging.getLogger(__name__)
class RawHTTPRequest(six.moves.BaseHTTPServer.BaseHTTPRequestHandler):
    """Parse a raw HTTP request string with the stdlib HTTP parser.

    Instead of serving a socket, the raw bytes are fed through rfile and
    parse_request(); parse errors are captured in error_code /
    error_message rather than written to a client.
    """
    def __init__(self, raw_request):
        if isinstance(raw_request, six.text_type):
            raw_request = raw_request.encode('utf-8')
        self.rfile = six.BytesIO(raw_request)
        self.raw_requestline = self.rfile.readline()
        self.error_code = None
        self.error_message = None
        self.parse_request()

    def send_error(self, code, message):
        # Override: record the parse error instead of writing a response.
        self.error_code = code
        self.error_message = message
def test_generator():
    """Nose-style test generator: yield one (test_func, case) per .req file.

    botocore.auth's datetime is patched so every signature is computed at
    the fixed DATE the reference files were generated with.
    """
    datetime_patcher = mock.patch.object(
        botocore.auth.datetime, 'datetime',
        mock.Mock(wraps=datetime.datetime)
    )
    mocked_datetime = datetime_patcher.start()
    mocked_datetime.utcnow.return_value = DATE
    for (dirpath, dirnames, filenames) in os.walk(TESTSUITE_DIR):
        if not any(f.endswith('.req') for f in filenames):
            continue
        test_case = os.path.relpath(dirpath, TESTSUITE_DIR)
        if test_case in TESTS_TO_IGNORE:
            log.debug("Skipping test: %s", test_case)
            continue
        # The CRT signer is exercised instead of the pure-python one when
        # the CRT extension is installed.
        if HAS_CRT:
            yield (_test_crt_signature_version_4, test_case)
        else:
            yield (_test_signature_version_4, test_case)
    # NOTE(review): stop() only runs once the generator is fully exhausted;
    # a runner that aborts early leaves the datetime patch active.
    datetime_patcher.stop()
def create_request_from_raw_request(raw_request):
    """Turn the raw HTTP text of a test case into an AWSRequest.

    Raises if the stdlib parser rejected the request line/headers.
    """
    request = AWSRequest()
    raw = RawHTTPRequest(raw_request)
    if raw.error_code is not None:
        raise Exception(raw.error_message)
    request.method = raw.command
    datetime_now = DATE
    request.context['timestamp'] = datetime_now.strftime('%Y%m%dT%H%M%SZ')
    for key, val in raw.headers.items():
        request.headers[key] = val
    request.data = raw.rfile.read()
    host = raw.headers.get('host', '')
    # For whatever reason, the BaseHTTPRequestHandler encodes
    # the first line of the response as 'iso-8859-1',
    # so we need decode this into utf-8.
    if isinstance(raw.path, six.text_type):
        raw.path = raw.path.encode('iso-8859-1').decode('utf-8')
    url = 'https://%s%s' % (host, raw.path)
    if '?' in url:
        # Split the query string off so the signer canonicalizes it itself.
        split_url = urlsplit(url)
        params = dict(parse_qsl(split_url.query))
        request.url = split_url.path
        request.params = params
    else:
        request.url = url
    return request
def _test_signature_version_4(test_case):
    """Sign a test-case request with SigV4Auth and compare to references."""
    test_case = _SignatureTestCase(test_case)
    request = create_request_from_raw_request(test_case.raw_request)

    auth = botocore.auth.SigV4Auth(test_case.credentials, SERVICE, REGION)
    actual_canonical_request = auth.canonical_request(request)
    actual_string_to_sign = auth.string_to_sign(request,
                                                actual_canonical_request)
    auth.add_auth(request)
    actual_auth_header = request.headers['Authorization']

    # Some stuff only works right when you go through auth.add_auth()
    # So don't assert the interim steps unless the end result was wrong.
    if actual_auth_header != test_case.authorization_header:
        assert_equal(actual_canonical_request, test_case.canonical_request,
                     test_case.raw_request, 'canonical_request')
        assert_equal(actual_string_to_sign, test_case.string_to_sign,
                     test_case.raw_request, 'string_to_sign')
        assert_equal(actual_auth_header, test_case.authorization_header,
                     test_case.raw_request, 'authheader')
def _test_crt_signature_version_4(test_case):
    """Sign a test-case request with the CRT signer; compare the final header.

    NOTE(review): this references botocore.crt.auth, but only
    ``import botocore.auth`` is visible at the top of this file —
    presumably importing botocore.auth makes the crt submodule reachable
    when HAS_CRT; confirm.
    """
    test_case = _SignatureTestCase(test_case)
    request = create_request_from_raw_request(test_case.raw_request)

    # Use CRT logging to diagnose interim steps (canonical request, etc)
    # import awscrt.io
    # awscrt.io.init_logging(awscrt.io.LogLevel.Trace, 'stdout')
    auth = botocore.crt.auth.CrtSigV4Auth(test_case.credentials,
                                          SERVICE, REGION)
    auth.add_auth(request)
    actual_auth_header = request.headers['Authorization']

    # The CRT signer exposes no interim steps, so only the final
    # Authorization header is compared.
    assert_equal(actual_auth_header, test_case.authorization_header,
                 test_case.raw_request, 'authheader')
def assert_equal(actual, expected, raw_request, part):
    """Raise AssertionError with a diagnostic message when values differ.

    The message names the mismatching part, shows both values, and
    includes the raw request that produced them.
    """
    if actual == expected:
        return
    pieces = [
        "The %s did not match" % part,
        "\nACTUAL:%r !=\nEXPECT:%r" % (actual, expected),
        '\nThe raw request was:\n%s' % raw_request,
    ]
    raise AssertionError(''.join(pieces))
class _SignatureTestCase(object):
    """Load one signature test case's reference files from TESTSUITE_DIR.

    Exposes raw_request, canonical_request, string_to_sign,
    authorization_header, signed_request and the credentials (including
    the session token when the canonical request carries one).
    """
    def __init__(self, test_case):
        filepath = os.path.join(TESTSUITE_DIR, test_case,
                                os.path.basename(test_case))
        # We're using io.open() because we need to open these files with
        # a specific encoding, and in 2.x io.open is the best way to do this.
        self.raw_request = io.open(filepath + '.req',
                                   encoding='utf-8').read()
        # The .creq/.sts/.authz references are normalized to LF endings.
        self.canonical_request = io.open(
            filepath + '.creq',
            encoding='utf-8').read().replace('\r', '')
        self.string_to_sign = io.open(
            filepath + '.sts',
            encoding='utf-8').read().replace('\r', '')
        self.authorization_header = io.open(
            filepath + '.authz',
            encoding='utf-8').read().replace('\r', '')
        self.signed_request = io.open(filepath + '.sreq',
                                      encoding='utf-8').read()

        # Use a token iff the reference canonical request signs one.
        token_pattern = r'^x-amz-security-token:(.*)$'
        token_match = re.search(token_pattern, self.canonical_request,
                                re.MULTILINE)
        token = token_match.group(1) if token_match else None
        self.credentials = Credentials(ACCESS_KEY, SECRET_KEY, token)
| {
"content_hash": "ddcf44020562ed7634084abbbd41fde7",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 78,
"avg_line_length": 36.979166666666664,
"alnum_prop": 0.6384507042253521,
"repo_name": "pplu/botocore",
"id": "6183ddc123e05c8935050a102131e17ab93f5896",
"size": "7667",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/auth/test_sigv4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "23824"
},
{
"name": "Python",
"bytes": "2691062"
}
],
"symlink_target": ""
} |
from backends import Backend, register_backend
import subprocess
import os
class Text(Backend):
    """Regression-test backend: extract text with pdftotext and diff it.

    Fixes: file handles in _create_diff are now closed via context
    managers even when reading or diffing raises (they previously leaked
    on exception); a stray trailing semicolon was removed.
    """

    def __init__(self, name):
        Backend.__init__(self, name, '.diff')
        self._pdftotext = os.path.join(self._utilsdir, 'pdftotext')

    def create_refs(self, doc_path, refs_path):
        """Run pdftotext on doc_path, writing <refs_path>/text.txt."""
        out_path = os.path.join(refs_path, 'text')
        p = subprocess.Popen([self._pdftotext, doc_path, out_path + '.txt'],
                             stderr=subprocess.PIPE)
        return self._check_exit_status(p, out_path)

    def _create_diff(self, ref_path, result_path):
        """Write a unified diff of ref vs. result to result_path + '.diff'."""
        import difflib
        with open(ref_path, 'r') as ref, open(result_path, 'r') as result:
            diff = difflib.unified_diff(ref.readlines(), result.readlines(),
                                        ref_path, result_path)
            with open(result_path + '.diff', 'w') as diff_file:
                diff_file.writelines(diff)
register_backend('text', Text)
| {
"content_hash": "720aa30a98a0d8437f1a6f3701dc0d51",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 102,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.6149732620320856,
"repo_name": "zhongzw/skia-sdl",
"id": "10b660a3e20278b82f727d741bf40ec0b34d758a",
"size": "1713",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "third_party/externals/poppler/regtest/backends/text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "81892"
},
{
"name": "C",
"bytes": "6328790"
},
{
"name": "C++",
"bytes": "23135795"
},
{
"name": "CSS",
"bytes": "2700"
},
{
"name": "Java",
"bytes": "23451"
},
{
"name": "JavaScript",
"bytes": "27099"
},
{
"name": "Logos",
"bytes": "3432"
},
{
"name": "Makefile",
"bytes": "9796"
},
{
"name": "Objective-C",
"bytes": "278384"
},
{
"name": "Objective-C++",
"bytes": "111018"
},
{
"name": "Perl",
"bytes": "971"
},
{
"name": "Python",
"bytes": "352394"
},
{
"name": "R",
"bytes": "4069"
},
{
"name": "Rebol",
"bytes": "1540"
},
{
"name": "Shell",
"bytes": "323723"
}
],
"symlink_target": ""
} |
"""This module contains description of objects returned by the
conductor.
The actual objects returned are located in resource.py, whose aim
is to hide some necessary magic. The current module describes the
objects' fields via docstrings and contains implementation of helper methods.
"""
from oslo.config import cfg
from sahara.utils import configs
from sahara.utils import remote
CONF = cfg.CONF
CONF.import_opt('node_domain', 'sahara.config')
class Cluster(object):
    """An object representing Cluster.

    id
    name
    description
    tenant_id
    trust_id
    is_transient
    plugin_name
    hadoop_version
    cluster_configs - configs dict converted to object,
                      see the docs for details
    default_image_id
    anti_affinity
    management_private_key
    management_public_key
    user_keypair_id
    status
    status_description
    info
    extra
    rollback_info - internal information required for rollback
    sahara_info - internal information about sahara settings
    node_groups - list of NodeGroup objects
    cluster_template_id
    cluster_template - ClusterTemplate object
    """
class NodeGroup(object):
    """An object representing Node Group.

    id
    name
    flavor_id
    image_id
    image_username
    node_processes - list of node processes
    node_configs - configs dict converted to object,
                   see the docs for details
    volumes_per_node
    volumes_size
    volume_mount_prefix
    floating_ip_pool - Floating IP Pool name used to assign Floating IPs to
                       instances in this Node Group
    security_groups - List of security groups for instances in this Node Group
    auto_security_group - indicates if Sahara should create additional
                          security group for the Node Group
    open_ports - List of ports that will be opened if auto_security_group is
                 True
    count
    instances - list of Instance objects
    node_group_template_id
    node_group_template - NodeGroupTemplate object

    If node group belongs to cluster:
    cluster_id - parent Cluster ID
    cluster - parent Cluster object

    If node group belongs to cluster template:
    cluster_template_id - parent ClusterTemplate ID
    cluster_template - parent ClusterTemplate object
    """

    def configuration(self):
        """Return the cluster configs merged with this group's own configs."""
        return configs.merge_configs(self.cluster.cluster_configs,
                                     self.node_configs)

    def storage_paths(self):
        """Return the storage mount points for instances of this group."""
        mount_points = [self.volume_mount_prefix + str(index)
                        for index in range(1, self.volumes_per_node + 1)]
        # Here we assume that NG's instances use ephemeral
        # drives for storage if volumes_per_node == 0
        return mount_points if mount_points else ['/mnt']

    def get_image_id(self):
        """Return this group's image, falling back to the cluster default."""
        return self.image_id or self.cluster.default_image_id
class Instance(object):
    """An object representing Instance.

    id
    node_group_id - parent NodeGroup ID
    node_group - parent NodeGroup object
    instance_id - Nova instance ID
    instance_name
    internal_ip
    management_ip
    volumes
    """

    def hostname(self):
        """Return the instance's host name (its instance name)."""
        return self.instance_name

    def fqdn(self):
        """Return the fully qualified name under the configured node domain."""
        return '.'.join((self.instance_name, CONF.node_domain))

    def remote(self):
        """Return a remote-execution handle for this instance."""
        return remote.get_remote(self)
class ClusterTemplate(object):
    """An object representing Cluster Template.

    id
    name
    description
    cluster_configs - configs dict converted to object,
                      see the docs for details
    default_image_id
    anti_affinity
    tenant_id
    plugin_name
    hadoop_version
    node_groups - list of NodeGroup objects
    """
class NodeGroupTemplate(object):
    """An object representing Node Group Template.

    id
    name
    description
    tenant_id
    flavor_id
    image_id
    plugin_name
    hadoop_version
    node_processes - list of node processes
    node_configs - configs dict converted to object,
                   see the docs for details
    volumes_per_node
    volumes_size
    volume_mount_prefix
    floating_ip_pool
    security_groups
    auto_security_group
    """
# EDP Objects
class DataSource(object):
    """An object representing Data Source.

    id
    tenant_id
    name
    description
    type
    url
    credentials
    """
class JobExecution(object):
    """An object representing JobExecution

    id
    tenant_id
    job_id
    input_id
    output_id
    start_time
    end_time
    cluster_id
    info
    progress
    oozie_job_id
    return_code
    """
class Job(object):
    """An object representing Job

    id
    tenant_id
    name
    description
    type
    mains
    libs
    """
class JobBinary(object):
    """An object representing JobBinary

    id
    tenant_id
    name
    description
    url - URLs may be the following: internal-db://URL, swift://
    extra - extra may contain not only user-password but e.g. auth-token
    """
class JobBinaryInternal(object):
    """An object representing JobBinaryInternal

    Note that the 'data' field is not returned. It uses deferred
    loading and must be requested explicitly with the
    job_binary_get_raw_data() conductor method.

    id
    tenant_id
    name
    datasize
    """
| {
"content_hash": "b459b9c96fd7a5387e4149d420081bda",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 78,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.6548271301719252,
"repo_name": "keedio/sahara",
"id": "6df9a7668b9c85f4791940a48998edb30670ccfd",
"size": "5876",
"binary": false,
"copies": "2",
"ref": "refs/heads/openbus",
"path": "sahara/conductor/objects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "4363"
},
{
"name": "Java",
"bytes": "3609"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "2385994"
},
{
"name": "Shell",
"bytes": "36024"
}
],
"symlink_target": ""
} |
import pytest
@pytest.mark.asyncio
@pytest.mark.usefixtures('mocked_meta_path')
async def test_remote_descriptor():
    """A CommandRemote resolves its class from a module and echoes the param.

    NOTE(review): 'foobar_module' is presumably made importable by the
    mocked_meta_path fixture — confirm against conftest.
    """
    import importlib
    from implant import core

    class Foo(core.Command):
        foobar = core.Parameter(description='foobar')
        remote = core.CommandRemote('foobar_module.Foobar')

    module = importlib.import_module('foobar_module')
    Foo.remote.set_remote_class(module)
    foo = Foo()
    foo.foobar = 'foobar'
    assert await foo.remote(None) == 'foobar'
def test_parameter():
    """Parameters take constructor values, defaults, and iterate as items."""
    from implant import core

    class Foo(core.Command):
        foo = core.Parameter(default='bar')
        bar = core.Parameter()

        def local(self, context):
            pass

        def remote(self, context):
            pass

    foo = Foo(bar='baz')
    assert foo.bar == 'baz'
    # Unset parameter falls back to its declared default.
    assert foo.foo == 'bar'
    # A command iterates as (name, value) pairs over all parameters.
    assert dict(foo) == {'bar': 'baz', 'foo': 'bar'}
def test_parameter_attrib_error():
    """Reading a parameter that was never assigned raises AttributeError."""
    from implant import core

    class Foo(core.Command):
        bar = core.Parameter()

        def local(self, context):
            pass

        def remote(self, context):
            pass

    instance = Foo()
    with pytest.raises(AttributeError):
        instance.bar  # pylint: disable=W0104
def test_cls_params():
from implant import core
| {
"content_hash": "67f052e538fd19a24e18ec088ce0f9af",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 59,
"avg_line_length": 20.70967741935484,
"alnum_prop": 0.6090342679127726,
"repo_name": "diefans/debellator",
"id": "f001a3e691e38e64705e4bfcfcb636cb349ca75e",
"size": "1284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/test_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "140993"
}
],
"symlink_target": ""
} |
import asyncio
import logging
from .client import Client
from .client import InsertError
from .client import PoolError
from .client import QueryError
from .client import ServerError
from .client import UserAuthError
from .helpers import cleanup
from .helpers import gen_data
from .helpers import gen_points
from .helpers import gen_series
from .server import Server
from .siridb import SiriDB
from .testbase import default_test_setup
from .testbase import TestBase
from .series import Series
from .pipe_client import PipeClient as SiriDBAsyncUnixConnection
from .args import parse_args
from .task import Task
async def _run_test(test, loglevel):
    """Run a single test under a progress ``Task``.

    Sets the root logger to *loglevel* for the duration of the run; on
    success the level is restored to CRITICAL and the task's own
    coroutine is awaited.

    Raises whatever ``test.run()`` raised, after marking the task failed.
    """
    logger = logging.getLogger()
    logger.setLevel(loglevel)
    task = Task(test.title)
    try:
        await test.run()
    except Exception:
        task.stop(success=False)
        # Bare ``raise`` preserves the original traceback; the previous
        # ``raise e`` re-raised from this frame instead.
        raise
    else:
        task.stop(success=True)
    # NOTE(review): on failure the logger stays at *loglevel* and
    # ``task.task`` is never awaited -- confirm this is intended.
    logger.setLevel('CRITICAL')
    await task.task
def run_test(test, loglevel='CRITICAL'):
    """Synchronously execute *test* (a TestBase) on the event loop.

    Removes leftovers from previous runs via cleanup(), then blocks
    until the test coroutine completes.
    """
    assert isinstance(test, TestBase)
    event_loop = asyncio.get_event_loop()
    cleanup()
    event_loop.run_until_complete(
        _run_test(test, loglevel))
| {
"content_hash": "3df3886e3bd623c04c178e5ce65e3c2c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 64,
"avg_line_length": 25.155555555555555,
"alnum_prop": 0.7402826855123675,
"repo_name": "transceptor-technology/siridb-server",
"id": "9ce56c322f355ba52bc75134257a83a0f77ebe4f",
"size": "1132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "itest/testing/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1289956"
},
{
"name": "Go",
"bytes": "38146"
},
{
"name": "Makefile",
"bytes": "61306"
},
{
"name": "Objective-C",
"bytes": "1092"
},
{
"name": "Python",
"bytes": "109901"
},
{
"name": "Roff",
"bytes": "5147"
},
{
"name": "Shell",
"bytes": "3517"
}
],
"symlink_target": ""
} |
""" Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline
def reduce_sometrue(a):
    """Collapse `a` by logical-or reduction until it is at most 1-D.

    Repeatedly applies an "any" reduction along axis 0, so each entry of
    the result reports whether any element in the corresponding column
    (across all leading axes) was true.  1-D or scalar input is returned
    unchanged.
    """
    # Use np.any directly: np.sometrue is a deprecated alias of it, and
    # the original also shadowed the builtin ``all`` as a local name.
    result = a
    while np.ndim(result) > 1:
        result = np.any(result, axis=0)
    return result
def prod(x):
    """Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
    # Fold from the first element, exactly like reduce(operator.mul, x);
    # an empty sequence yields the multiplicative identity.
    it = iter(x)
    result = next(it, 1)
    for value in it:
        result = result * value
    return result
def lagrange(x, w):
    """
    Return a Lagrange interpolating polynomial.

    Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.

    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen optimally.

    Parameters
    ----------
    x : array_like
        `x` represents the x-coordinates of a set of datapoints.
    w : array_like
        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).

    Returns
    -------
    lagrange : numpy.poly1d instance
        The Lagrange interpolating polynomial.
    """
    n = len(x)
    p = poly1d(0.0)
    # Sum of basis terms: each term is w[j] times the product of
    # (xp - x[k]) / (x[j] - x[k]) over all k != j.
    for j in range(n):
        term = poly1d(w[j])
        for k in range(n):
            if k == j:
                continue
            term *= poly1d([1.0, -x[k]]) / (x[j] - x[k])
        p += term
    return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
    """
    interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
             fill_value=nan)
    Interpolate over a 2-D grid.
    `x`, `y` and `z` are arrays of values used to approximate some function
    f: ``z = f(x, y)``. This class returns a function whose call method uses
    spline interpolation to find the value of new points.
    If `x` and `y` represent a regular grid, consider using
    RectBivariateSpline.
    Note that calling `interp2d` with NaNs present in input values results in
    undefined behaviour.
    Methods
    -------
    __call__
    Parameters
    ----------
    x, y : array_like
        Arrays defining the data point coordinates.
        If the points lie on a regular grid, `x` can specify the column
        coordinates and `y` the row coordinates, for example::
            >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
        Otherwise, `x` and `y` must specify the full coordinates for each
        point, for example::
            >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
        If `x` and `y` are multi-dimensional, they are flattened before use.
    z : array_like
        The values of the function to interpolate at the data points. If
        `z` is a multi-dimensional array, it is flattened before use. The
        length of a flattened `z` array is either
        len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
        or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
        for each point.
    kind : {'linear', 'cubic', 'quintic'}, optional
        The kind of spline interpolation to use. Default is 'linear'.
    copy : bool, optional
        If True, the class makes internal copies of x, y and z.
        If False, references may be used. The default is to copy.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data (x,y), a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If omitted (None), values outside
        the domain are extrapolated.
    See Also
    --------
    RectBivariateSpline :
        Much faster 2D interpolation if your input data is on a grid
    bisplrep, bisplev :
        Spline interpolation based on FITPACK
    BivariateSpline : a more recent wrapper of the FITPACK routines
    interp1d : one dimension version of this function
    Notes
    -----
    The minimum number of data points required along the interpolation
    axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
    quintic interpolation.
    The interpolator is constructed by `bisplrep`, with a smoothing factor
    of 0. If more control over smoothing is needed, `bisplrep` should be
    used directly.
    Examples
    --------
    Construct a 2-D grid and interpolate on it:
    >>> from scipy import interpolate
    >>> x = np.arange(-5.01, 5.01, 0.25)
    >>> y = np.arange(-5.01, 5.01, 0.25)
    >>> xx, yy = np.meshgrid(x, y)
    >>> z = np.sin(xx**2+yy**2)
    >>> f = interpolate.interp2d(x, y, z, kind='cubic')
    Now use the obtained interpolation function and plot the result:
    >>> import matplotlib.pyplot as plt
    >>> xnew = np.arange(-5.01, 5.01, 1e-2)
    >>> ynew = np.arange(-5.01, 5.01, 1e-2)
    >>> znew = f(xnew, ynew)
    >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
    >>> plt.show()
    """
    def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
                 fill_value=None):
        x = ravel(x)
        y = ravel(y)
        z = asarray(z)
        # The data are on a grid iff z supplies one value per (x, y) pair.
        rectangular_grid = (z.size == len(x) * len(y))
        if rectangular_grid:
            if z.ndim == 2:
                if z.shape != (len(y), len(x)):
                    raise ValueError("When on a regular grid with x.size = m "
                                     "and y.size = n, if z.ndim == 2, then z "
                                     "must have shape (n, m)")
            # Sort each axis into ascending order, permuting the matching
            # dimension of z alongside it.
            if not np.all(x[1:] >= x[:-1]):
                j = np.argsort(x)
                x = x[j]
                z = z[:, j]
            if not np.all(y[1:] >= y[:-1]):
                j = np.argsort(y)
                y = y[j]
                z = z[j, :]
            z = ravel(z.T)
        else:
            z = ravel(z)
            if len(x) != len(y):
                raise ValueError(
                    "x and y must have equal lengths for non rectangular grid")
            if len(z) != len(x):
                raise ValueError(
                    "Invalid length for input z for non rectangular grid")
        # Spline degree for both axes, derived from the interpolation kind.
        try:
            kx = ky = {'linear': 1,
                       'cubic': 3,
                       'quintic': 5}[kind]
        except KeyError:
            raise ValueError("Unsupported interpolation type.")
        if not rectangular_grid:
            # TODO: surfit is really not meant for interpolation!
            self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
        else:
            nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
                x, y, z, None, None, None, None,
                kx=kx, ky=ky, s=0.0)
            self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
                        kx, ky)
        self.bounds_error = bounds_error
        self.fill_value = fill_value
        self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
        # Domain extents, used for bounds checking in __call__.
        self.x_min, self.x_max = np.amin(x), np.amax(x)
        self.y_min, self.y_max = np.amin(y), np.amax(y)
    def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
        """Interpolate the function.
        Parameters
        ----------
        x : 1D array
            x-coordinates of the mesh on which to interpolate.
        y : 1D array
            y-coordinates of the mesh on which to interpolate.
        dx : int >= 0, < kx
            Order of partial derivatives in x.
        dy : int >= 0, < ky
            Order of partial derivatives in y.
        assume_sorted : bool, optional
            If False, values of `x` and `y` can be in any order and they are
            sorted first.
            If True, `x` and `y` have to be arrays of monotonically
            increasing values.
        Returns
        -------
        z : 2D array with shape (len(y), len(x))
            The interpolated values.
        """
        x = atleast_1d(x)
        y = atleast_1d(y)
        if x.ndim != 1 or y.ndim != 1:
            raise ValueError("x and y should both be 1-D arrays")
        if not assume_sorted:
            x = np.sort(x)
            y = np.sort(y)
        if self.bounds_error or self.fill_value is not None:
            out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
            out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
            any_out_of_bounds_x = np.any(out_of_bounds_x)
            any_out_of_bounds_y = np.any(out_of_bounds_y)
        if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
            raise ValueError("Values out of range; x must be in %r, y in %r"
                             % ((self.x_min, self.x_max),
                                (self.y_min, self.y_max)))
        z = fitpack.bisplev(x, y, self.tck, dx, dy)
        z = atleast_2d(z)
        # Transpose so rows follow y (see the documented return shape).
        z = transpose(z)
        if self.fill_value is not None:
            if any_out_of_bounds_x:
                z[:, out_of_bounds_x] = self.fill_value
            if any_out_of_bounds_y:
                z[out_of_bounds_y, :] = self.fill_value
        if len(z) == 1:
            z = z[0]
        return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
    """Helper to check if fill_value == "extrapolate" without warnings"""
    # The isinstance guard keeps the string comparison from running on
    # array-valued fill_value.
    if not isinstance(fill_value, string_types):
        return False
    return fill_value == 'extrapolate'
class interp1d(_Interpolator1D):
    """
    Interpolate a 1-D function.
    `x` and `y` are arrays of values used to approximate some function f:
    ``y = f(x)``. This class returns a function whose call method uses
    interpolation to find the value of new points.
    Note that calling `interp1d` with NaNs present in input values results in
    undefined behaviour.
    Parameters
    ----------
    x : (N,) array_like
        A 1-D array of real values.
    y : (...,N,...) array_like
        A N-D array of real values. The length of `y` along the interpolation
        axis must be equal to the length of `x`.
    kind : str or int, optional
        Specifies the kind of interpolation as a string
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic'
        where 'slinear', 'quadratic' and 'cubic' refer to a spline
        interpolation of first, second or third order) or as an integer
        specifying the order of the spline interpolator to use.
        Default is 'linear'.
    axis : int, optional
        Specifies the axis of `y` along which to interpolate.
        Interpolation defaults to the last axis of `y`.
    copy : bool, optional
        If True, the class makes internal copies of x and y.
        If False, references to `x` and `y` are used. The default is to copy.
    bounds_error : bool, optional
        If True, a ValueError is raised any time interpolation is attempted on
        a value outside of the range of x (where extrapolation is
        necessary). If False, out of bounds values are assigned `fill_value`.
        By default, an error is raised unless `fill_value="extrapolate"`.
    fill_value : array-like or (array-like, array_like) or "extrapolate", optional
        - if a ndarray (or float), this value will be used to fill in for
          requested points outside of the data range. If not provided, then
          the default is NaN. The array-like must broadcast properly to the
          dimensions of the non-interpolation axes.
        - If a two-element tuple, then the first element is used as a
          fill value for ``x_new < x[0]`` and the second element is used for
          ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
          list or ndarray, regardless of shape) is taken to be a single
          array-like argument meant to be used for both bounds as
          ``below, above = fill_value, fill_value``.
          .. versionadded:: 0.17.0
        - If "extrapolate", then points outside the data range will be
          extrapolated. ("nearest" and "linear" kinds only.)
          .. versionadded:: 0.17.0
    assume_sorted : bool, optional
        If False, values of `x` can be in any order and they are sorted first.
        If True, `x` has to be an array of monotonically increasing values.
    Methods
    -------
    __call__
    See Also
    --------
    splrep, splev
        Spline interpolation/smoothing based on FITPACK.
    UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
    interp2d : 2-D interpolation
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy import interpolate
    >>> x = np.arange(0, 10)
    >>> y = np.exp(-x/3.0)
    >>> f = interpolate.interp1d(x, y)
    >>> xnew = np.arange(0, 9, 0.1)
    >>> ynew = f(xnew)   # use interpolation function returned by `interp1d`
    >>> plt.plot(x, y, 'o', xnew, ynew, '-')
    >>> plt.show()
    """
    def __init__(self, x, y, kind='linear', axis=-1,
                 copy=True, bounds_error=None, fill_value=np.nan,
                 assume_sorted=False):
        """ Initialize a 1D linear interpolation class."""
        _Interpolator1D.__init__(self, x, y, axis=axis)
        self.bounds_error = bounds_error  # used by fill_value setter
        self.copy = copy
        # Map string kinds to a spline order; a plain int selects a spline
        # of that order directly.
        if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
            order = {'nearest': 0, 'zero': 0, 'slinear': 1,
                     'quadratic': 2, 'cubic': 3}[kind]
            kind = 'spline'
        elif isinstance(kind, int):
            order = kind
            kind = 'spline'
        elif kind not in ('linear', 'nearest'):
            raise NotImplementedError("%s is unsupported: Use fitpack "
                                      "routines for other types." % kind)
        x = array(x, copy=self.copy)
        y = array(y, copy=self.copy)
        if not assume_sorted:
            ind = np.argsort(x)
            x = x[ind]
            y = np.take(y, ind, axis=axis)
        if x.ndim != 1:
            raise ValueError("the x array must have exactly one dimension.")
        if y.ndim == 0:
            raise ValueError("the y array must have at least one dimension.")
        # Force-cast y to a floating-point type, if it's not yet one
        if not issubclass(y.dtype.type, np.inexact):
            y = y.astype(np.float_)
        # Backward compatibility
        self.axis = axis % y.ndim
        # Interpolation goes internally along the first axis
        self.y = y
        self._y = self._reshape_yi(self.y)
        self.x = x
        del y, x  # clean up namespace to prevent misuse; use attributes
        self._kind = kind
        self.fill_value = fill_value  # calls the setter, can modify bounds_err
        # Adjust to interpolation kind; store reference to *unbound*
        # interpolation methods, in order to avoid circular references to self
        # stored in the bound instance methods, and therefore delayed garbage
        # collection.  See: http://docs.python.org/2/reference/datamodel.html
        if kind in ('linear', 'nearest'):
            # Make a "view" of the y array that is rotated to the interpolation
            # axis.
            minval = 2
            if kind == 'nearest':
                # Do division before addition to prevent possible integer
                # overflow
                self.x_bds = self.x / 2.0
                # Midpoints between breakpoints decide which neighbour is
                # "nearest" in _call_nearest.
                self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
                self._call = self.__class__._call_nearest
            else:
                # Check if we can delegate to numpy.interp (2x-10x faster).
                cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
                cond = cond and self.y.ndim == 1
                cond = cond and not _do_extrapolate(fill_value)
                if cond:
                    self._call = self.__class__._call_linear_np
                else:
                    self._call = self.__class__._call_linear
        else:
            minval = order + 1
            self._spline = make_interp_spline(self.x, self._y, k=order)
            self._call = self.__class__._call_spline
        if len(self.x) < minval:
            raise ValueError("x and y arrays must have at "
                             "least %d entries" % minval)
    @property
    def fill_value(self):
        # backwards compat: mimic a public attribute
        return self._fill_value_orig
    @fill_value.setter
    def fill_value(self, fill_value):
        # extrapolation only works for nearest neighbor and linear methods
        if _do_extrapolate(fill_value):
            if self._kind not in ('nearest', 'linear'):
                raise ValueError("Extrapolation does not work with "
                                 "kind=%s" % self._kind)
            if self.bounds_error:
                raise ValueError("Cannot extrapolate and raise "
                                 "at the same time.")
            self.bounds_error = False
            self._extrapolate = True
        else:
            # Shape of y with the interpolation axis removed; fill values
            # must broadcast up to this.
            broadcast_shape = (self.y.shape[:self.axis] +
                               self.y.shape[self.axis + 1:])
            if len(broadcast_shape) == 0:
                broadcast_shape = (1,)
            # it's either a pair (_below_range, _above_range) or a single value
            # for both above and below range
            if isinstance(fill_value, tuple) and len(fill_value) == 2:
                below_above = [np.asarray(fill_value[0]),
                               np.asarray(fill_value[1])]
                names = ('fill_value (below)', 'fill_value (above)')
                for ii in range(2):
                    below_above[ii] = _check_broadcast_up_to(
                        below_above[ii], broadcast_shape, names[ii])
            else:
                fill_value = np.asarray(fill_value)
                below_above = [_check_broadcast_up_to(
                    fill_value, broadcast_shape, 'fill_value')] * 2
            self._fill_value_below, self._fill_value_above = below_above
            self._extrapolate = False
            if self.bounds_error is None:
                self.bounds_error = True
        # backwards compat: fill_value was a public attr; make it writeable
        self._fill_value_orig = fill_value
    def _call_linear_np(self, x_new):
        # Note that out-of-bounds values are taken care of in self._evaluate
        return np.interp(x_new, self.x, self.y)
    def _call_linear(self, x_new):
        # 2. Find where in the orignal data, the values to interpolate
        #    would be inserted.
        #    Note: If x_new[n] == x[m], then m is returned by searchsorted.
        x_new_indices = searchsorted(self.x, x_new)
        # 3. Clip x_new_indices so that they are within the range of
        #    self.x indices and at least 1.  Removes mis-interpolation
        #    of x_new[n] = x[0]
        x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
        # 4. Calculate the slope of regions that each x_new value falls in.
        lo = x_new_indices - 1
        hi = x_new_indices
        x_lo = self.x[lo]
        x_hi = self.x[hi]
        y_lo = self._y[lo]
        y_hi = self._y[hi]
        # Note that the following two expressions rely on the specifics of the
        # broadcasting semantics.
        slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
        # 5. Calculate the actual value for each entry in x_new.
        y_new = slope*(x_new - x_lo)[:, None] + y_lo
        return y_new
    def _call_nearest(self, x_new):
        """ Find nearest neighbour interpolated y_new = f(x_new)."""
        # 2. Find where in the averaged data the values to interpolate
        #    would be inserted.
        #    Note: use side='left' (right) to searchsorted() to define the
        #    halfway point to be nearest to the left (right) neighbour
        x_new_indices = searchsorted(self.x_bds, x_new, side='left')
        # 3. Clip x_new_indices so that they are within the range of x indices.
        x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
        # 4. Calculate the actual value for each entry in x_new.
        y_new = self._y[x_new_indices]
        return y_new
    def _call_spline(self, x_new):
        return self._spline(x_new)
    def _evaluate(self, x_new):
        # 1. Handle values in x_new that are outside of x. Throw error,
        #    or return a list of mask array indicating the outofbounds values.
        #    The behavior is set by the bounds_error variable.
        x_new = asarray(x_new)
        # self._call holds an *unbound* method (see __init__), so pass self.
        y_new = self._call(self, x_new)
        if not self._extrapolate:
            below_bounds, above_bounds = self._check_bounds(x_new)
            if len(y_new) > 0:
                # Note fill_value must be broadcast up to the proper size
                # and flattened to work here
                y_new[below_bounds] = self._fill_value_below
                y_new[above_bounds] = self._fill_value_above
        return y_new
    def _check_bounds(self, x_new):
        """Check the inputs for being in the bounds of the interpolated data.
        Parameters
        ----------
        x_new : array
        Returns
        -------
        out_of_bounds : bool array
            The mask on x_new of values that are out of the bounds.
        """
        # If self.bounds_error is True, we raise an error if any x_new values
        # fall outside the range of x.  Otherwise, we return an array indicating
        # which values are outside the boundary region.
        below_bounds = x_new < self.x[0]
        above_bounds = x_new > self.x[-1]
        # !! Could provide more information about which values are out of bounds
        if self.bounds_error and below_bounds.any():
            raise ValueError("A value in x_new is below the interpolation "
                             "range.")
        if self.bounds_error and above_bounds.any():
            raise ValueError("A value in x_new is above the interpolation "
                             "range.")
        # !! Should we emit a warning if some values are out of bounds?
        # !! matlab does not.
        return below_bounds, above_bounds
class _PPolyBase(object):
    """Base class for piecewise polynomials."""
    __slots__ = ('c', 'x', 'extrapolate', 'axis')
    def __init__(self, c, x, extrapolate=None, axis=0):
        # c: coefficient array, target layout (k, m, ...); x: breakpoints.
        self.c = np.asarray(c)
        self.x = np.ascontiguousarray(x, dtype=np.float64)
        if extrapolate is None:
            extrapolate = True
        elif extrapolate != 'periodic':
            extrapolate = bool(extrapolate)
        self.extrapolate = extrapolate
        if not (0 <= axis < self.c.ndim - 1):
            raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
        self.axis = axis
        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (k, m, ...),
            # and axis != 0 means that we have c.shape (..., k, m, ...)
            #                                                ^
            #                                               axis
            # So we roll two of them.
            self.c = np.rollaxis(self.c, axis+1)
            self.c = np.rollaxis(self.c, axis+1)
        if self.x.ndim != 1:
            raise ValueError("x must be 1-dimensional")
        if self.x.size < 2:
            raise ValueError("at least 2 breakpoints are needed")
        if self.c.ndim < 2:
            raise ValueError("c must have at least 2 dimensions")
        if self.c.shape[0] == 0:
            raise ValueError("polynomial must be at least of order 0")
        if self.c.shape[1] != self.x.size-1:
            raise ValueError("number of coefficients != len(x)-1")
        dx = np.diff(self.x)
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` must be strictly increasing or decreasing.")
        # Normalize the coefficient dtype (float or complex) and layout.
        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)
    def _get_dtype(self, dtype):
        # Return np.complex_ if either the argument dtype or the stored
        # coefficients are complex; otherwise np.float_.
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex_
        else:
            return np.float_
    @classmethod
    def construct_fast(cls, c, x, extrapolate=None, axis=0):
        """
        Construct the piecewise polynomial without making checks.
        Takes the same parameters as the constructor. Input arguments
        `c` and `x` must be arrays of the correct shape and type.  The
        `c` array can only be of dtypes float and complex, and `x`
        array must have dtype float.
        """
        self = object.__new__(cls)
        self.c = c
        self.x = x
        self.axis = axis
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self
    def _ensure_c_contiguous(self):
        """
        c and x may be modified by the user. The Cython code expects
        that they are C contiguous.
        """
        if not self.x.flags.c_contiguous:
            self.x = self.x.copy()
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()
    def extend(self, c, x, right=None):
        """
        Add additional breakpoints and coefficients to the polynomial.
        Parameters
        ----------
        c : ndarray, size (k, m, ...)
            Additional coefficients for polynomials in intervals. Note that
            the first additional interval will be formed using one of the
            `self.x` end points.
        x : ndarray, size (m,)
            Additional breakpoints. Must be sorted in the same order as
            `self.x` and either to the right or to the left of the current
            breakpoints.
        right
            Deprecated argument. Has no effect.
            .. deprecated:: 0.19
        """
        if right is not None:
            warnings.warn("`right` is deprecated and will be removed.")
        c = np.asarray(c)
        x = np.asarray(x)
        if c.ndim < 2:
            raise ValueError("invalid dimensions for c")
        if x.ndim != 1:
            raise ValueError("invalid dimensions for x")
        if x.shape[0] != c.shape[1]:
            raise ValueError("x and c have incompatible sizes")
        if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
            raise ValueError("c and self.c have incompatible shapes")
        if c.size == 0:
            return
        dx = np.diff(x)
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` is not sorted.")
        # Decide whether the new breakpoints attach before or after the
        # existing ones; the test direction follows self.x's sort order.
        if self.x[-1] >= self.x[0]:
            if not x[-1] >= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")
            if x[0] >= self.x[-1]:
                action = 'append'
            elif x[-1] <= self.x[0]:
                action = 'prepend'
            else:
                raise ValueError("`x` is neither on the left or on the right "
                                 "from `self.x`.")
        else:
            if not x[-1] <= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")
            if x[0] <= self.x[-1]:
                action = 'append'
            elif x[-1] >= self.x[0]:
                action = 'prepend'
            else:
                raise ValueError("`x` is neither on the left or on the right "
                                 "from `self.x`.")
        # Merge coefficients into a common array, zero-padding the lower
        # orders so both pieces share the maximum degree k2.
        dtype = self._get_dtype(c.dtype)
        k2 = max(c.shape[0], self.c.shape[0])
        c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
                      dtype=dtype)
        if action == 'append':
            c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
            c2[k2-c.shape[0]:, self.c.shape[1]:] = c
            self.x = np.r_[self.x, x]
        elif action == 'prepend':
            c2[k2-self.c.shape[0]:, :c.shape[1]] = c
            c2[k2-c.shape[0]:, c.shape[1]:] = self.c
            self.x = np.r_[x, self.x]
        self.c = c2
    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative.
        Parameters
        ----------
        x : array_like
            Points to evaluate the interpolant at.
        nu : int, optional
            Order of derivative to evaluate. Must be non-negative.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.
        Returns
        -------
        y : array_like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.
        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = np.asarray(x)
        x_shape, x_ndim = x.shape, x.ndim
        x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
        # With periodic extrapolation we map x to the segment
        # [self.x[0], self.x[-1]].
        if extrapolate == 'periodic':
            x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
            extrapolate = False
        out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[2:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            l = list(range(out.ndim))
            l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
            out = out.transpose(l)
        return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
    def _evaluate(self, x, nu, extrapolate, out):
        """Evaluate the nu-th derivative at points ``x`` into ``out`` (C backend)."""
        # Collapse trailing dimensions so the Cython kernel sees a 3-D array.
        _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                        self.x, x, nu, bool(extrapolate), out)
    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.
        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. Default is 1, i.e. compute the
            first derivative. If negative, the antiderivative is returned.
        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k - n representing the derivative
            of this polynomial.
        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if nu < 0:
            return self.antiderivative(-nu)
        # reduce order: drop the nu highest-order coefficient rows
        if nu == 0:
            c2 = self.c.copy()
        else:
            c2 = self.c[:-nu,:].copy()
        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
        # multiply by the correct rising factorials
        factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
        c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.
        Antiderivativative is also the indefinite integral of the function,
        and derivative is its inverse operation.
        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e. compute
            the first integral. If negative, the derivative is returned.
        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.
        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        """
        if nu <= 0:
            return self.derivative(-nu)
        # Raise the order: prepend nu zero rows for the new high-order terms.
        c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
                     dtype=self.c.dtype)
        c[:-nu] = self.c
        # divide by the correct rising factorials
        factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
        c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
        # fix continuity of added degrees of freedom
        self._ensure_c_contiguous()
        _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
                              self.x, nu - 1)
        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate
        # construct a compatible polynomial
        return self.construct_fast(c, self.x, extrapolate, self.axis)
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        # Swap integration bounds if needed; the sign is re-applied at the end.
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1

        # One scalar result per trailing (non-interpolation) dimension.
        range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
        self._ensure_c_contiguous()

        # Compute the integral.
        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)

            if n_periods > 0:
                # Integral over one full period, scaled by the period count.
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xe, False, out=range_int)
                range_int *= n_periods
            else:
                range_int.fill(0)

            # Map a to [xs, xe], b is always a + left.
            a = xs + (a - xs) % period
            b = a + left

            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            remainder_int = np.empty_like(range_int)
            if b <= xe:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, b, False, out=remainder_int)
                range_int += remainder_int
            else:
                # The leftover stretch wraps past the period end: integrate
                # [a, xe], then the wrapped-around part starting at xs.
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, xe, False, out=remainder_int)
                range_int += remainder_int

                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xs + left + a - xe, False, out=remainder_int)
                range_int += remainder_int
        else:
            # Non-periodic: a single call on the compiled integrator.
            _ppoly.integrate(
                self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                self.x, a, b, bool(extrapolate), out=range_int)

        # Return
        range_int *= sign
        return range_int.reshape(self.c.shape[2:])
    def solve(self, y=0., discontinuity=True, extrapolate=None):
        """
        Find real solutions of the equation ``pp(x) == y``.

        Parameters
        ----------
        y : float, optional
            Right-hand side. Default is zero.
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to return roots from the polynomial
            extrapolated based on first and last intervals, 'periodic' works
            the same as False. If None (default), use `self.extrapolate`.

        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).

            If the PPoly object describes multiple polynomials, the
            return value is an object array whose each element is an
            ndarray containing the roots.

        Notes
        -----
        This routine works only on real-valued polynomials.

        If the piecewise polynomial contains sections that are
        identically zero, the root list will contain the start point
        of the corresponding interval, followed by a ``nan`` value.

        If the polynomial is discontinuous across a breakpoint, and
        there is a sign change across the breakpoint, this is reported
        if the `discont` parameter is True.

        Examples
        --------
        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
        ``[-2, 1], [1, 2]``:

        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
        >>> pp.roots()
        array([-1.,  1.])
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        self._ensure_c_contiguous()

        # The compiled root finder only supports real coefficients.
        if np.issubdtype(self.c.dtype, np.complexfloating):
            raise ValueError("Root finding is only for "
                             "real-valued polynomials")

        y = float(y)
        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                              self.x, y, bool(discontinuity),
                              bool(extrapolate))
        if self.c.ndim == 2:
            # A single polynomial: return its root array directly.
            return r[0]
        else:
            # Multiple polynomials: pack per-polynomial root arrays into an
            # object array shaped like the trailing dimensions.
            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
            # this for-loop is equivalent to ``r2[...] = r``, but that's broken
            # in numpy 1.6.0
            for ii, root in enumerate(r):
                r2[ii] = root
            return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep`
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        """
        Construct a piecewise polynomial in the power basis
        from a polynomial in Bernstein basis.

        Parameters
        ----------
        bp : BPoly
            A Bernstein basis polynomial, as created by BPoly
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        dx = np.diff(bp.x)
        k = bp.c.shape[0] - 1  # polynomial order

        # Broadcasting helper: align dx with any trailing dimensions of c.
        rest = (None,)*(bp.c.ndim-2)

        c = np.zeros_like(bp.c)
        # Expand each Bernstein term binom(k, a) * t**a * (1-t)**(k-a) into
        # powers of t via the binomial theorem and accumulate into the
        # power-basis coefficient array (c[k-s] holds the t**s coefficient).
        for a in range(k+1):
            factor = (-1)**(a) * comb(k, a) * bp.c[a]
            for s in range(a, k+1):
                val = comb(k-a, s-a) * (-1)**s
                # Division by dx**s converts from the scaled variable t to
                # the local variable (x - x[i]) used by the power basis.
                c[k-s] += factor * val / dx[(slice(None),)+rest]**s

        if extrapolate is None:
            extrapolate = bp.extrapolate

        return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
    """Piecewise polynomial in terms of coefficients and breakpoints.

    The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
    Bernstein polynomial basis::

        S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),

    where ``k`` is the degree of the polynomial, and::

        b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),

    with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
    coefficient.

    Parameters
    ----------
    c : ndarray, shape (k, m, ...)
        Polynomial coefficients, order `k` and `m` intervals
    x : ndarray, shape (m+1,)
        Polynomial breakpoints. Must be sorted in either increasing or
        decreasing order.
    extrapolate : bool, optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. Default is True.
    axis : int, optional
        Interpolation axis. Default is zero.

    Attributes
    ----------
    x : ndarray
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials. They are reshaped
        to a 3-dimensional array with the last dimension representing
        the trailing dimensions of the original coefficient array.
    axis : int
        Interpolation axis.

    Methods
    -------
    __call__
    extend
    derivative
    antiderivative
    integrate
    construct_fast
    from_power_basis
    from_derivatives

    See also
    --------
    PPoly : piecewise polynomials in the power basis

    Notes
    -----
    Properties of Bernstein polynomials are well documented in the literature.
    Here's a non-exhaustive list:

    .. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial

    .. [2] Kenneth I. Joy, Bernstein polynomials,
       http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf

    .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
       vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.

    Examples
    --------
    >>> from scipy.interpolate import BPoly
    >>> x = [0, 1]
    >>> c = [[1], [2], [3]]
    >>> bp = BPoly(c, x)

    This creates a 2nd order polynomial

    .. math::

        B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
             = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2

    """

    def _evaluate(self, x, nu, extrapolate, out):
        # Evaluation is delegated to the compiled Bernstein-basis routine,
        # with the coefficients viewed as (order, interval, trailing).
        _ppoly.evaluate_bernstein(
            self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
            self.x, x, nu, bool(extrapolate), out)

    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. Default is 1, i.e. compute the
            first derivative. If negative, the antiderivative is returned.

        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k - nu representing the derivative of
            this polynomial.
        """
        if nu < 0:
            return self.antiderivative(-nu)

        # Higher orders are computed by repeated first derivatives.
        if nu > 1:
            bp = self
            for k in range(nu):
                bp = bp.derivative()
            return bp

        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            # For a polynomial
            #    B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
            # we use the fact that
            #   b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
            # which leads to
            #   B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
            #
            # finally, for an interval [y, y + dy] with dy != 1,
            # we need to correct for an extra power of dy

            rest = (None,)*(self.c.ndim-2)

            k = self.c.shape[0] - 1
            dx = np.diff(self.x)[(None, slice(None))+rest]
            c2 = k * np.diff(self.c, axis=0) / dx

        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)

        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)

    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e. compute
            the first integral. If negative, the derivative is returned.

        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k + nu representing the
            antiderivative of this polynomial.

        Notes
        -----
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        """
        if nu <= 0:
            return self.derivative(-nu)

        # Higher orders are computed by repeated first antiderivatives.
        if nu > 1:
            bp = self
            for k in range(nu):
                bp = bp.antiderivative()
            return bp

        # Construct the indefinite integrals on individual intervals
        c, x = self.c, self.x
        k = c.shape[0]
        c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)

        c2[1:, ...] = np.cumsum(c, axis=0) / k
        delta = x[1:] - x[:-1]
        c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]

        # Now fix continuity: on the very first interval, take the integration
        # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
        # the integration constant is then equal to the jump of the `bp` at x_j.
        # The latter is given by the coefficient of B_{n+1, n+1}
        # *on the previous interval* (other B. polynomials are zero at the breakpoint)
        # Finally, use the fact that BPs form a partition of unity.
        c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]

        # The antiderivative of a periodic function is generally not periodic.
        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate

        return self.construct_fast(c2, x, extrapolate, axis=self.axis)

    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs. If 'periodic', periodic
            extrapolation is used. If None (default), use `self.extrapolate`.

        Returns
        -------
        array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        # XXX: can probably use instead the fact that
        # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
        ib = self.antiderivative()
        if extrapolate is None:
            extrapolate = self.extrapolate

        # ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic. in antiderivative() call.
        if extrapolate != 'periodic':
            ib.extrapolate = extrapolate

        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            # For simplicity and clarity convert to a <= b case.
            if a <= b:
                sign = 1
            else:
                a, b = b, a
                sign = -1

            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)
            res = n_periods * (ib(xe) - ib(xs))

            # Map a and b to [xs, xe].
            a = xs + (a - xs) % period
            b = a + left

            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            if b <= xe:
                res += ib(b) - ib(a)
            else:
                res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)

            return sign * res
        else:
            return ib(b) - ib(a)

    def extend(self, c, x, right=None):
        # Raise both coefficient arrays to a common degree before delegating
        # to the base-class extend.
        k = max(self.c.shape[0], c.shape[0])
        self.c = self._raise_degree(self.c, k - self.c.shape[0])
        c = self._raise_degree(c, k - c.shape[0])
        return _PPolyBase.extend(self, c, x, right)
    extend.__doc__ = _PPolyBase.extend.__doc__

    @classmethod
    def from_power_basis(cls, pp, extrapolate=None):
        """
        Construct a piecewise polynomial in Bernstein basis
        from a power basis polynomial.

        Parameters
        ----------
        pp : PPoly
            A piecewise polynomial in the power basis
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        dx = np.diff(pp.x)
        k = pp.c.shape[0] - 1  # polynomial order

        rest = (None,)*(pp.c.ndim-2)

        c = np.zeros_like(pp.c)
        # Expand each power-basis term into the Bernstein basis; c[j]
        # accumulates the coefficient of b_{j, k}.
        for a in range(k+1):
            factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
            for j in range(k-a, k+1):
                c[j] += factor * comb(j, k-a)

        if extrapolate is None:
            extrapolate = pp.extrapolate

        return cls.construct_fast(c, pp.x, extrapolate, pp.axis)

    @classmethod
    def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
        """Construct a piecewise polynomial in the Bernstein basis,
        compatible with the specified values and derivatives at breakpoints.

        Parameters
        ----------
        xi : array_like
            sorted 1D array of x-coordinates
        yi : array_like or list of array_likes
            ``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
        orders : None or int or array_like of ints. Default: None.
            Specifies the degree of local polynomials. If not None, some
            derivatives are ignored.
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.

        Notes
        -----
        If ``k`` derivatives are specified at a breakpoint ``x``, the
        constructed polynomial is exactly ``k`` times continuously
        differentiable at ``x``, unless the ``order`` is provided explicitly.
        In the latter case, the smoothness of the polynomial at
        the breakpoint is controlled by the ``order``.

        Deduces the number of derivatives to match at each end
        from ``order`` and the number of derivatives available. If
        possible it uses the same number of derivatives from
        each end; if the number is odd it tries to take the
        extra one from y2. In any case if not enough derivatives
        are available at one end or another it draws enough to
        make up the total from the other end.

        If the order is too high and not enough derivatives are available,
        an exception is raised.

        Examples
        --------
        >>> from scipy.interpolate import BPoly
        >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])

        Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
        such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`

        >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])

        Creates a piecewise polynomial `f(x)`, such that
        `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
        Based on the number of derivatives provided, the order of the
        local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
        Notice that no restriction is imposed on the derivatives at
        `x = 1` and `x = 2`.

        Indeed, the explicit form of the polynomial is::

            f(x) = | x * (1 - x),  0 <= x < 1
                   | 2 * (x - 1),  1 <= x <= 2

        So that f'(1-0) = -1 and f'(1+0) = 2

        """
        xi = np.asarray(xi)
        if len(xi) != len(yi):
            raise ValueError("xi and yi need to have the same length")
        # Check adjacent differences. (Fixed from ``xi[1:] - xi[:1]``, which
        # only compared each point against xi[0] and therefore accepted some
        # non-monotonic inputs.)
        if np.any(xi[1:] - xi[:-1] <= 0):
            raise ValueError("x coordinates are not in increasing order")

        # number of intervals
        m = len(xi) - 1

        # global poly order is k-1, local orders are <=k and can vary
        try:
            k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
        except TypeError:
            raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")

        if orders is None:
            orders = [None] * m
        else:
            if isinstance(orders, (integer_types, np.integer)):
                orders = [orders] * m
            k = max(k, max(orders))

            if any(o <= 0 for o in orders):
                raise ValueError("Orders must be positive.")

        c = []
        for i in range(m):
            y1, y2 = yi[i], yi[i+1]
            if orders[i] is None:
                n1, n2 = len(y1), len(y2)
            else:
                n = orders[i]+1
                n1 = min(n//2, len(y1))
                n2 = min(n - n1, len(y2))
                # Re-balance n1 against the derivatives actually available at
                # the *left* endpoint. (Fixed: this used ``len(y2)``, which
                # capped n1 by the right endpoint's derivative count.)
                n1 = min(n - n2, len(y1))
                if n1+n2 != n:
                    raise ValueError("Point %g has %d derivatives, point %g"
                            " has %d derivatives, but order %d requested" %
                            (xi[i], len(y1), xi[i+1], len(y2), orders[i]))

                if not (n1 <= len(y1) and n2 <= len(y2)):
                    raise ValueError("`order` input incompatible with"
                            " length y1 or y2.")

            b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
            if len(b) < k:
                b = BPoly._raise_degree(b, k - len(b))
            c.append(b)

        c = np.asarray(c)
        return cls(c.swapaxes(0, 1), xi, extrapolate)

    @staticmethod
    def _construct_from_derivatives(xa, xb, ya, yb):
        r"""Compute the coefficients of a polynomial in the Bernstein basis
        given the values and derivatives at the edges.

        Return the coefficients of a polynomial in the Bernstein basis
        defined on `[xa, xb]` and having the values and derivatives at the
        endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
        The polynomial constructed is of the minimal possible degree, i.e.,
        if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
        of the polynomial is ``na + nb - 1``.

        Parameters
        ----------
        xa : float
            Left-hand end point of the interval
        xb : float
            Right-hand end point of the interval
        ya : array_like
            Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
            ``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
        yb : array_like
            Derivatives at ``xb``.

        Returns
        -------
        array
            coefficient array of a polynomial having specified derivatives

        Notes
        -----
        This uses several facts from life of Bernstein basis functions.
        First of all,

        .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})

        If B(x) is a linear combination of the form

        .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},

        then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
        Iterating the latter one, one finds for the q-th derivative

        .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},

        with

        .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}

        This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
        `c_q` are found one by one by iterating `q = 0, ..., na`.

        At `x = xb` it's the same with `a = n - q`.

        """
        ya, yb = np.asarray(ya), np.asarray(yb)
        if ya.shape[1:] != yb.shape[1:]:
            raise ValueError('ya and yb have incompatible dimensions.')

        dta, dtb = ya.dtype, yb.dtype
        if (np.issubdtype(dta, np.complexfloating) or
               np.issubdtype(dtb, np.complexfloating)):
            dt = np.complex_
        else:
            dt = np.float_

        na, nb = len(ya), len(yb)
        n = na + nb

        c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)

        # compute coefficients of a polynomial degree na+nb-1
        # walk left-to-right
        for q in range(0, na):
            c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
            for j in range(0, q):
                c[q] -= (-1)**(j+q) * comb(q, j) * c[j]

        # now walk right-to-left
        for q in range(0, nb):
            c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
            for j in range(0, q):
                c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]

        return c

    @staticmethod
    def _raise_degree(c, d):
        r"""Raise a degree of a polynomial in the Bernstein basis.

        Given the coefficients of a polynomial degree `k`, return (the
        coefficients of) the equivalent polynomial of degree `k+d`.

        Parameters
        ----------
        c : array_like
            coefficient array, 1D
        d : integer

        Returns
        -------
        array
            coefficient array, 1D array of length `c.shape[0] + d`

        Notes
        -----
        This uses the fact that a Bernstein polynomial `b_{a, k}` can be
        identically represented as a linear combination of polynomials of
        a higher degree `k+d`:

        .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
                             comb(d, j) / comb(k+d, a+j)

        """
        if d == 0:
            return c

        k = c.shape[0] - 1
        out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)

        for a in range(c.shape[0]):
            f = c[a] * comb(k, a)
            for j in range(d+1):
                out[a+j] += f * comb(d, j) / comb(k+d, a+j)
        return out
class NdPPoly(object):
    """
    Piecewise tensor product polynomial

    The value at point `xp = (x', y', z', ...)` is evaluated by first
    computing the interval indices `i` such that::

        x[0][i[0]] <= x' < x[0][i[0]+1]
        x[1][i[1]] <= y' < x[1][i[1]+1]
        ...

    and then computing::

        S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
                * (xp[0] - x[0][i[0]])**m0
                * ...
                * (xp[n] - x[n][i[n]])**mn
                for m0 in range(k[0]+1)
                ...
                for mn in range(k[n]+1))

    where ``k[j]`` is the degree of the polynomial in dimension j. This
    representation is the piecewise multivariate power basis.

    Parameters
    ----------
    c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
        Polynomial coefficients, with polynomial order `kj` and
        `mj+1` intervals for each dimension `j`.
    x : ndim-tuple of ndarrays, shapes (mj+1,)
        Polynomial breakpoints for each dimension. These must be
        sorted in increasing order.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs. Default: True.

    Attributes
    ----------
    x : tuple of ndarrays
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials.

    Methods
    -------
    __call__
    construct_fast

    See also
    --------
    PPoly : piecewise polynomials in 1D

    Notes
    -----
    High-order polynomials in the power basis can be numerically
    unstable.

    """

    def __init__(self, c, x, extrapolate=None):
        self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
        self.c = np.asarray(c)
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = bool(extrapolate)

        ndim = len(self.x)
        if any(v.ndim != 1 for v in self.x):
            raise ValueError("x arrays must all be 1-dimensional")
        if any(v.size < 2 for v in self.x):
            raise ValueError("x arrays must all contain at least 2 points")
        # Validate against self.c (the ndarray view) rather than the raw
        # argument, so list/tuple inputs for ``c`` are accepted too.
        if self.c.ndim < 2*ndim:
            raise ValueError("c must have at least 2*len(x) dimensions")
        if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
            raise ValueError("x-coordinates are not in increasing order")
        if any(a != b.size - 1 for a, b in zip(self.c.shape[ndim:2*ndim], self.x)):
            raise ValueError("x and c do not agree on the number of intervals")

        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)

    @classmethod
    def construct_fast(cls, c, x, extrapolate=None):
        """
        Construct the piecewise polynomial without making checks.

        Takes the same parameters as the constructor. Input arguments
        `c` and `x` must be arrays of the correct shape and type.  The
        `c` array can only be of dtypes float and complex, and `x`
        array must have dtype float.
        """
        self = object.__new__(cls)
        self.c = c
        self.x = x
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self

    def _get_dtype(self, dtype):
        # np.complex128/np.float64 are used instead of the np.complex_ and
        # np.float_ aliases, which were removed in NumPy 2.0.
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex128
        else:
            return np.float64

    def _ensure_c_contiguous(self):
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()
        if not isinstance(self.x, tuple):
            self.x = tuple(self.x)

    def __call__(self, x, nu=None, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative

        Parameters
        ----------
        x : array-like
            Points to evaluate the interpolant at.
        nu : tuple, optional
            Orders of derivatives to evaluate. Each must be non-negative.
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        y : array-like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.

        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        ndim = len(self.x)

        x = _ndim_coords_from_arrays(x)
        x_shape = x.shape
        x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float64)

        if nu is None:
            nu = np.zeros((ndim,), dtype=np.intc)
        else:
            nu = np.asarray(nu, dtype=np.intc)
            if nu.ndim != 1 or nu.shape[0] != ndim:
                raise ValueError("invalid number of derivative orders nu")

        # Flatten to the 3D (order, interval, trailing) layout expected by
        # the compiled evaluator.
        dim1 = prod(self.c.shape[:ndim])
        dim2 = prod(self.c.shape[ndim:2*ndim])
        dim3 = prod(self.c.shape[2*ndim:])
        ks = np.array(self.c.shape[:ndim], dtype=np.intc)

        out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
        self._ensure_c_contiguous()

        _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
                           self.x,
                           ks,
                           x,
                           nu,
                           bool(extrapolate),
                           out)

        return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])

    def _derivative_inplace(self, nu, axis):
        """
        Compute 1D derivative along a selected dimension in-place
        May result to non-contiguous c array.
        """
        if nu < 0:
            return self._antiderivative_inplace(-nu, axis)

        ndim = len(self.x)
        axis = axis % ndim

        # reduce order
        if nu == 0:
            # noop
            return
        else:
            sl = [slice(None)]*ndim
            sl[axis] = slice(None, -nu, None)
            # Index with a tuple: indexing ndarrays with a plain list of
            # slices was deprecated and later removed in NumPy.
            c2 = self.c[tuple(sl)]

        if c2.shape[axis] == 0:
            # derivative of order 0 is zero
            shp = list(c2.shape)
            shp[axis] = 1
            c2 = np.zeros(shp, dtype=c2.dtype)

        # multiply by the correct rising factorials
        factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
        sl = [None]*c2.ndim
        sl[axis] = slice(None)
        c2 *= factor[tuple(sl)]

        self.c = c2

    def _antiderivative_inplace(self, nu, axis):
        """
        Compute 1D antiderivative along a selected dimension
        May result to non-contiguous c array.
        """
        if nu <= 0:
            return self._derivative_inplace(-nu, axis)

        ndim = len(self.x)
        axis = axis % ndim

        # Move the selected order axis to the front so the 1D machinery
        # below can work along axis 0.
        perm = list(range(ndim))
        perm[0], perm[axis] = perm[axis], perm[0]
        perm = perm + list(range(ndim, self.c.ndim))

        c = self.c.transpose(perm)

        c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
                      dtype=c.dtype)
        c2[:-nu] = c

        # divide by the correct rising factorials
        factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
        c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]

        # fix continuity of added degrees of freedom
        perm2 = list(range(c2.ndim))
        perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]

        c2 = c2.transpose(perm2)
        c2 = c2.copy()
        _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
                              self.x[axis], nu-1)

        c2 = c2.transpose(perm2)
        c2 = c2.transpose(perm)

        # Done
        self.c = c2

    def derivative(self, nu):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : ndim-tuple of int
            Order of derivatives to evaluate for each dimension.
            If negative, the antiderivative is returned.

        Returns
        -------
        pp : NdPPoly
            Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
            representing the derivative of this polynomial.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals in each dimension are
        considered half-open, ``[a, b)``, except for the last interval
        which is closed ``[a, b]``.

        """
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)

        for axis, n in enumerate(nu):
            p._derivative_inplace(n, axis)

        p._ensure_c_contiguous()
        return p

    def antiderivative(self, nu):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        The antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.

        Parameters
        ----------
        nu : ndim-tuple of int
            Order of derivatives to evaluate for each dimension.
            If negative, the derivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.

        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.

        """
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)

        for axis, n in enumerate(nu):
            p._antiderivative_inplace(n, axis)

        p._ensure_c_contiguous()
        return p

    def integrate_1d(self, a, b, axis, extrapolate=None):
        r"""
        Compute NdPPoly representation for one dimensional definite integral

        The result is a piecewise polynomial representing the integral:

        .. math::

           p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)

        where the dimension integrated over is specified with the
        `axis` parameter.

        Parameters
        ----------
        a, b : float
            Lower and upper bound for integration.
        axis : int
            Dimension over which to compute the 1D integrals
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        ig : NdPPoly or array-like
            Definite integral of the piecewise polynomial over [a, b].
            If the polynomial was 1-dimensional, an array is returned,
            otherwise, an NdPPoly object.

        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        ndim = len(self.x)
        axis = int(axis) % ndim

        # Reuse 1D integration routines: bring the selected order axis and
        # interval axis to the front and view the rest as trailing.
        c = self.c
        swap = list(range(c.ndim))
        swap.insert(0, swap[axis])
        del swap[axis + 1]
        swap.insert(1, swap[ndim + axis])
        del swap[ndim + axis + 1]

        c = c.transpose(swap)
        p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
                                 self.x[axis],
                                 extrapolate=extrapolate)
        out = p.integrate(a, b, extrapolate=extrapolate)

        # Construct result
        if ndim == 1:
            return out.reshape(c.shape[2:])
        else:
            c = out.reshape(c.shape[2:])
            x = self.x[:axis] + self.x[axis+1:]
            return self.construct_fast(c, x, extrapolate=extrapolate)

    def integrate(self, ranges, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        ranges : ndim-tuple of 2-tuples float
            Sequence of lower and upper bounds for each dimension,
            ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over
            [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]

        """

        ndim = len(self.x)

        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        if not hasattr(ranges, '__len__') or len(ranges) != ndim:
            raise ValueError("Range not a sequence of correct length")

        self._ensure_c_contiguous()

        # Reuse 1D integration routine, integrating out one dimension at a
        # time; each pass reduces the coefficient array by one dimension pair.
        c = self.c
        for n, (a, b) in enumerate(ranges):
            swap = list(range(c.ndim))
            swap.insert(1, swap[ndim - n])
            del swap[ndim - n + 1]

            c = c.transpose(swap)

            p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
            out = p.integrate(a, b, extrapolate=extrapolate)
            c = out.reshape(c.shape[2:])

        return c
class RegularGridInterpolator(object):
    """
    Interpolation on a regular grid in arbitrary dimensions

    The data must be defined on a regular grid; the grid spacing however may be
    uneven. Linear and nearest-neighbour interpolation are supported. After
    setting up the interpolator object, the interpolation method (*linear* or
    *nearest*) may be chosen at each evaluation.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.
    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.
    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest". This parameter will become the default for the object's
        ``__call__`` method. Default is "linear".
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.

    Methods
    -------
    __call__

    Notes
    -----
    Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
    avoids expensive triangulation of the input data by taking advantage of the
    regular grid structure.

    .. versionadded:: 0.14

    Examples
    --------
    Evaluate a simple example function on the points of a 3D grid:

    >>> from scipy.interpolate import RegularGridInterpolator
    >>> def f(x,y,z):
    ...     return 2 * x**3 + 3 * y**2 - z
    >>> x = np.linspace(1, 4, 11)
    >>> y = np.linspace(4, 7, 22)
    >>> z = np.linspace(7, 9, 33)
    >>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))

    ``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
    Next, define an interpolating function from this data:

    >>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)

    Evaluate the interpolating function at the two points
    ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:

    >>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
    >>> my_interpolating_function(pts)
    array([ 125.80469388,  146.30069388])

    which is indeed a close approximation to
    ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.

    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions
    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions

    References
    ----------
    .. [1] Python package *regulargrid* by Johannes Buchner, see
           https://pypi.python.org/pypi/regulargrid/
    .. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
           Encyclopedia. Retrieved 27 Feb 2013 01:28.
           http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
    .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
           and multilinear table interpolation in many dimensions." MATH.
           COMPUT. 50.181 (1988): 189-196.
           http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
    """
    # this class is based on code originally programmed by Johannes Buchner,
    # see https://github.com/JohannesBuchner/regulargrid

    def __init__(self, points, values, method="linear", bounds_error=True,
                 fill_value=np.nan):
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)
        self.method = method
        self.bounds_error = bounds_error

        if not hasattr(values, 'ndim'):
            # allow reasonable duck-typed values
            values = np.asarray(values)

        if len(points) > values.ndim:
            raise ValueError("There are %d point arrays, but values has %d "
                             "dimensions" % (len(points), values.ndim))

        if hasattr(values, 'dtype') and hasattr(values, 'astype'):
            if not np.issubdtype(values.dtype, np.inexact):
                # Integer data cannot hold fractional interpolated
                # results; promote to float.
                values = values.astype(float)

        self.fill_value = fill_value
        if fill_value is not None:
            fill_value_dtype = np.asarray(fill_value).dtype
            if (hasattr(values, 'dtype') and not
                    np.can_cast(fill_value_dtype, values.dtype,
                                casting='same_kind')):
                raise ValueError("fill_value must be either 'None' or "
                                 "of a type compatible with values")

        for i, p in enumerate(points):
            if not np.all(np.diff(p) > 0.):
                raise ValueError("The points in dimension %d must be strictly "
                                 "ascending" % i)
            if not np.asarray(p).ndim == 1:
                raise ValueError("The points in dimension %d must be "
                                 "1-dimensional" % i)
            if not values.shape[i] == len(p):
                raise ValueError("There are %d points and %d values in "
                                 "dimension %d" % (len(p), values.shape[i], i))
        self.grid = tuple([np.asarray(p) for p in points])
        self.values = values

    def __call__(self, xi, method=None):
        """
        Interpolation at coordinates

        Parameters
        ----------
        xi : ndarray of shape (..., ndim)
            The coordinates to sample the gridded data at

        method : str
            The method of interpolation to perform. Supported are "linear" and
            "nearest".
        """
        method = self.method if method is None else method
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)

        ndim = len(self.grid)
        xi = _ndim_coords_from_arrays(xi, ndim=ndim)
        if xi.shape[-1] != len(self.grid):
            # Bug fix: report xi.shape[-1] (the coordinate dimension being
            # checked); the old message used xi.shape[1], which is wrong
            # for N-d xi and an IndexError for 1-D xi.
            raise ValueError("The requested sample points xi have dimension "
                             "%d, but this RegularGridInterpolator has "
                             "dimension %d" % (xi.shape[-1], ndim))

        xi_shape = xi.shape
        xi = xi.reshape(-1, xi_shape[-1])

        if self.bounds_error:
            for i, p in enumerate(xi.T):
                if not np.logical_and(np.all(self.grid[i][0] <= p),
                                      np.all(p <= self.grid[i][-1])):
                    raise ValueError("One of the requested xi is out of bounds "
                                     "in dimension %d" % i)

        indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
        if method == "linear":
            result = self._evaluate_linear(indices, norm_distances,
                                           out_of_bounds)
        elif method == "nearest":
            result = self._evaluate_nearest(indices, norm_distances,
                                            out_of_bounds)
        if not self.bounds_error and self.fill_value is not None:
            result[out_of_bounds] = self.fill_value

        return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])

    def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
        # slice for broadcasting over trailing dimensions in self.values
        vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))

        # Multilinear interpolation: sum the values at all 2**ndim corners
        # of each cell, each weighted by the product of per-axis weights
        # (1 - yi) for the lower edge and yi for the upper edge.
        edges = itertools.product(*[[i, i + 1] for i in indices])
        values = 0.
        for edge_indices in edges:
            weight = 1.
            for ei, i, yi in zip(edge_indices, indices, norm_distances):
                weight *= np.where(ei == i, 1 - yi, yi)
            values += np.asarray(self.values[edge_indices]) * weight[vslice]
        return values

    def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
        # Snap to the lower edge when the normalized distance is <= 0.5,
        # otherwise to the upper edge.
        idx_res = []
        for i, yi in zip(indices, norm_distances):
            idx_res.append(np.where(yi <= .5, i, i + 1))
        # Bug fix: index with a tuple -- indexing an ndarray with a list
        # of arrays is deprecated in numpy and removed in recent versions.
        return self.values[tuple(idx_res)]

    def _find_indices(self, xi):
        # find relevant edges between which xi are situated
        indices = []
        # compute distance to lower edge in unity units
        norm_distances = []
        # check for out of bounds xi
        out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
        # iterate through dimensions
        for x, grid in zip(xi, self.grid):
            i = np.searchsorted(grid, x) - 1
            # clip so that both i and i + 1 are valid grid indices
            i[i < 0] = 0
            i[i > grid.size - 2] = grid.size - 2
            indices.append(i)
            norm_distances.append((x - grid[i]) /
                                  (grid[i + 1] - grid[i]))
            if not self.bounds_error:
                out_of_bounds += x < grid[0]
                out_of_bounds += x > grid[-1]
        return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
            fill_value=np.nan):
    """
    Multidimensional interpolation on regular grids.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.
    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.
    xi : ndarray of shape (..., ndim)
        The coordinates to sample the gridded data at
    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest", and "splinef2d". "splinef2d" is only supported for
        2-dimensional data.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.  Extrapolation is not supported by method
        "splinef2d".

    Returns
    -------
    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
        Interpolated values at input coordinates.

    Raises
    ------
    ValueError
        If `method` is unknown, the grid/values/xi shapes are
        inconsistent, or a sample point is out of bounds while
        `bounds_error` is True.

    Notes
    -----
    .. versionadded:: 0.14

    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions
    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions
    RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
                              regular grid in arbitrary dimensions
    RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
    """
    # sanity check 'method' kwarg
    if method not in ["linear", "nearest", "splinef2d"]:
        raise ValueError("interpn only understands the methods 'linear', "
                         "'nearest', and 'splinef2d'. You provided %s." %
                         method)

    if not hasattr(values, 'ndim'):
        values = np.asarray(values)

    ndim = values.ndim
    if ndim > 2 and method == "splinef2d":
        raise ValueError("The method spline2fd can only be used for "
                         "2-dimensional input data")
    if not bounds_error and fill_value is None and method == "splinef2d":
        raise ValueError("The method spline2fd does not support extrapolation.")

    # sanity check consistency of input dimensions
    if len(points) > ndim:
        raise ValueError("There are %d point arrays, but values has %d "
                         "dimensions" % (len(points), ndim))
    if len(points) != ndim and method == 'splinef2d':
        raise ValueError("The method spline2fd can only be used for "
                         "scalar data with one point per coordinate")

    # sanity check input grid
    for i, p in enumerate(points):
        if not np.all(np.diff(p) > 0.):
            raise ValueError("The points in dimension %d must be strictly "
                             "ascending" % i)
        if not np.asarray(p).ndim == 1:
            raise ValueError("The points in dimension %d must be "
                             "1-dimensional" % i)
        if not values.shape[i] == len(p):
            raise ValueError("There are %d points and %d values in "
                             "dimension %d" % (len(p), values.shape[i], i))
    grid = tuple([np.asarray(p) for p in points])

    # sanity check requested xi
    xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
    if xi.shape[-1] != len(grid):
        # Bug fix: report xi.shape[-1] (the coordinate dimension being
        # checked); the old message used xi.shape[1], which is wrong for
        # N-d xi and an IndexError for 1-D xi.
        raise ValueError("The requested sample points xi have dimension "
                         "%d, but this RegularGridInterpolator has "
                         "dimension %d" % (xi.shape[-1], len(grid)))

    for i, p in enumerate(xi.T):
        if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
                                               np.all(p <= grid[i][-1])):
            raise ValueError("One of the requested xi is out of bounds "
                             "in dimension %d" % i)

    # perform interpolation
    if method == "linear":
        interp = RegularGridInterpolator(points, values, method="linear",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "nearest":
        interp = RegularGridInterpolator(points, values, method="nearest",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "splinef2d":
        xi_shape = xi.shape
        xi = xi.reshape(-1, xi.shape[-1])

        # RectBivariateSpline doesn't support fill_value; we need to wrap here
        idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
                            grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
                           axis=0)
        result = np.empty_like(xi[:, 0])

        # make a copy of values for RectBivariateSpline
        interp = RectBivariateSpline(points[0], points[1], values[:])
        result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
        result[np.logical_not(idx_valid)] = fill_value

        return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
    """
    Deprecated piecewise polynomial class.

    New code should use the `PPoly` class instead.

    This shim keeps the legacy attribute names (``coeffs``, ``breaks``,
    ``K``, ``a``, ``b``) as aliases of the `PPoly` ones and evaluates to
    ``fill`` outside the interval [a, b] instead of extrapolating.
    """

    def __init__(self, coeffs, breaks, fill=0.0, sort=False):
        # Emits a DeprecationWarning on every construction.
        warnings.warn("ppform is deprecated -- use PPoly instead",
                      category=DeprecationWarning)

        if sort:
            breaks = np.sort(breaks)
        else:
            breaks = np.asarray(breaks)
        PPoly.__init__(self, coeffs, breaks)

        # Legacy aliases for the PPoly attributes.
        self.coeffs = self.c
        self.breaks = self.x
        self.K = self.coeffs.shape[0]
        self.fill = fill
        self.a = self.breaks[0]
        self.b = self.breaks[-1]

    def __call__(self, x):
        return PPoly.__call__(self, x, 0, False)

    def _evaluate(self, x, nu, extrapolate, out):
        PPoly._evaluate(self, x, nu, extrapolate, out)
        # Out-of-range points get the constant fill value instead of the
        # extrapolated polynomial value.
        out[~((x >= self.a) & (x <= self.b))] = self.fill
        return out

    @classmethod
    def fromspline(cls, xk, cvals, order, fill=0.0):
        """Build a ppform from a (knots, coefficients, order) spline.

        # Note: this spline representation is incompatible with FITPACK
        """
        N = len(xk)-1
        sivals = np.empty((order+1, N), dtype=float)
        for m in xrange(order, -1, -1):
            # Divide the m-th derivative values by m! to obtain the
            # polynomial coefficients in power-basis form.
            fact = spec.gamma(m+1)
            res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
            res /= fact
            sivals[order-m, :] = res
        return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
    """Spline coefficients minimizing the derivative-discontinuity norm.

    Solves ``minimize ||J*c||_2 subject to B*c = yk`` via an SVD of the
    constraint matrix ``B``.  ``conds`` is ignored; ``B`` may be passed
    in to avoid recomputing the B-spline evaluation matrix.
    """
    # construct Bmatrix, and Jmatrix
    # e = J*c
    # minimize norm(e,2) given B*c=yk
    # if desired B can be given
    # conds is ignored
    N = len(xk)-1
    K = order
    if B is None:
        B = _fitpack._bsplmat(order, xk)
    J = _fitpack._bspldismat(order, xk)
    u, s, vh = scipy.linalg.svd(B)
    ind = K-1
    # Split the right singular vectors: V2 spans the (near-)null space of
    # B (the K-1 free directions), V1 the remainder.
    V2 = vh[-ind:,:].T
    V1 = vh[:-ind,:].T
    A = dot(J.T,J)
    tmp = dot(V2.T,A)
    Q = dot(tmp,V2)
    # Project the smoothness penalty onto the null space and solve for
    # the free components, then assemble the pseudo-inverse-like map
    # from data values yk to coefficients.
    p = scipy.linalg.solve(Q, tmp)
    tmp = dot(V2,p)
    tmp = np.eye(N+K) - tmp
    tmp = dot(tmp,V1)
    tmp = dot(tmp,np.diag(1.0/s))
    tmp = dot(tmp,u.T)
    return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the discontinuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
    """Second smoothest-spline solver, built on an explicit pseudo-inverse.

    Constructs the banded pseudo-inverse of the constraint matrix
    directly (``Bd``), assembles the second-difference smoothness
    penalty ``J``, and returns the derivative values ``mk`` at the
    knots.  Used as an alternative to the SVD-based `_find_smoothest`.
    """
    N = len(xk) - 1
    Np1 = N + 1

    # find pseudo-inverse of B directly.
    # Fill each diagonal of Bd with an alternating-sign ramp; the signs
    # and magnitudes encode the closed-form pseudo-inverse entries.
    Bd = np.empty((Np1, N))
    for k in range(-N,N):
        if (k < 0):
            l = np.arange(-k, Np1)
            v = (l+k+1)
            if ((k+1) % 2):
                v = -v
        else:
            l = np.arange(k,N)
            v = N - l
            if ((k % 2)):
                v = -v
        _setdiag(Bd, k, v)
    Bd /= (Np1)

    # V2 is the alternating-sign unit vector spanning the one-dimensional
    # null direction of the constraint system.
    V2 = np.ones((Np1,))
    V2[1::2] = -1
    V2 /= math.sqrt(Np1)

    dk = np.diff(xk)
    b = 2*np.diff(yk, axis=0)/dk

    # J applies second differences scaled by the inverse knot spacings.
    J = np.zeros((N-1,N+1))
    idk = 1.0/dk
    _setdiag(J,0,idk[:-1])
    _setdiag(J,1,-idk[1:]-idk[:-1])
    _setdiag(J,2,idk[1:])

    A = dot(J.T,J)
    val = dot(V2,dot(A,V2))
    res1 = dot(np.outer(V2,V2)/val,A)
    # Project out the null-direction component, then map the data
    # differences through the pseudo-inverse.
    mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
    return mk
def _get_spline2_Bb(xk, yk, kind, conds):
    """Build the banded system (B, b) for a quadratic spline.

    Only the 'not-a-knot' kind is implemented; any other `kind` raises
    NotImplementedError.  Returns ``(B, b, append_func, nlu)`` where
    ``B`` is a 3-row banded matrix with ``nlu`` sub/super diagonals and
    ``b`` the right-hand side.
    """
    Np1 = len(xk)
    dk = xk[1:]-xk[:-1]
    if kind == 'not-a-knot':
        # use banded-solver
        nlu = (1,1)
        B = ones((3,Np1))
        alpha = 2*(yk[1:]-yk[:-1])/dk
        # An extra zero row is spliced into the RHS at the middle knot,
        # where the not-a-knot continuity condition replaces a data row.
        zrs = np.zeros((1,)+yk.shape[1:])
        row = (Np1-1)//2
        b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
        # Zero out the band entries outside the system's support and set
        # the not-a-knot row coefficients around the middle knot.
        B[0,row+2:] = 0
        B[2,:(row-1)] = 0
        B[0,row+1] = dk[row-1]
        B[1,row] = -dk[row]-dk[row-1]
        B[2,row-1] = dk[row]
        return B, b, None, nlu
    else:
        raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
    """Build the linear system (B, b) for a cubic spline of the given kind.

    # internal function to compute different tri-diagonal system
    # depending on the kind of spline requested.
    # conds is only used for 'second' and 'first'

    Returns ``(B, b, append_func, nlu)``: the system matrix (banded for
    'natural'/'second', dense otherwise), the right-hand side, an
    optional post-processing callable, and the band widths (or None).
    """
    Np1 = len(xk)
    if kind in ['natural', 'second']:
        if kind == 'natural':
            m0, mN = 0.0, 0.0
        else:
            m0, mN = conds

        # the matrix to invert is (N-1,N-1)
        # use banded solver
        beta = 2*(xk[2:]-xk[:-2])
        alpha = xk[1:]-xk[:-1]
        nlu = (1,1)
        B = np.empty((3,Np1-2))
        B[0,1:] = alpha[2:]
        B[1,:] = beta
        B[2,:-1] = alpha[1:-1]
        dyk = yk[1:]-yk[:-1]
        b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
        b *= 6
        # Fold the known end second-derivatives into the RHS.
        b[0] -= m0
        b[-1] -= mN

        def append_func(mk):
            # put m0 and mN into the correct shape for
            # concatenation
            ma = array(m0,copy=0,ndmin=yk.ndim)
            mb = array(mN,copy=0,ndmin=yk.ndim)
            if ma.shape[1:] != yk.shape[1:]:
                ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
            if mb.shape[1:] != yk.shape[1:]:
                mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
            mk = np.concatenate((ma,mk),axis=0)
            mk = np.concatenate((mk,mb),axis=0)
            return mk

        return B, b, append_func, nlu

    elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
                  'parabolic']:
        if kind == 'endslope':
            # match slope of lagrange interpolating polynomial of
            # order 3 at end-points.
            # NOTE(review): the denominators of the third term below use
            # (x3-x2) / (xN3-xN2) where the Lagrange derivative formula
            # suggests (x2-x3) / (xN2-xN3) -- possible sign error; verify
            # against the Lagrange basis derivative before relying on it.
            x0,x1,x2,x3 = xk[:4]
            sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
            sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
            sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x3-x2))*yk[2]
            sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]

            xN3,xN2,xN1,xN0 = xk[-4:]
            sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
            sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
            sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN3-xN2))*yk[-3]
            sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
        elif kind == 'clamped':
            sl_0, sl_N = 0.0, 0.0
        elif kind == 'first':
            sl_0, sl_N = conds

        # Now set up the (N+1)x(N+1) system of equations
        beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
        alpha = xk[1:]-xk[:-1]
        gamma = np.r_[0,alpha[1:]]
        B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
        d1 = alpha[0]
        dN = alpha[-1]
        # Replace the first and last rows with the chosen boundary
        # condition for this kind of spline.
        if kind == 'not-a-knot':
            d2 = alpha[1]
            dN1 = alpha[-2]
            B[0,:3] = [d2,-d1-d2,d1]
            B[-1,-3:] = [dN,-dN1-dN,dN1]
        elif kind == 'runout':
            B[0,:3] = [1,-2,1]
            B[-1,-3:] = [1,-2,1]
        elif kind == 'parabolic':
            B[0,:2] = [1,-1]
            B[-1,-2:] = [-1,1]
        elif kind == 'periodic':
            raise NotImplementedError
        elif kind == 'symmetric':
            raise NotImplementedError
        else:
            B[0,:2] = [2*d1,d1]
            B[-1,-2:] = [dN,2*dN]

        # Set up RHS (b)
        b = np.empty((Np1,)+yk.shape[1:])
        dyk = (yk[1:]-yk[:-1])*1.0
        if kind in ['not-a-knot', 'runout', 'parabolic']:
            b[0] = b[-1] = 0.0
        elif kind == 'periodic':
            raise NotImplementedError
        elif kind == 'symmetric':
            raise NotImplementedError
        else:
            b[0] = (dyk[0]/d1 - sl_0)
            b[-1] = -(dyk[-1]/dN - sl_N)
        b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
        b *= 6.0
        return B, b, None, None
    else:
        raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
    """Not-a-knot boundary conditions -- not implemented yet."""
    raise NotImplementedError
    # Unreachable: sketch of the intended delegation once the conditions
    # are constructed.
    return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
    """Natural (zero second-derivative) conditions -- not implemented yet."""
    raise NotImplementedError
    # Unreachable: sketch of the intended delegation once the conditions
    # are constructed.
    return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
    """Clamped (zero first-derivative) conditions -- not implemented yet."""
    raise NotImplementedError
    # Unreachable: sketch of the intended delegation once the conditions
    # are constructed.
    return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
    """Fixed end conditions -- not implemented yet."""
    raise NotImplementedError
    # Unreachable: sketch of the intended delegation once the conditions
    # are constructed.
    return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
    """Periodic boundary conditions -- not implemented yet."""
    raise NotImplementedError
    # Unreachable: sketch of the intended delegation once the conditions
    # are constructed.
    return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
    """Symmetric boundary conditions -- not implemented yet."""
    raise NotImplementedError
    # Unreachable: sketch of the intended delegation once the conditions
    # are constructed.
    return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
    """Mixed (per-end dictionary) conditions -- not implemented yet."""
    raise NotImplementedError
    # Unreachable: sketch of the intended delegation once the conditions
    # are constructed.
    return _find_user(xk, yk, order, conds, B)
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
                      "use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
    """
    Return a representation of a spline given data-points at internal knots

    Parameters
    ----------
    xk : array_like
        The input array of x values of rank 1
    yk : array_like
        The input array of y values of rank N. `yk` can be an N-d array to
        represent more than one curve, through the same `xk` points. The first
        dimension is assumed to be the interpolating dimension and is the same
        length of `xk`.
    order : int, optional
        Order of the spline
    kind : str, optional
        Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
        'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Conds

    Returns
    -------
    splmake : tuple
        Return a (`xk`, `cvals`, `k`) representation of a spline given
        data-points where the (internal) knots are at the data-points.

    Raises
    ------
    ValueError
        If `order` is negative.
    NotImplementedError
        If no solver exists for the requested `kind`.
    """
    yk = np.asanyarray(yk)

    order = int(order)
    if order < 0:
        raise ValueError("order must not be negative")
    if order == 0:
        # Piecewise constant: one coefficient per interval.
        return xk, yk[:-1], order
    elif order == 1:
        # Piecewise linear: the knot values are the coefficients.
        return xk, yk, order

    # Dispatch to the boundary-condition solver by name.  A plain
    # globals() lookup replaces the old eval() call, which would execute
    # arbitrary code for a crafted `kind` string, and the narrow KeyError
    # replaces a bare `except:` that hid unrelated failures.
    try:
        func = globals()['_find_%s' % kind]
    except KeyError:
        raise NotImplementedError

    # the constraint matrix
    B = _fitpack._bsplmat(order, xk)
    coefs = func(xk, yk, order, conds, B)
    return xk, coefs, order
@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
            "use BSpline instead.")
def spleval(xck, xnew, deriv=0):
    """
    Evaluate a fixed spline represented by the given tuple at the new x-values

    The `xj` values are the interior knot points.  The approximation
    region is `xj[0]` to `xj[-1]`.  If N+1 is the length of `xj`, then `cvals`
    should have length N+k where `k` is the order of the spline.

    Parameters
    ----------
    (xj, cvals, k) : tuple
        Parameters that define the fixed spline
    xj : array_like
        Interior knot points
    cvals : array_like
        Curvature
    k : int
        Order of the spline
    xnew : array_like
        Locations to calculate spline
    deriv : int
        Deriv

    Returns
    -------
    spleval : ndarray
        If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
        `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
        providing the interpolation of multiple curves.

    Notes
    -----
    Internally, an additional `k`-1 knot points are added on either side of
    the spline.
    """
    (xj,cvals,k) = xck
    oldshape = np.shape(xnew)
    # Evaluate on the flattened points, then restore the caller's shape.
    xx = np.ravel(xnew)
    sh = cvals.shape[1:]
    res = np.empty(xx.shape + sh, dtype=cvals.dtype)
    for index in np.ndindex(*sh):
        sl = (slice(None),)+index
        if issubclass(cvals.dtype.type, np.complexfloating):
            # _bspleval works on real data only: evaluate the real and
            # imaginary parts separately.
            res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
            res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
        else:
            res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
    res.shape = oldshape + sh
    return res
@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
            "use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
    """Return a piece-wise polynomial object from a fixed-spline tuple.

    `xk`, `cvals` and `k` are the knots, coefficients and order as
    produced by `splmake`.
    """
    return ppform.fromspline(xk, cvals, k)
@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
            "use Bspline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
    """
    Interpolate a curve at new points using a spline fit

    Parameters
    ----------
    xk, yk : array_like
        The x and y values that define the curve.
    xnew : array_like
        The x values where spline should estimate the y values.
    order : int
        Default is 3.
    kind : string
        One of {'smoothest'}
    conds : Don't know
        Don't know

    Returns
    -------
    spline : ndarray
        An array of y values; the spline evaluated at the positions `xnew`.
    """
    # Convenience wrapper: build the spline representation with splmake,
    # then evaluate it at xnew with spleval.
    return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
| {
"content_hash": "fb4d92c665b3d82a262315ae47f24dd9",
"timestamp": "",
"source": "github",
"line_count": 3055,
"max_line_length": 111,
"avg_line_length": 34.852700490998366,
"alnum_prop": 0.547170697346795,
"repo_name": "maniteja123/scipy",
"id": "78aa770e80f1d52f48db8de56f1dff1c021364c3",
"size": "106475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/interpolate/interpolate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4306215"
},
{
"name": "C++",
"bytes": "3692292"
},
{
"name": "FORTRAN",
"bytes": "5573034"
},
{
"name": "HTML",
"bytes": "124330"
},
{
"name": "Makefile",
"bytes": "76425"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "10486771"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
"""
A context object for caching a function's return value each time it
is called with the same input arguments.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import os
import shutil
import sys
import time
import pydoc
try:
import cPickle as pickle
except ImportError:
import pickle
import functools
import traceback
import warnings
import inspect
try:
# json is in the standard library for Python >= 2.6
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Not the end of the world: we'll do without this functionality
json = None
# Local imports
from .hashing import hash
from .func_inspect import get_func_code, get_func_name, filter_args
from .logger import Logger, format_time
from . import numpy_pickle
from .disk import rm_subdirs
FIRST_LINE_TEXT = "# first line:"

# TODO: The following object should have a data store object as a sub
# object, and the interface to persist and query should be separated in
# the data store.
#
# This would enable creating 'Memory' objects with a different logic for
# pickling that would simply span a MemorizedFunc with the same
# store (or do we want to copy it to avoid cross-talks?), for instance to
# implement HDF5 pickling.

# TODO: Same remark for the logger, and probably use the Python logging
# mechanism.


def extract_first_line(func_code):
    """ Split a stored function-code string into (code, first_line).

        Cached source is persisted with a leading ``"# first line: N"``
        marker recording where the function started in its original
        file.  Returns the code without that marker plus the recorded
        line number, or ``(func_code, -1)`` when no marker is present.
    """
    if not func_code.startswith(FIRST_LINE_TEXT):
        return func_code, -1
    marker, _, remainder = func_code.partition('\n')
    first_line = int(marker[len(FIRST_LINE_TEXT):])
    return remainder, first_line
class JobLibCollisionWarning(UserWarning):
    """ Warn that there might be a collision between names of functions.

        Emitted by MemorizedFunc._check_previous_func_code when two
        different functions appear to share the same cache location.
    """
###############################################################################
# class `MemorizedFunc`
###############################################################################
class MemorizedFunc(Logger):
""" Callable object decorating a function for caching its return value
each time it is called.
All values are cached on the filesystem, in a deep directory
structure. Methods are provided to inspect the cache or clean it.
Attributes
----------
func: callable
The original, undecorated, function.
cachedir: string
Path to the base cache directory of the memory context.
ignore: list or None
List of variable names to ignore when choosing whether to
recompute.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
The memmapping mode used when loading from cache
numpy arrays. See numpy.load for the meaning of the
arguments. Only used if save_npy was true when the
cache was created.
verbose: int, optional
The verbosity flag, controls messages that are issued as
the function is revaluated.
"""
#-------------------------------------------------------------------------
# Public interface
#-------------------------------------------------------------------------
def __init__(self, func, cachedir, ignore=None, save_npy=True,
mmap_mode=None, verbose=1, timestamp=None):
"""
Parameters
----------
func: callable
The function to decorate
cachedir: string
The path of the base directory to use as a data store
ignore: list or None
List of variable names to ignore.
save_npy: boolean, optional
If True, numpy arrays are saved outside of the pickle
files in the cache, as npy files.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
The memmapping mode used when loading from cache
numpy arrays. See numpy.load for the meaning of the
arguments. Only used if save_npy was true when the
cache was created.
verbose: int, optional
Verbosity flag, controls the debug messages that are issued
as functions are revaluated. The higher, the more verbose
timestamp: float, optional
The reference time from which times in tracing messages
are reported.
"""
Logger.__init__(self)
self._verbose = verbose
self.cachedir = cachedir
self.func = func
self.save_npy = save_npy
self.mmap_mode = mmap_mode
if timestamp is None:
timestamp = time.time()
self.timestamp = timestamp
if ignore is None:
ignore = []
self.ignore = ignore
if not os.path.exists(self.cachedir):
os.makedirs(self.cachedir)
try:
functools.update_wrapper(self, func)
except:
" Objects like ufunc don't like that "
if inspect.isfunction(func):
doc = pydoc.TextDoc().document(func
).replace('\n', '\n\n', 1)
else:
# Pydoc does a poor job on other objects
doc = func.__doc__
self.__doc__ = 'Memoized version of %s' % doc
    def __call__(self, *args, **kwargs):
        """ Return the cached output for these arguments when available,
            otherwise execute the wrapped function and persist the result.
        """
        # Compare the function code with the previous to see if the
        # function code has changed
        output_dir, _ = self.get_output_dir(*args, **kwargs)
        # FIXME: The statements below should be try/excepted
        if not (self._check_previous_func_code(stacklevel=3) and
                                 os.path.exists(output_dir)):
            # Cache miss, or the function's source changed: recompute.
            return self.call(*args, **kwargs)
        else:
            try:
                t0 = time.time()
                out = self.load_output(output_dir)
                if self._verbose > 4:
                    t = time.time() - t0
                    _, name = get_func_name(self.func)
                    msg = '%s cache loaded - %s' % (name, format_time(t))
                    print max(0, (80 - len(msg))) * '_' + msg
                return out
            except Exception:
                # XXX: Should use an exception logger
                self.warn('Exception while loading results for '
                          '(args=%s, kwargs=%s)\n %s' %
                          (args, kwargs, traceback.format_exc()))
                # The cache entry could not be unpickled: treat it as
                # corrupted, drop it and recompute.
                shutil.rmtree(output_dir, ignore_errors=True)
                return self.call(*args, **kwargs)
    def __reduce__(self):
        """ We don't store the timestamp when pickling, to avoid the hash
            depending from it.
            In addition, when unpickling, we run the __init__, so the
            cache directory is re-created if needed.
        """
        return (self.__class__, (self.func, self.cachedir, self.ignore,
                self.save_npy, self.mmap_mode, self._verbose))
#-------------------------------------------------------------------------
# Private interface
#-------------------------------------------------------------------------
    def _get_func_dir(self, mkdir=True):
        """ Get the directory corresponding to the cache for the
            function.

            The path is cachedir/<module path>/<function name>.  When
            `mkdir` is true, the directory (and parents) is created if
            missing.
        """
        module, name = get_func_name(self.func)
        module.append(name)
        func_dir = os.path.join(self.cachedir, *module)
        if mkdir and not os.path.exists(func_dir):
            try:
                os.makedirs(func_dir)
            except OSError:
                """ Dir exists: we have a race condition here, when using
                    multiprocessing.
                """
                # XXX: Ugly
        return func_dir
def get_output_dir(self, *args, **kwargs):
""" Returns the directory in which are persisted the results
of the function corresponding to the given arguments.
The results can be loaded using the .load_output method.
"""
coerce_mmap = (self.mmap_mode is not None)
argument_hash = hash(filter_args(self.func, self.ignore,
*args, **kwargs),
coerce_mmap=coerce_mmap)
output_dir = os.path.join(self._get_func_dir(self.func),
argument_hash)
return output_dir, argument_hash
def _write_func_code(self, filename, func_code, first_line):
""" Write the function code and the filename to a file.
"""
func_code = '%s %i\n%s' % (FIRST_LINE_TEXT, first_line, func_code)
file(filename, 'w').write(func_code)
    def _check_previous_func_code(self, stacklevel=2):
        """
        Check whether the cached source for this function still matches
        its current source; if not, warn about possible name collisions
        and wipe the stale cache.  Returns True when the cache is valid.

        stacklevel is the depth a which this function is called, to
        issue useful warnings to the user.
        """
        # Here, we go through some effort to be robust to dynamically
        # changing code and collision. We cannot inspect.getsource
        # because it is not reliable when using IPython's magic "%run".
        func_code, source_file, first_line = get_func_code(self.func)
        func_dir = self._get_func_dir()
        func_code_file = os.path.join(func_dir, 'func_code.py')

        try:
            # Treat a missing file the same as an unreadable one: both
            # mean there is no previous code to compare against.
            if not os.path.exists(func_code_file):
                raise IOError
            old_func_code, old_first_line = \
                        extract_first_line(file(func_code_file).read())
        except IOError:
                self._write_func_code(func_code_file, func_code, first_line)
                return False
        if old_func_code == func_code:
            return True

        # We have differing code, is this because we are refering to
        # differing functions, or because the function we are refering as
        # changed?
        if old_first_line == first_line == -1:
            _, func_name = get_func_name(self.func, resolv_alias=False,
                                         win_characters=False)
            # NOTE(review): inside this branch first_line is always -1,
            # so the condition below can never be true and the '(file:line)'
            # description is dead code -- looks like a latent bug; confirm
            # against upstream joblib.
            if not first_line == -1:
                func_description = '%s (%s:%i)' % (func_name,
                                                source_file, first_line)
            else:
                func_description = func_name
            warnings.warn(JobLibCollisionWarning(
                "Cannot detect name collisions for function '%s'"
                        % func_description), stacklevel=stacklevel)

        # Fetch the code at the old location and compare it. If it is the
        # same than the code store, we have a collision: the code in the
        # file has not changed, but the name we have is pointing to a new
        # code block.
        if (not old_first_line == first_line
                                    and source_file is not None
                                    and os.path.exists(source_file)):
            _, func_name = get_func_name(self.func, resolv_alias=False)
            num_lines = len(func_code.split('\n'))
            on_disk_func_code = file(source_file).readlines()[
                    old_first_line - 1:old_first_line - 1 + num_lines - 1]
            on_disk_func_code = ''.join(on_disk_func_code)
            if on_disk_func_code.rstrip() == old_func_code.rstrip():
                warnings.warn(JobLibCollisionWarning(
                'Possible name collisions between functions '
                "'%s' (%s:%i) and '%s' (%s:%i)" %
                (func_name, source_file, old_first_line,
                    func_name, source_file, first_line)),
                 stacklevel=stacklevel)

        # The function has changed, wipe the cache directory.
        # XXX: Should be using warnings, and giving stacklevel
        self.clear(warn=True)
        return False
def clear(self, warn=True):
""" Empty the function's cache.
"""
func_dir = self._get_func_dir(mkdir=False)
if self._verbose and warn:
self.warn("Clearing cache %s" % func_dir)
if os.path.exists(func_dir):
shutil.rmtree(func_dir, ignore_errors=True)
try:
os.makedirs(func_dir)
except OSError:
""" Directory exists: it has been created by another process
in the mean time. """
func_code, _, first_line = get_func_code(self.func)
func_code_file = os.path.join(func_dir, 'func_code.py')
self._write_func_code(func_code_file, func_code, first_line)
    def call(self, *args, **kwargs):
        """ Force the execution of the function with the given arguments and
            persist the output values.

            Returns the function's output; as side effects it writes the
            pickled output and a summary of the input arguments into the
            per-call cache directory.
        """
        start_time = time.time()
        if self._verbose:
            print self.format_call(*args, **kwargs)
        # The output directory is derived from a hash of the arguments, so
        # each distinct call gets its own cache slot.
        output_dir, argument_hash = self.get_output_dir(*args, **kwargs)
        output = self.func(*args, **kwargs)
        self._persist_output(output, output_dir)
        input_repr = self._persist_input(output_dir, *args, **kwargs)
        duration = time.time() - start_time
        if self._verbose:
            _, name = get_func_name(self.func)
            msg = '%s - %s' % (name, format_time(duration))
            # Right-align the message to an 80-column ruler of underscores.
            print max(0, (80 - len(msg))) * '_' + msg
        return output
def format_call(self, *args, **kwds):
""" Returns a nicely formatted statement displaying the function
call with the given arguments.
"""
path, signature = self.format_signature(self.func, *args,
**kwds)
msg = '%s\n[Memory] Calling %s...\n%s' % (80 * '_', path, signature)
return msg
# XXX: Not using logging framework
#self.debug(msg)
    def format_signature(self, func, *args, **kwds):
        """ Build a human readable representation of a call to `func`.

            Returns a (module_path, signature) pair, where signature looks
            like ``name(arg1, arg2, kw=value)`` with long arguments
            truncated and wrapped.
        """
        # XXX: This should be moved out to a function
        # XXX: Should this use inspect.formatargvalues/formatargspec?
        module, name = get_func_name(func)
        module = [m for m in module if m]
        if module:
            module.append(name)
            module_path = '.'.join(module)
        else:
            module_path = name
        arg_str = list()
        previous_length = 0
        for arg in args:
            arg = self.format(arg, indent=2)
            # Truncate very long representations to keep the log readable.
            if len(arg) > 1500:
                arg = '%s...' % arg[:700]
            # If the *previous* argument's representation was long, start
            # this one on a new line.
            if previous_length > 80:
                arg = '\n%s' % arg
            previous_length = len(arg)
            arg_str.append(arg)
        arg_str.extend(['%s=%s' % (v, self.format(i)) for v, i in
                    kwds.iteritems()])
        arg_str = ', '.join(arg_str)
        signature = '%s(%s)' % (name, arg_str)
        return module_path, signature
    # TODO: make this method public
    def _persist_output(self, output, dir):
        """ Persist the given output tuple in the directory.

            Uses npy-aware pickling when numpy is loaded and save_npy is
            enabled, plain pickle otherwise.
        """
        try:
            if not os.path.exists(dir):
                os.makedirs(dir)
            filename = os.path.join(dir, 'output.pkl')
            # Only use numpy_pickle if numpy is already imported by the
            # application; this avoids importing numpy just for caching.
            if 'numpy' in sys.modules and self.save_npy:
                numpy_pickle.dump(output, filename)
            else:
                output_file = file(filename, 'w')
                pickle.dump(output, output_file, protocol=2)
                output_file.close()
        except OSError:
            " Race condition in the creation of the directory "
    def _persist_input(self, output_dir, *args, **kwargs):
        """ Save a small summary of the call using json format in the
            output directory.

            Returns the dict of argument reprs even when writing the json
            file fails.
        """
        argument_dict = filter_args(self.func, self.ignore,
                                    *args, **kwargs)
        input_repr = dict((k, repr(v)) for k, v in argument_dict.iteritems())
        # json may be None when no json library is available.
        if json is not None:
            # This can fail do to race-conditions with multiple
            # concurrent joblibs removing the file or the directory
            try:
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                json.dump(
                    input_repr,
                    file(os.path.join(output_dir, 'input_args.json'), 'w'),
                    )
            except:
                # Deliberate best-effort: the json summary is purely
                # informational, so any failure here is swallowed.
                pass
        return input_repr
    def load_output(self, output_dir):
        """ Read the results of a previous calculation from the directory
            it was cached in.

            Deserialises with numpy_pickle (optionally memory-mapped) when
            save_npy is set, plain pickle otherwise.
        """
        if self._verbose > 1:
            t = time.time() - self.timestamp
            print '[Memory]% 16s: Loading %s...' % (
                        format_time(t),
                        self.format_signature(self.func)[0]
                        )
        filename = os.path.join(output_dir, 'output.pkl')
        if self.save_npy:
            return numpy_pickle.load(filename,
                                     mmap_mode=self.mmap_mode)
        else:
            output_file = file(filename, 'r')
            return pickle.load(output_file)
# XXX: Need a method to check if results are available.
#-------------------------------------------------------------------------
# Private `object` interface
#-------------------------------------------------------------------------
def __repr__(self):
return '%s(func=%s, cachedir=%s)' % (
self.__class__.__name__,
self.func,
repr(self.cachedir),
)
###############################################################################
# class `Memory`
###############################################################################
class Memory(Logger):
    """ A context object for caching a function's return value each time it
        is called with the same input arguments.
        All values are cached on the filesystem, in a deep directory
        structure.
        see :ref:`memory_reference`
    """
    #-------------------------------------------------------------------------
    # Public interface
    #-------------------------------------------------------------------------
    def __init__(self, cachedir, save_npy=True, mmap_mode=None,
                 verbose=1):
        """
            Parameters
            ----------
            cachedir: string or None
                The path of the base directory to use as a data store
                or None. If None is given, no caching is done and
                the Memory object is completely transparent.
            save_npy: boolean, optional
                If True, numpy arrays are saved outside of the pickle
                files in the cache, as npy files.
            mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
                The memmapping mode used when loading from cache
                numpy arrays. See numpy.load for the meaning of the
                arguments. Only used if save_npy was true when the
                cache was created.
            verbose: int, optional
                Verbosity flag, controls the debug messages that are issued
                as functions are revaluated.
        """
        # XXX: Bad explaination of the None value of cachedir
        Logger.__init__(self)
        self._verbose = verbose
        self.save_npy = save_npy
        self.mmap_mode = mmap_mode
        self.timestamp = time.time()
        if cachedir is None:
            self.cachedir = None
        else:
            self.cachedir = os.path.join(cachedir, 'joblib')
            if not os.path.exists(self.cachedir):
                try:
                    os.makedirs(self.cachedir)
                except OSError:
                    # Fix a race: another process may create the directory
                    # between the existence check and makedirs.
                    pass
    def cache(self, func=None, ignore=None, verbose=None,
              mmap_mode=False):
        """ Decorates the given function func to only compute its return
            value for input arguments not cached on disk.
            Parameters
            ----------
            func: callable, optional
                The function to be decorated
            ignore: list of strings
                A list of arguments name to ignore in the hashing
            verbose: integer, optional
                The verbosity mode of the function. By default that
                of the memory object is used.
            mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
                The memmapping mode used when loading from cache
                numpy arrays. See numpy.load for the meaning of the
                arguments. By default that of the memory object is used.
            Returns
            -------
            decorated_func: MemorizedFunc object
                The returned object is a MemorizedFunc object, that is
                callable (behaves like a function), but offers extra
                methods for cache lookup and management. See the
                documentation for :class:`joblib.memory.MemorizedFunc`.
        """
        if func is None:
            # Partial application, to be able to specify extra keyword
            # arguments in decorators. Forward *all* the settings (the
            # previous code only forwarded `ignore`, silently dropping
            # verbose and mmap_mode given to the decorator factory).
            return functools.partial(self.cache, ignore=ignore,
                                     verbose=verbose, mmap_mode=mmap_mode)
        if self.cachedir is None:
            # Caching disabled: the Memory object is transparent.
            return func
        if verbose is None:
            verbose = self._verbose
        if mmap_mode is False:
            # False is the "not given" sentinel: None is a valid mmap mode.
            mmap_mode = self.mmap_mode
        if isinstance(func, MemorizedFunc):
            # Avoid wrapping an already-decorated function twice.
            func = func.func
        return MemorizedFunc(func, cachedir=self.cachedir,
                                   save_npy=self.save_npy,
                                   mmap_mode=mmap_mode,
                                   ignore=ignore,
                                   verbose=verbose,
                                   timestamp=self.timestamp)
    def clear(self, warn=True):
        """ Erase the complete cache directory.
        """
        if warn:
            self.warn('Flushing completely the cache')
        rm_subdirs(self.cachedir)
    def eval(self, func, *args, **kwargs):
        """ Eval function func with arguments `*args` and `**kwargs`,
            in the context of the memory.
            This method works similarly to the builtin `apply`, except
            that the function is called only if the cache is not
            up to date.
        """
        if self.cachedir is None:
            return func(*args, **kwargs)
        return self.cache(func)(*args, **kwargs)
    #-------------------------------------------------------------------------
    # Private `object` interface
    #-------------------------------------------------------------------------
    def __repr__(self):
        return '%s(cachedir=%s)' % (
                    self.__class__.__name__,
                    repr(self.cachedir),
                    )
    def __reduce__(self):
        """ We don't store the timestamp when pickling, to avoid the hash
            depending from it.
            In addition, when unpickling, we run the __init__
        """
        # We need to remove 'joblib' from the end of cachedir; cachedir may
        # be None when caching is disabled (the previous code crashed with a
        # TypeError on None[:-7] in that case).
        cachedir = self.cachedir[:-7] if self.cachedir is not None else None
        return (self.__class__, (cachedir,
                self.save_npy, self.mmap_mode, self._verbose))
| {
"content_hash": "72f2df82795876ed73590981ac3c43e2",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 79,
"avg_line_length": 39.733564013840834,
"alnum_prop": 0.5213794304624227,
"repo_name": "splunk/splunk-app-splunkgit",
"id": "296e410578d132e80d3218e225963e34c6c5ba8f",
"size": "22966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/joblib/memory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "149147"
},
{
"name": "Shell",
"bytes": "15969"
}
],
"symlink_target": ""
} |
from netforce.model import Model, fields
from netforce import access
class ReasonCode(Model):
    # Netforce ORM model storing user-maintained "reason codes" that other
    # documents (faults, service visits, lost sales, returns) reference.
    _name = "reason.code"
    _string = "Reason Code"
    # Records are uniquely identified by their code (used for import/merge).
    _key = ["code"]
    _fields = {
        "type": fields.Selection([["fault", "Fault Code"], ["service_multi_visit", "Service Multi-Visit"], ["service_late_response", "Service Late Response"], ["lost_sale", "Lost Sales"], ["sale_return","Sales Return"]], "Reason Type", search=True, required=True),
        "code": fields.Char("Code", search=True, required=True),
        "name": fields.Char("Name", required=True, search=True),
        "description": fields.Text("Description"),
        # Messages whose related_id points at this record show up as comments.
        "comments": fields.One2Many("message", "related_id", "Comments"),
    }
    # Default list ordering: group by reason type, then by code.
    _order = "type,code"
# Register the model with the netforce ORM at import time.
ReasonCode.register()
| {
"content_hash": "801e50f072ba4a4855d561d913132c3a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 264,
"avg_line_length": 42.22222222222222,
"alnum_prop": 0.6381578947368421,
"repo_name": "nfco/netforce",
"id": "c1133ac269d9306ac302fb182c5ca4791a4d1495",
"size": "1865",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "netforce_general/netforce_general/models/reason_code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "407336"
},
{
"name": "HTML",
"bytes": "478918"
},
{
"name": "Java",
"bytes": "11870"
},
{
"name": "JavaScript",
"bytes": "3712147"
},
{
"name": "Makefile",
"bytes": "353"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3469515"
},
{
"name": "Roff",
"bytes": "15858"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
class Solution(object):
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of needle in haystack,
        or -1 when needle does not occur.

        :type haystack: str
        :type needle: str
        :rtype: int
        """
        # str.find already returns -1 on a miss, which is exactly the
        # contract required (str.index would raise ValueError instead).
        return haystack.find(needle)
| {
"content_hash": "90b2332f0ba1290aa3e6c3dae254ed60",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 41,
"avg_line_length": 24.181818181818183,
"alnum_prop": 0.5,
"repo_name": "Junnplus/leetcode",
"id": "9b1126b8b2ec897fc194de34910d62cacb202ed5",
"size": "266",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "algorithms/easy/implement-strstr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "68625"
},
{
"name": "Python",
"bytes": "62812"
}
],
"symlink_target": ""
} |
"""
This module tests the Connector. He's a cool guy. He tries to connect to
Eventstore and doesn't quit until he's exhausted every option. He only deals
with TCP connections, StreamReaders, and StreamWriters, so we can test him with
a simple echo server.
When he makes a connection, loses a connection, or quits trying, he raises an
event. These events are handled by the Reader, Writer, and Dispatcher.
"""
import asyncio
import logging
import pytest
from photonpump.connection import Connector, ConnectorCommand
from photonpump.conversations import Ping
from photonpump.discovery import (
DiscoveryFailed,
NodeService,
SingleNodeDiscovery,
DiscoveryRetryPolicy,
)
from ..fakes import EchoServer, TeeQueue, SpyDispatcher
async def connector_event(connector_event):
    """Await the next firing of the given event (a list of callbacks).

    Registers a one-shot handler on the event and resolves once it is
    invoked; extra invocations after the first are ignored.
    """
    fired = asyncio.Future()

    def _handler(*_args):
        # Guard against the event firing more than once.
        if not fired.done():
            fired.set_result(None)

    connector_event.append(_handler)
    return await fired
@pytest.mark.asyncio
async def test_when_connecting_to_a_server(event_loop):
    """
    When we connect to a server, the protocol should begin sending the
    pending messages held by the dispatcher.
    When it receives an InboundMessage from the MessageReader, it should call
    the "dispatch" method of the Dispatcher.
    """
    addr = NodeService("localhost", 8338, None)
    async with EchoServer(addr):
        dispatcher = SpyDispatcher()
        connector = Connector(
            SingleNodeDiscovery(addr, DiscoveryRetryPolicy()),
            dispatcher,
        )
        ping = Ping()
        # Queue the conversation *before* the connection exists so it must be
        # flushed as soon as the connector comes up.
        await dispatcher.start_conversation(ping)
        await connector.start()
        await connector_event(connector.connected)
        # The echo server reflects the Ping bytes back, so the dispatcher
        # should receive a message carrying the same conversation id.
        roundtripped_message = await dispatcher.received.get()
        assert roundtripped_message.conversation_id == ping.conversation_id
        await connector.stop()
@pytest.mark.asyncio
async def test_when_a_server_disconnects(event_loop):
    """
    Usually, when eventstore goes away, we'll get an EOF on the transport.
    If that happens, we should raise a disconnected event.
    We should also place a reconnection message on the control queue.
    """
    addr = NodeService("localhost", 8338, None)
    queue = TeeQueue()
    dispatcher = SpyDispatcher()
    connector = Connector(
        SingleNodeDiscovery(addr, DiscoveryRetryPolicy()),
        dispatcher,
        ctrl_queue=queue,
    )
    raised_disconnected_event = asyncio.Future()
    def on_disconnected():
        raised_disconnected_event.set_result(True)
    connector.disconnected.append(on_disconnected)
    async with EchoServer(addr) as server:
        await connector.start()
        connect = await queue.next_event()
        assert connect.command == ConnectorCommand.Connect
        connected = await queue.next_event()
        assert connected.command == ConnectorCommand.HandleConnectionOpened
        # Stopping the server produces an EOF on the connector's transport.
        server.stop()
        disconnect = await queue.next_event()
        assert disconnect.command == ConnectorCommand.HandleConnectionFailed
        # A reconnection attempt must follow the failure automatically.
        reconnect = await queue.next_event()
        assert reconnect.command == ConnectorCommand.Connect
        assert raised_disconnected_event.result() is True
        await connector.stop()
@pytest.mark.asyncio
async def test_when_three_heartbeats_fail_in_a_row(event_loop):
    """
    We're going to set up a separate heartbeat loop to send heartbeat requests
    to the server. If three of those heartbeats timeout in a row, we'll put a
    reconnection request on the queue.
    """
    queue = TeeQueue()
    addr = NodeService("localhost", 8338, None)
    dispatcher = SpyDispatcher()
    connector = Connector(
        SingleNodeDiscovery(addr, DiscoveryRetryPolicy()),
        dispatcher,
        ctrl_queue=queue,
    )
    async with EchoServer(addr):
        await connector.start()
        [connect, connected] = await queue.next_event(count=2)
        assert connect.command == ConnectorCommand.Connect
        assert connected.command == ConnectorCommand.HandleConnectionOpened
        # Simulate three consecutive heartbeat timeouts: the third one should
        # tear the connection down and schedule a reconnect.
        connector.heartbeat_failed()
        connector.heartbeat_failed()
        connector.heartbeat_failed()
        [hb1, hb2, hb3, connection_closed, reconnect] = await queue.next_event(count=5)
        assert connection_closed.command == ConnectorCommand.HandleConnectionClosed
        assert reconnect.command == ConnectorCommand.Connect
        await connector.stop()
@pytest.mark.asyncio
async def test_when_a_heartbeat_succeeds(event_loop):
    """
    If one of our heartbeats succeeds, we should reset our counter.
    Ergo, if we have two failures, followed by a success, followed
    by two failures, we should not reset the connection.
    """
    queue = TeeQueue()
    addr = NodeService("localhost", 8338, None)
    dispatcher = SpyDispatcher()
    connector = Connector(
        SingleNodeDiscovery(addr, DiscoveryRetryPolicy()),
        dispatcher,
        ctrl_queue=queue,
    )
    async with EchoServer(addr):
        await connector.start()
        [connect, connected] = await queue.next_event(count=2)
        assert connect.command == ConnectorCommand.Connect
        assert connected.command == ConnectorCommand.HandleConnectionOpened
        # Two failures: one short of the reconnect threshold.
        connector.heartbeat_failed()
        connector.heartbeat_failed()
        [hb1, hb2] = await queue.next_event(count=2)
        assert connector.heartbeat_failures == 2
        # A success must reset the failure counter ...
        connector.heartbeat_received("Foo")
        # ... so two further failures still stay below the threshold.
        connector.heartbeat_failed()
        connector.heartbeat_failed()
        [success, hb3, hb4] = await queue.next_event(count=3)
        assert connector.heartbeat_failures == 2
        assert success.command == ConnectorCommand.HandleHeartbeatSuccess
        assert hb3.command == ConnectorCommand.HandleHeartbeatFailed
        assert hb4.command == ConnectorCommand.HandleHeartbeatFailed
        await connector.stop()
@pytest.mark.asyncio
async def test_when_discovery_fails_on_reconnection(event_loop):
    """
    If we can't retry our current node any more, and we can't discover a new one
    then it's game over and we should raise the stopped event.
    """
    # Retry policy stub that allows exactly one attempt per node: the first
    # failure is recorded and should_retry is False from then on.
    class never_retry:
        def __init__(self):
            self.recorded = None
        def should_retry(self, _):
            return self.recorded is None
        def record_failure(self, node):
            self.recorded = node
        async def wait(self, node):
            ...
    wait_for_stopped = asyncio.Future()
    def on_stopped(exn):
        wait_for_stopped.set_result(exn)
    queue = TeeQueue()
    addr = NodeService("localhost", 8338, None)
    policy = never_retry()
    dispatcher = SpyDispatcher()
    connector = Connector(
        SingleNodeDiscovery(addr, policy), dispatcher, ctrl_queue=queue
    )
    connector.stopped.append(on_stopped)
    # Note: no EchoServer is running, so the first connection attempt fails.
    await connector.start()
    [connect, connection_failed] = await queue.next_event(count=2)
    [failed] = await asyncio.wait_for(queue.next_event(count=1), 2)
    assert failed.command == ConnectorCommand.HandleConnectorFailed
    assert policy.recorded == addr
    # The stopped callback receives the DiscoveryFailed exception.
    assert isinstance(await wait_for_stopped, DiscoveryFailed)
@pytest.mark.asyncio
async def test_when_the_connection_fails_with_an_error(event_loop):
    """A transport-level error must surface as HandleConnectionFailed with
    the original exception attached as the event's data."""
    queue = TeeQueue()
    addr = NodeService("localhost", 8338, None)
    dispatcher = SpyDispatcher()
    connector = Connector(
        SingleNodeDiscovery(addr, DiscoveryRetryPolicy()),
        dispatcher,
        ctrl_queue=queue,
    )
    async with EchoServer(addr):
        await connector.start()
        [connect, connected] = await queue.next_event(count=2)
        # Simulate asyncio calling connection_lost with an exception.
        exn = ValueError()
        connector.connection_lost(exn)
        [connection_failed] = await queue.next_event(count=1)
        assert connection_failed.command == ConnectorCommand.HandleConnectionFailed
        # Identity check: the very same exception object is propagated.
        assert connection_failed.data is exn
        await connector.stop()
@pytest.mark.asyncio
async def test_when_restarting_a_running_connector(event_loop):
    """Reconnecting an already-connected connector should close the current
    connection and open a fresh one to the same node."""
    queue = TeeQueue()
    addr = NodeService("localhost", 8338, None)
    dispatcher = SpyDispatcher()
    connector = Connector(
        SingleNodeDiscovery(addr, DiscoveryRetryPolicy()),
        dispatcher,
        ctrl_queue=queue,
    )
    async with EchoServer(addr):
        await connector.start()
        [connect, connected] = await queue.next_event(count=2)
        assert connected.command == ConnectorCommand.HandleConnectionOpened
        await connector_event(connector.connected)
        await connector.reconnect(connector.target_node)
        # The reconnect produces a Connect, a close for the old transport,
        # some intermediate events, and finally a new open.
        [connect, closed, b, c, d, connected] = await queue.next_event(count=6)
        assert closed.command == ConnectorCommand.HandleConnectionClosed
        assert connect.command == ConnectorCommand.Connect
        assert connected.command == ConnectorCommand.HandleConnectionOpened
        await connector.stop()
@pytest.mark.asyncio
async def test_when_restarting_a_stopped_connector(event_loop):
    """Calling reconnect on a connector that was never started should behave
    like a normal start and establish a connection."""
    queue = TeeQueue()
    addr = NodeService("localhost", 8338, None)
    dispatcher = SpyDispatcher()
    connector = Connector(
        SingleNodeDiscovery(addr, DiscoveryRetryPolicy()),
        dispatcher,
        ctrl_queue=queue,
    )
    async with EchoServer(addr):
        # reconnect() without start(): must still drive a full connect cycle.
        await connector.reconnect()
        [connect, connected] = await queue.next_event(count=2)
        assert connect.command == ConnectorCommand.Connect
        assert connected.command == ConnectorCommand.HandleConnectionOpened
        await connector_event(connector.connected)
        await connector.stop()
| {
"content_hash": "f18b0bcd9e43cdb9ead025b3d82ade1a",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 87,
"avg_line_length": 29.91194968553459,
"alnum_prop": 0.6846089150546678,
"repo_name": "madedotcom/photon-pump",
"id": "bb7e077dccb73957d422aec6e11bbf37c9a1957f",
"size": "9512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/connection/test_connector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1273"
},
{
"name": "Python",
"bytes": "389865"
}
],
"symlink_target": ""
} |
import inspect
import os.path
from robot import utils
from robot.errors import DataError
from .loggerhelper import AbstractLoggerProxy
from .logger import LOGGER
if utils.is_jython:
from java.lang import Object
from java.util import HashMap
class _RecursionAvoidingMetaclass(type):
    """Metaclass to wrap listener methods so that they cannot cause recursion.
    Recursion would otherwise happen if one listener logs something and that
    message is received and logged again by log_message or message method.
    """
    def __new__(cls, name, bases, dct):
        # Wrap every public routine defined on the class; private helpers
        # (leading underscore) are left untouched.
        for attr, value in dct.items():
            if not attr.startswith('_') and inspect.isroutine(value):
                dct[attr] = cls._wrap_listener_method(value)
        # Class-level re-entrancy flag consulted by the wrappers below.
        dct['_calling_method'] = False
        return type.__new__(cls, name, bases, dct)
    @staticmethod
    def _wrap_listener_method(method):
        def wrapped(self, *args):
            # Silently drop the call when we are already inside a wrapped
            # method, i.e. the call is re-entrant.
            if not self._calling_method:
                self._calling_method = True
                method(self, *args)
                self._calling_method = False
        return wrapped
class Listeners(object):
    # Facade over all registered listener proxies; fans each framework event
    # out to every listener, adapting the payload to the listener API version.
    # NOTE: Python 2 metaclass syntax -- this module targets Python 2.
    __metaclass__ = _RecursionAvoidingMetaclass
    # Attributes passed to version-2 listeners on start events ...
    _start_attrs = ['doc', 'starttime', 'longname']
    # ... and the superset passed on end events.
    _end_attrs = _start_attrs + ['endtime', 'elapsedtime', 'status', 'message']
    def __init__(self, listeners):
        self._listeners = self._import_listeners(listeners)
        self._running_test = False
        self._setup_or_teardown_type = None
    def __nonzero__(self):
        # Truthy only when at least one listener was imported successfully.
        return bool(self._listeners)
    def _import_listeners(self, listener_data):
        """Instantiate a proxy per (name, args) pair, logging failures."""
        listeners = []
        for name, args in listener_data:
            try:
                listeners.append(_ListenerProxy(name, args))
            except DataError, err:
                if args:
                    name += ':' + ':'.join(args)
                LOGGER.error("Taking listener '%s' into use failed: %s"
                             % (name, unicode(err)))
        return listeners
    def start_suite(self, suite):
        # Version-1 listeners take positional args; version-2 an attr dict.
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.start_suite, suite.name, suite.doc)
            else:
                attrs = self._get_start_attrs(suite, 'metadata')
                attrs.update({'tests' : [t.name for t in suite.tests],
                              'suites': [s.name for s in suite.suites],
                              'totaltests': suite.get_test_count(),
                              'source': suite.source or ''})
                li.call_method(li.start_suite, suite.name, attrs)
    def end_suite(self, suite):
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.end_suite, suite.status,
                               suite.get_full_message())
            else:
                attrs = self._get_end_attrs(suite, 'metadata')
                attrs.update({'statistics': suite.get_stat_message(),
                              'source': suite.source or ''})
                li.call_method(li.end_suite, suite.name, attrs)
    def start_test(self, test):
        # Track that a test body is running so keyword types can be labelled
        # "Test Setup/Teardown" rather than "Suite Setup/Teardown".
        self._running_test = True
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.start_test, test.name, test.doc, test.tags)
            else:
                attrs = self._get_start_attrs(test, 'tags')
                attrs['critical'] = 'yes' if test.critical else 'no'
                attrs['template'] = test.template or ''
                li.call_method(li.start_test, test.name, attrs)
    def end_test(self, test):
        self._running_test = False
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.end_test, test.status, test.message)
            else:
                attrs = self._get_end_attrs(test, 'tags')
                attrs['critical'] = 'yes' if test.critical else 'no'
                attrs['template'] = test.template or ''
                li.call_method(li.end_test, test.name, attrs)
    def start_keyword(self, kw):
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.start_keyword, kw.name, kw.args)
            else:
                attrs = self._get_start_attrs(kw, 'args', '-longname')
                attrs['type'] = self._get_keyword_type(kw, start=True)
                li.call_method(li.start_keyword, kw.name, attrs)
    def end_keyword(self, kw):
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.end_keyword, kw.status)
            else:
                attrs = self._get_end_attrs(kw, 'args', '-longname', '-message')
                attrs['type'] = self._get_keyword_type(kw, start=False)
                li.call_method(li.end_keyword, kw.name, attrs)
    def _get_keyword_type(self, kw, start=True):
        # When running setup or teardown, only the top level keyword has type
        # set to setup/teardown but we want to pass that type also to all
        # start/end_keyword listener methods called below that keyword.
        if kw.type == 'kw':
            return self._setup_or_teardown_type or 'Keyword'
        kw_type = self._get_setup_or_teardown_type(kw)
        # Remember the type while inside the setup/teardown; clear on end.
        self._setup_or_teardown_type = kw_type if start else None
        return kw_type
    def _get_setup_or_teardown_type(self, kw):
        return '%s %s' % (('Test' if self._running_test else 'Suite'),
                          kw.type.title())
    def log_message(self, msg):
        # Only the version-2 API receives log/message callbacks.
        for li in self._listeners:
            if li.version == 2:
                li.call_method(li.log_message, self._create_msg_dict(msg))
    def message(self, msg):
        for li in self._listeners:
            if li.version == 2:
                li.call_method(li.message, self._create_msg_dict(msg))
    def _create_msg_dict(self, msg):
        return {'timestamp': msg.timestamp, 'message': msg.message,
                'level': msg.level, 'html': 'yes' if msg.html else 'no'}
    def output_file(self, name, path):
        # Dispatches to output_file/report_file/log_file/... by name.
        for li in self._listeners:
            li.call_method(getattr(li, '%s_file' % name.lower()), path)
    def close(self):
        for li in self._listeners:
            li.call_method(li.close)
    def _get_start_attrs(self, item, *names):
        return self._get_attrs(item, self._start_attrs, names)
    def _get_end_attrs(self, item, *names):
        return self._get_attrs(item, self._end_attrs, names)
    def _get_attrs(self, item, defaults, extras):
        names = self._get_attr_names(defaults, extras)
        return dict((n, self._get_attr_value(item, n)) for n in names)
    def _get_attr_names(self, defaults, extras):
        # An extra prefixed with '-' removes that name from the defaults.
        names = list(defaults)
        for name in extras:
            if name.startswith('-'):
                names.remove(name[1:])
            else:
                names.append(name)
        return names
    def _get_attr_value(self, item, name):
        value = getattr(item, name)
        return self._take_copy_of_mutable_value(value)
    def _take_copy_of_mutable_value(self, value):
        # Hand listeners copies so they cannot mutate framework state.
        if isinstance(value, (dict, utils.NormalizedDict)):
            return dict(value)
        if isinstance(value, list):
            return list(value)
        return value
class _ListenerProxy(AbstractLoggerProxy):
    # Wraps a single user-supplied listener (Python module/class or Java
    # object) and shields the framework from any exception it raises.
    _methods = ['start_suite', 'end_suite', 'start_test', 'end_test',
                'start_keyword', 'end_keyword', 'log_message', 'message',
                'output_file', 'report_file', 'log_file', 'debug_file',
                'xunit_file', 'close']
    def __init__(self, name, args):
        listener = self._import_listener(name, args)
        AbstractLoggerProxy.__init__(self, listener)
        self.name = name
        self.version = self._get_version(listener)
        # Java listeners need dict arguments converted to java.util.HashMap.
        self.is_java = utils.is_jython and isinstance(listener, Object)
    def _import_listener(self, name, args):
        importer = utils.Importer('listener')
        return importer.import_class_or_module(os.path.normpath(name),
                                               instantiate_with_args=args)
    def _get_version(self, listener):
        # Listener API version defaults to 1 when the attribute is missing
        # or not an integer.
        try:
            return int(getattr(listener, 'ROBOT_LISTENER_API_VERSION', 1))
        except ValueError:
            return 1
    def call_method(self, method, *args):
        """Invoke one listener callback, logging (never propagating) errors."""
        if self.is_java:
            args = [self._to_map(a) if isinstance(a, dict) else a for a in args]
        try:
            method(*args)
        except:
            # Deliberate catch-all: a faulty listener must not abort the
            # test run; the error is reported through the framework logger.
            message, details = utils.get_error_details()
            LOGGER.error("Calling listener method '%s' of listener '%s' failed: %s"
                         % (method.__name__, self.name, message))
            LOGGER.info("Details:\n%s" % details)
    def _to_map(self, dictionary):
        # Convert a Python dict to a Java HashMap for Jython listeners.
        map = HashMap()
        for key, value in dictionary.iteritems():
            map.put(key, value)
        return map
| {
"content_hash": "42888347074567a182d1e8b0bc573db5",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 83,
"avg_line_length": 38.1931330472103,
"alnum_prop": 0.559051578829082,
"repo_name": "qitaos/robotframework-mabot",
"id": "4caa26d4873b66fe7d77f2d4ebec8ff9efb97bb9",
"size": "9505",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/robot/output/listeners.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11560"
},
{
"name": "HTML",
"bytes": "84841"
},
{
"name": "JavaScript",
"bytes": "38214"
},
{
"name": "Python",
"bytes": "1288243"
},
{
"name": "Shell",
"bytes": "32"
}
],
"symlink_target": ""
} |
__author__ = 'Michael'

from src import PClass

# Test for bias addition and correctness of range in init function.
# NOTE(review): assumes weightList exposes a size() method and that
# numInputs counts the non-bias weights -- confirm against PClass.
testClass = PClass(3)
assert testClass.weightList.size() == 4
for x in range(0, testClass.numInputs):
    # Chained comparison checks each weight lies in [-1, 1]. The original
    # used the bitwise '&' operator, which binds tighter than '<=' and
    # raises TypeError on float weights; 'and' / chaining was intended.
    assert -1 <= testClass.weightList[x] <= 1
| {
"content_hash": "50a2bcc961d92516fcbf21647d1ae46f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 71,
"avg_line_length": 32.111111111111114,
"alnum_prop": 0.726643598615917,
"repo_name": "michaellee1/ANN-PCML",
"id": "04fdfb7f4b23167325d2f0d16b8587ed003c1b3c",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1249"
}
],
"symlink_target": ""
} |
import argparse
import learning.selector as ls
import logging as log
import MySQLdb
import numpy as np
import pandas as pd
import pandas.io.sql as pdsql
import processors.selector as ps
import processors.utils as processor_utils
import sys
import transformation.categorical as categorical
import validation.kfold as kfold
# Configure root logging to stdout at INFO level so simulation progress is
# visible when the module is run as a CLI tool.
log.basicConfig(stream=sys.stdout, level=log.INFO,
                format='%(asctime)-15s %(threadName)s %(filename)s %(levelname)s %(message)s')
def process_args():
    """Parse the simulator's command line options.

    Returns
    -------
    argparse.Namespace
        The parsed options; the namespace is also logged for traceability.
    """
    cli = argparse.ArgumentParser(description='Python wrapper to simulate load balancing system and to run tests.')
    cli.add_argument("-p", "--processor", required=True,
                     help="Request processor(simple-round-robin,best-counter)")
    cli.add_argument("-n", "--node-count", required=True,
                     help="Node count for processor(1,2,3,...)")
    cli.add_argument("-l", "--limit", required=True,
                     help="Data limit(to avoid running out of memory)")
    cli.add_argument("-k", "--k-folds", type=int, default=2,
                     help="Number of folds for cross validation(higher better, but computation more expensive)")
    cli.add_argument("-m", "--model", required=True,
                     help="ML model(ols, lasso, ridge, sgd, tree)")
    cli.add_argument("-a", "--arguments",
                     help="ML model arguments - kwargs separated with comma")
    cli.add_argument("-u", "--url", default='false',
                     help="Flag whether url should be parsed into tree like indicators")
    cli.add_argument("-ul", "--url-limit", type=int, default=None,
                     help="Limit depth of tree hierarchy of dummy variable parsed from urls")
    cli.add_argument("-f", "--features", type=str, default='*',
                     help="List of features - comma separated")
    cli.add_argument("-r", "--run", required=True,
                     help="'true' or 'false' whether to run full with processor")
    parsed = cli.parse_args()
    log.info('action=args values="%s"' % parsed)
    return parsed
def to_omit(url):
    """Return the column names to drop before model training.

    The target column 'payload_size' is always omitted; 'url' is omitted
    as well when url parsing is enabled (url flag equals 'true'), because
    it is then expanded into tree-like dummy indicators separately.
    """
    omitted = ["payload_size"]
    if url == u'true':
        omitted.append('url')
    return omitted
def main_wrapper():
    """Run one end-to-end simulation: load data from MySQL, optionally train
    a model and run it through the request processor, then optionally
    cross-validate with k folds.
    """
    log.info("action=main status=start")
    args = process_args()
    conn = MySQLdb.connect(host="localhost", user="root",
                           passwd="password", db="mlrl")
    _to_omit = to_omit(args.url)
    # NOTE: args.features/args.limit are interpolated straight into the SQL
    # string; acceptable for a local research CLI, not for untrusted input.
    dataframe, headers = categorical.tranform(
        pdsql.read_sql_query(
            "SELECT %s FROM data LIMIT %s" % (args.features, args.limit),
            conn
        ),
        _to_omit,
        args.url,
        args.url_limit
    )
    if (args.run and args.run == u'true'):
        processor = ps.select_processor(args)
        model = ls.select_model(args)
        # 'payload_size' is the regression target; all remaining headers
        # are used as features.
        model.learn(y=dataframe['payload_size'],
                    x=dataframe[headers])
        predictions = model.predict(dataframe[headers])
        rmse, rsquarred = processor_utils.process_and_compute_stats(
            processor, dataframe, predictions)
        log.info("action=results counter=%s rmse=%s rsquarred=%s" %
                 (processor.real_node_counters, rmse, rsquarred))
    # Value comparison with != : the original used `is not -1`, an identity
    # check that only worked by accident of CPython's small-int caching.
    if (args.k_folds and args.k_folds != -1):
        validator = kfold.Kfold(ls.select_model_supplier(args), ps.select_processor_supplier(
            args), dataframe, headers, args.k_folds, args)
        validator.validate()
    log.info("action=main status=end")
def main():
    """CLI entry point: delegate to main_wrapper, logging fatal errors."""
    try:
        main_wrapper()
    except Exception:
        # Leave a structured-log trace of the crash, then re-raise so the
        # process still exits with a traceback and non-zero status.
        log.info('action=end-with-exception')
        raise
if __name__ == "__main__":
    main()
| {
"content_hash": "4793727415bb536bb545d4d804faa523",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 118,
"avg_line_length": 38.3469387755102,
"alnum_prop": 0.6101649813730707,
"repo_name": "jansyk13/ml-rate-limiting",
"id": "bd0fd31c46817b9295228d883edd9a087375cc54",
"size": "3780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/limython.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import os
import sys
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
import fitelp.constants as constants
import fitelp.read_fits_file
# Initialise package-level constants (e.g. DATA_FILES, OUTPUT_DIR) before use.
constants.init()
# Make the sibling Input_Data_Files directory importable/locatable.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../Input_Data_Files'))
def read_spectra(filename, scaleFlux):
    """Read a (possibly multi-order) spectrum from a FITS file.

    Falls back to looking inside constants.DATA_FILES when *filename* is not
    found as given. Returns (x, y): arrays of wavelengths (Angstroms) and
    fluxes scaled by *scaleFlux*, one row per order; a single-order spectrum
    is wrapped so both arrays are always 2-D.
    """
    if not os.path.isfile(filename):
        filename = os.path.join(constants.DATA_FILES, filename)
    record = fitelp.read_fits_file.readmultispec(filename)
    wavelengths = record['wavelen']
    fluxes = record['flux'] * scaleFlux
    if len(wavelengths.shape) == 1:
        # Promote a single order to a one-row 2-D array for uniform indexing.
        wavelengths = np.array([wavelengths])
        fluxes = np.array([fluxes])
    return wavelengths, fluxes
class GalaxyRegion(object):
    """Holds the red- and blue-arm spectra (plus optional error spectra) for
    one galaxy region and provides plotting/masking helpers."""

    def __init__(self, rp):
        """rp: region-parameters object. x denotes wavelength arrays and y
        flux arrays for each spectrograph arm."""
        self.xBlue, self.yBlue = read_spectra(rp.blueSpecFile, rp.scaleFlux)
        self.xRed, self.yRed = read_spectra(rp.redSpecFile, rp.scaleFlux)
        self.rp = rp
        # Error spectra are optional; stay (None, None) when not supplied.
        if rp.blueSpecError is None:
            self.xBlueError, self.yBlueError = (None, None)
        else:
            self.xBlueError, self.yBlueError = read_spectra(rp.blueSpecError, rp.scaleFlux)
        if rp.redSpecError is None:
            self.xRedError, self.yRedError = (None, None)
        else:
            self.xRedError, self.yRedError = read_spectra(rp.redSpecError, rp.scaleFlux)
        # Ensure the per-region output directory exists for saved figures.
        if not os.path.exists(os.path.join(constants.OUTPUT_DIR, rp.regionName)):
            os.makedirs(os.path.join(constants.OUTPUT_DIR, rp.regionName))

    def plot_order(self, orderNum, filt='red', minIndex=0, maxIndex=-1, title=''):
        """Plot wavelength vs flux for one order and save the figure.

        orderNum is 1-based from the caller's point of view (decremented here).
        """
        orderNum -= 1
        x, y, xE, yE = self._filter_argument(filt)
        fig = plt.figure(self.rp.regionName + " Order Plot " + title)
        plt.title(title)
        ax1 = fig.add_subplot(111)
        ax1.plot(x[orderNum][minIndex:maxIndex], y[orderNum][minIndex:maxIndex], label='Spectrum')
        # ax1Ticks = ax1.get_xticks()
        # ax2Ticks = ax1Ticks
        # ax2.set_xticks(ax2Ticks)
        # ax2.set_xbound(ax1.get_xbound())
        # ax2.set_xticklabels("%.2f" % z for z in (x[orderNum][minIndex:maxIndex][t] for t in ax2Ticks[:-2]))
        #ax2.plot(y[orderNum][minIndex:maxIndex])
        if yE is not None:
            # Error-spectrum overlay is intentionally disabled.
            pass #plt.plot(xE[orderNum][minIndex:maxIndex], yE[orderNum][minIndex:maxIndex], label='Spectrum Error')
        plt.legend()
        plt.xlabel(constants.WAVE_AXIS_LABEL)
        plt.ylabel(constants.FLUX_AXIS_LABEL)
        plt.savefig(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, title))

    def mask_emission_line(self, orderNum, filt='red', minIndex=0, maxIndex=-1):
        """Return (x, y, xErr, yErr) slices of one order; error slices are
        None when no error spectrum was loaded. orderNum is 1-based."""
        orderNum -= 1
        x, y, xE, yE = self._filter_argument(filt)
        xMask, yMask = x[orderNum][minIndex:maxIndex], y[orderNum][minIndex:maxIndex]
        if yE is None:
            xEMask, yEMask = None, None
        else:
            xEMask, yEMask = xE[orderNum][minIndex:maxIndex], yE[orderNum][minIndex:maxIndex]
        return xMask, yMask, xEMask, yEMask

    def _filter_argument(self, filt):
        """Map 'red'/'blue' to the corresponding (x, y, xErr, yErr) arrays.

        For any other value, x is never bound so the return raises
        UnboundLocalError (a subclass of NameError, hence caught below) and
        the process exits with a usage message.
        """
        try:
            if filt == 'red':
                x, y, xE, yE = self.xRed, self.yRed, self.xRedError, self.yRedError
            elif filt == 'blue':
                x, y, xE, yE = self.xBlue, self.yBlue, self.xBlueError, self.yBlueError
            return x, y, xE, yE
        except NameError:
            print("Error: Invalid argument. Choose 'red' or 'blue' for the filter argument")
            exit()
| {
"content_hash": "a3bc4ccc7e919556876f1785a86b9abd",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 116,
"avg_line_length": 39.45744680851064,
"alnum_prop": 0.6255055270962524,
"repo_name": "daniel-muthukrishna/EmissionLineAnalysis",
"id": "e69966ecb8d046b1343701219443f3deffc0ef88",
"size": "3709",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fitelp/read_spectra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "320951"
}
],
"symlink_target": ""
} |
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import messages
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.stacks.api import d3_data
from openstack_dashboard.dashboards.project.stacks.tables import EventsTable
from openstack_dashboard.dashboards.project.stacks.tables import ResourcesTable
LOG = logging.getLogger(__name__)
class StackTopologyTab(tabs.Tab):
    """Detail tab rendering the stack topology as a d3 graph."""
    name = _("Topology")
    slug = "topology"
    template_name = "project/stacks/_detail_topology.html"
    preload = False

    def get_context_data(self, request):
        stack = self.tab_group.kwargs['stack']
        return {
            'stack_id': stack.id,
            'd3_data': d3_data(request, stack_id=stack.id),
        }
class StackOverviewTab(tabs.Tab):
    """Detail tab showing the general stack overview."""
    name = _("Overview")
    slug = "overview"
    template_name = "project/stacks/_detail_overview.html"

    def get_context_data(self, request):
        stack = self.tab_group.kwargs['stack']
        return {"stack": stack}
class ResourceOverviewTab(tabs.Tab):
    """Detail tab showing one stack resource and its metadata."""
    name = _("Overview")
    slug = "resource_overview"
    template_name = "project/stacks/_resource_overview.html"

    def get_context_data(self, request):
        kwargs = self.tab_group.kwargs
        return {"resource": kwargs['resource'],
                "metadata": kwargs['metadata']}
class StackEventsTab(tabs.Tab):
    """Detail tab listing the events of a Heat stack."""
    name = _("Events")
    slug = "events"
    template_name = "project/stacks/_detail_events.html"
    preload = False

    def get_context_data(self, request):
        """Fetch the stack's events; on failure show an error message and
        render an empty table instead of crashing the page."""
        stack = self.tab_group.kwargs['stack']
        try:
            stack_identifier = '%s/%s' % (stack.stack_name, stack.id)
            events = api.heat.events_list(self.request, stack_identifier)
            LOG.debug('got events %s' % events)
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            events = []
            messages.error(request, _(
                'Unable to get events for stack "%s".') % stack.stack_name)
        return {"stack": stack,
                "table": EventsTable(request, data=events), }
class StackResourcesTab(tabs.Tab):
    """Detail tab listing the resources of a Heat stack."""
    name = _("Resources")
    slug = "resources"
    template_name = "project/stacks/_detail_resources.html"
    preload = False

    def get_context_data(self, request):
        """Fetch the stack's resources; on failure show an error message and
        render an empty table instead of crashing the page."""
        stack = self.tab_group.kwargs['stack']
        try:
            stack_identifier = '%s/%s' % (stack.stack_name, stack.id)
            resources = api.heat.resources_list(self.request, stack_identifier)
            LOG.debug('got resources %s' % resources)
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            resources = []
            messages.error(request, _(
                'Unable to get resources for stack "%s".') % stack.stack_name)
        return {"stack": stack,
                "table": ResourcesTable(
                    request, data=resources, stack=stack), }
class StackDetailTabs(tabs.TabGroup):
    """Tab group shown on the stack detail page."""
    slug = "stack_details"
    tabs = (StackTopologyTab, StackOverviewTab,
            StackResourcesTab, StackEventsTab)
    sticky = True
class ResourceDetailTabs(tabs.TabGroup):
    """Tab group shown on the resource detail page."""
    slug = "resource_details"
    tabs = (ResourceOverviewTab,)
    sticky = True
| {
"content_hash": "5210354a2c97909c82150e751a6606b1",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 31.068627450980394,
"alnum_prop": 0.6333228147680656,
"repo_name": "tuskar/tuskar-ui",
"id": "9a736bb21f5ca934fcbc90861d867e57f0085744",
"size": "3759",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/stacks/tabs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "159761"
},
{
"name": "JavaScript",
"bytes": "467747"
},
{
"name": "Python",
"bytes": "2393436"
},
{
"name": "Shell",
"bytes": "12884"
}
],
"symlink_target": ""
} |
# Public submodules re-exported by cogent.evolve.
__all__ = ["best_likelihood", "bootstrap", "likelihood_calculation",
           "likelihood_function", "likelihood_tree", "models",
           "pairwise_distance", "parameter_controller", "predicate",
           "simulate", "substitution_calculation", "substitution_model",
           "coevolution"]

# Standard PyCogent package metadata (dunder convention).
__author__ = ""
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Gavin Huttley", "Peter Maxwell", "Andrew Butterfield",
               "Rob Knight", "Matthrew Wakefield", "Brett Easton",
               "Edward Lang","Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Gavin Huttley"
__email__ = "gavin.huttley@anu.edu.au"
__status__ = "Production"
| {
"content_hash": "0e538c3a554e5713457351b1db5373ff",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 44.375,
"alnum_prop": 0.6,
"repo_name": "sauloal/cnidaria",
"id": "a949cbf84967add7c145cf4b6cb8c1610e08b321",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/venv/lib/python2.7/site-packages/cogent/evolve/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1696790"
},
{
"name": "C++",
"bytes": "3035466"
},
{
"name": "CSS",
"bytes": "20306"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "32478"
},
{
"name": "HTML",
"bytes": "19658"
},
{
"name": "JavaScript",
"bytes": "250616"
},
{
"name": "Jupyter Notebook",
"bytes": "8401292"
},
{
"name": "M4",
"bytes": "3905"
},
{
"name": "Makefile",
"bytes": "177650"
},
{
"name": "Objective-C",
"bytes": "1701"
},
{
"name": "Python",
"bytes": "28122291"
},
{
"name": "R",
"bytes": "86108"
},
{
"name": "Shell",
"bytes": "676123"
}
],
"symlink_target": ""
} |
"""Estimate resources required for processing a set of tasks.
Uses annotations provided in multitasks.py for each function to identify utilized
programs, then extracts resource requirements from the input bcbio_system file.
"""
import copy
import math
from bcbio.pipeline import config_utils
from bcbio.log import logger
def _get_resource_programs(progs, algs):
    """Retrieve programs used in analysis based on algorithm configurations.

    Expands the placeholder names "aligner" and "variantcaller" into the
    concrete tools named in the sample configurations, applies conditional
    checks (VQSR, snpEff, bcbio-variation-recall), and passes every other
    program name through untouched. Returns a sorted list of unique names.
    """
    checks = {"gatk-vqsr": config_utils.use_vqsr,
              "snpeff": config_utils.use_snpeff,
              "bcbio-variation-recall": config_utils.use_bcbio_variation_recall}
    parent_child = {"vardict": _parent_prefix("vardict")}
    found = set()
    for prog in progs:
        if prog == "aligner":
            # One entry per distinct aligner configured across samples.
            found.update(alg.get("aligner") for alg in algs if alg.get("aligner"))
        elif prog == "variantcaller":
            # Parent tools implied by prefixed caller names (e.g. vardict-java).
            found.update(name for name, used in parent_child.items() if used(algs))
            for alg in algs:
                caller = alg.get("variantcaller")
                if not caller:
                    continue
                if isinstance(caller, (list, tuple)):
                    found.update(caller)
                else:
                    found.add(caller)
        elif prog in checks:
            if checks[prog](algs):
                found.add(prog)
        else:
            found.add(prog)
    return sorted(found)
def _parent_prefix(prefix):
"""Identify a parent prefix we should add to resources if present in a caller name.
"""
def run(algs):
for alg in algs:
vcs = alg.get("variantcaller")
if vcs:
if not isinstance(vcs, (list, tuple)):
vcs = [vcs]
return any(vc.startswith(prefix) for vc in vcs)
return run
def _ensure_min_resources(progs, cores, memory, min_memory):
"""Ensure setting match minimum resources required for used programs.
"""
for p in progs:
if p in min_memory:
if not memory or cores * memory < min_memory[p]:
memory = float(min_memory[p]) / cores
return cores, memory
def _str_memory_to_gb(memory):
val = float(memory[:-1])
units = memory[-1]
if units.lower() == "m":
val = val / 1000.0
else:
assert units.lower() == "g", "Unexpected memory units: %s" % memory
return val
def _get_prog_memory(resources, cores_per_job):
    """Get expected memory usage, in Gb per core, for a program from its
    resource specification; None when nothing is specified.

    The last -Xmx entry in jvm_opts is used, but an explicit "memory" value
    overrides it.
    """
    usage = None
    for jvm_opt in resources.get("jvm_opts", []):
        if jvm_opt.startswith("-Xmx"):
            usage = _str_memory_to_gb(jvm_opt[4:])
    explicit = resources.get("memory")
    if explicit:
        usage = _str_memory_to_gb(explicit)
    # A single-core program scheduled into a multi-core job reserves its
    # memory once; spread it over the job's cores to avoid overscheduling.
    if usage and resources.get("cores", 1) == 1 and cores_per_job > 1:
        usage = usage / float(cores_per_job)
    return usage
def _scale_cores_to_memory(cores, mem_per_core, sysinfo, system_memory):
"""Scale multicore usage to avoid excessive memory usage based on system information.
"""
total_mem = "%.2f" % (cores * mem_per_core + system_memory)
if "cores" not in sysinfo:
return cores, total_mem, 1.0
total_mem = min(float(total_mem), float(sysinfo["memory"]) - system_memory)
cores = min(cores, int(sysinfo["cores"]))
mem_cores = int(math.floor(float(total_mem) / mem_per_core)) # cores based on available memory
if mem_cores < 1:
out_cores = 1
elif mem_cores < cores:
out_cores = mem_cores
else:
out_cores = cores
mem_pct = float(out_cores) / float(cores)
return out_cores, total_mem, mem_pct
def _scale_jobs_to_memory(jobs, mem_per_core, sysinfo):
"""When scheduling jobs with single cores, avoid overscheduling due to memory.
"""
if "cores" not in sysinfo:
return jobs, 1.0
sys_mem_per_core = float(sysinfo["memory"]) / float(sysinfo["cores"])
if sys_mem_per_core < mem_per_core:
pct = sys_mem_per_core / float(mem_per_core)
target_jobs = int(math.floor(jobs * pct))
return max(target_jobs, 1), pct
else:
return jobs, 1.0
def calculate(parallel, items, sysinfo, config, multiplier=1,
              max_multicore=None):
    """Determine cores and workers to use for this stage based on used programs.
    multiplier specifies the number of regions items will be split into during
    processing.
    max_multicore specifies an optional limit on the maximum cores. Can use to
    force single core processing during specific tasks.
    sysinfo specifies cores and memory on processing nodes, allowing us to tailor
    jobs for available resources.

    Returns a copy of *parallel* with cores_per_job, num_jobs, mem, mem_pct
    and system_cores filled in.
    """
    assert len(items) > 0, "Finding job resources but no items to process"
    all_cores = []
    all_memory = []
    # Provide 100Mb of additional memory for the system
    system_memory = 0.10
    algs = [config_utils.get_algorithm_config(x) for x in items]
    progs = _get_resource_programs(parallel.get("progs", []), algs)
    # Calculate cores: the most demanding program wins.
    for prog in progs:
        resources = config_utils.get_resources(prog, config)
        all_cores.append(resources.get("cores", 1))
    if len(all_cores) == 0:
        all_cores.append(1)
    cores_per_job = max(all_cores)
    if max_multicore:
        cores_per_job = min(cores_per_job, max_multicore)
    if "cores" in sysinfo:
        cores_per_job = min(cores_per_job, int(sysinfo["cores"]))
    total = parallel["cores"]
    # Split the total core budget into jobs of cores_per_job each.
    if total > cores_per_job:
        num_jobs = total // cores_per_job
    else:
        num_jobs, cores_per_job = 1, total
    # Calculate memory. Use 1Gb memory usage per core as min baseline if not specified
    for prog in progs:
        resources = config_utils.get_resources(prog, config)
        memory = _get_prog_memory(resources, cores_per_job)
        if memory:
            all_memory.append(memory)
    if len(all_memory) == 0:
        all_memory.append(1)
    memory_per_core = max(all_memory)
    logger.debug("Resource requests: {progs}; memory: {memory}; cores: {cores}".format(
        progs=", ".join(progs), memory=", ".join("%.2f" % x for x in all_memory),
        cores=", ".join(str(x) for x in all_cores)))
    # Bump memory up to program-specific minimums (e.g. GATK).
    cores_per_job, memory_per_core = _ensure_min_resources(progs, cores_per_job, memory_per_core,
                                                           min_memory=parallel.get("ensure_mem", {}))
    if cores_per_job == 1:
        memory_per_job = "%.2f" % memory_per_core
        num_jobs, mem_pct = _scale_jobs_to_memory(num_jobs, memory_per_core, sysinfo)
    else:
        # Multicore jobs: trade cores for memory on constrained nodes.
        cores_per_job, memory_per_job, mem_pct = _scale_cores_to_memory(cores_per_job,
                                                                        memory_per_core, sysinfo,
                                                                        system_memory)
        # For local runs with multiple jobs and multiple cores, potentially scale jobs down
        if num_jobs > 1 and parallel.get("type") == "local":
            memory_per_core = float(memory_per_job) / cores_per_job
            num_jobs, _ = _scale_jobs_to_memory(num_jobs, memory_per_core, sysinfo)
    # do not overschedule if we don't have extra items to process
    num_jobs = min(num_jobs, len(items) * multiplier)
    logger.debug("Configuring %d jobs to run, using %d cores each with %sg of "
                 "memory reserved for each job" % (num_jobs, cores_per_job,
                                                   str(memory_per_job)))
    parallel = copy.deepcopy(parallel)
    parallel["cores_per_job"] = cores_per_job
    parallel["num_jobs"] = num_jobs
    parallel["mem"] = str(memory_per_job)
    parallel["mem_pct"] = "%.2f" % mem_pct
    parallel["system_cores"] = sysinfo.get("cores", 1)
    return parallel
| {
"content_hash": "d71ca4f7c96761b1ad8f4fed2a1b2adc",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 101,
"avg_line_length": 40.792929292929294,
"alnum_prop": 0.594651479509719,
"repo_name": "fw1121/bcbio-nextgen",
"id": "63b913b160337fe0276ed7449edfdf90d4d97801",
"size": "8077",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bcbio/distributed/resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1452773"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "11523"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the intangible draft-schematic object for the radar table
    furniture template (SWG template factory convention)."""
    schematic = Intangible()
    schematic.template = "object/draft_schematic/furniture/shared_furniture_radar_table.iff"
    schematic.attribute_template_id = -1
    schematic.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return schematic
"content_hash": "1151ee416d7a0f5a0db5ba1fa5da6537",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.7015873015873015,
"repo_name": "obi-two/Rebelion",
"id": "8b14314aa5b9580f179eaab2e21c1bbc7b12fb3b",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/furniture/shared_furniture_radar_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from SimpleCV.base import cv2, np, LazyProperty, copy
from SimpleCV.ImageClass import Image
from SimpleCV.Features import Blob, FeatureSet
from SimpleCV.Color import Color
class SLIC:
    """
    **SUMMARY**
    This class contains an implementation of the SLIC Superpixel
    algorithm by Achanta et al. (PAMI'12, vol. 34, num. 11, pp. 2274-2282).
    The C++ implementation from which this Python implementation is derived
    can be found here https://github.com/PSMM/SLIC-Superpixels
    **EXAMPLE**
    >>> img = Image("lenna")
    >>> nr_superpixels = 400
    >>> step = int((img.width*img.height/nr_superpixels)**0.5)
    >>> nc = 40
    >>> slic = SLIC(img, step, nc)
    >>> superpixels = slic.generateSuperPixels()
    >>> superpixels.show()
    """
    def __init__(self, img, step, nc):
        # img: SimpleCV Image; step: grid spacing between initial cluster
        # centers (also used as the spatial weight ns); nc: colour weight.
        self.image = img
        self.img = img.getNumpy()
        # Work in CIELAB so colour distances are perceptually meaningful.
        self.labimg = cv2.cvtColor(self.img, cv2.COLOR_BGR2LAB).astype(np.float64)
        self.contourImage = img.copy()
        self.contourImg = self.contourImage._numpy
        self.width, self.height = img.size()
        self.step = step
        self.nc = nc
        self.ns = step
        # Large sentinel standing in for "infinite" distance.
        self.FLT_MAX = 1000000
        self.ITERATIONS = 10

    def generateSuperPixels(self):
        """
        Compute the over-segmentation based on the step-size and relative
        weighting of the pixel and colour values.
        """
        self._initData()
        # indnp[y, x] == (y, x); used to accumulate spatial sums per cluster.
        indnp = np.mgrid[0:self.height,0:self.width].swapaxes(0,2).swapaxes(0,1)
        for i in range(self.ITERATIONS):
            self.distances = self.FLT_MAX * np.ones(self.img.shape[:2])
            # Assignment step: each center claims pixels in a 2*step window.
            for j in xrange(self.centers.shape[0]):
                xlow, xhigh = int(self.centers[j][3] - self.step), int(self.centers[j][3] + self.step)
                ylow, yhigh = int(self.centers[j][4] - self.step), int(self.centers[j][4] + self.step)
                # Clamp the search window to the image bounds.
                if xlow <= 0:
                    xlow = 0
                if xhigh > self.width:
                    xhigh = self.width
                if ylow <=0:
                    ylow = 0
                if yhigh > self.height:
                    yhigh = self.height
                cropimg = self.labimg[ylow : yhigh , xlow : xhigh].astype(np.int64)
                colordiff = cropimg - self.labimg[self.centers[j][4], self.centers[j][3]]
                colorDist = np.sqrt(np.sum(np.square(colordiff.astype(np.int64)), axis=2))
                yy, xx = np.ogrid[ylow : yhigh, xlow : xhigh]
                pixdist = ((yy-self.centers[j][4])**2 + (xx-self.centers[j][3])**2)**0.5
                # Combined SLIC distance: colour normalised by nc, space by ns.
                dist = ((colorDist/self.nc)**2 + (pixdist/self.ns)**2)**0.5
                distanceCrop = self.distances[ylow : yhigh, xlow : xhigh]
                idx = dist < distanceCrop
                distanceCrop[idx] = dist[idx]
                self.distances[ylow : yhigh, xlow : xhigh] = distanceCrop
                self.clusters[ylow : yhigh, xlow : xhigh][idx] = j
            # Update step: recompute each center as the mean of its pixels.
            for k in xrange(len(self.centers)):
                idx = (self.clusters == k)
                colornp = self.labimg[idx]
                distnp = indnp[idx]
                self.centers[k][0:3] = np.sum(colornp, axis=0)
                sumy, sumx = np.sum(distnp, axis=0)
                self.centers[k][3:] = sumx, sumy
                self.centers[k] /= np.sum(idx)
        self._createConnectivity()
        superpixels = self._segmentSuperpixels()
        return superpixels

    def _initData(self):
        """
        Initialize the cluster centers and initial values of the pixel-wise
        cluster assignment and distance values.
        """
        self.clusters = -1 * np.ones(self.img.shape[:2])
        self.distances = self.FLT_MAX * np.ones(self.img.shape[:2])
        centers = []
        # Seed centers on a regular grid, nudged to a local gradient minimum.
        for i in xrange(self.step, self.width - self.step/2, self.step):
            for j in xrange(self.step, self.height - self.step/2, self.step):
                nc = self._findLocalMinimum(center=(i, j))
                color = self.labimg[nc[1], nc[0]]
                # Center layout: [L, a, b, x, y].
                center = [color[0], color[1], color[2], nc[0], nc[1]]
                centers.append(center)
        self.center_counts = np.zeros(len(centers))
        self.centers = np.array(centers)

    def _findLocalMinimum(self, center):
        """
        Find a local gradient minimum of a pixel in a 3x3 neighbourhood.
        This method is called upon initialization of the cluster centers.
        """
        min_grad = self.FLT_MAX
        loc_min = center
        for i in xrange(center[0] - 1, center[0] + 2):
            for j in xrange(center[1] - 1, center[1] + 2):
                c1 = self.labimg[j+1, i]
                c2 = self.labimg[j, i+1]
                c3 = self.labimg[j, i]
                # Gradient measured on the L (lightness) channel only.
                if ((c1[0] - c3[0])**2)**0.5 + ((c2[0] - c3[0])**2)**0.5 < min_grad:
                    min_grad = abs(c1[0] - c3[0]) + abs(c2[0] - c3[0])
                    loc_min = [i, j]
        return loc_min

    def _createConnectivity(self):
        """
        Enforce connectivity of the superpixels. Needs to be optimized.
        """
        label = 0
        adjlabel = 0
        # Minimum size threshold basis; fragments <= lims/4 get merged.
        lims = self.width * self.height / self.centers.shape[0]
        # 4-connected neighbour offsets.
        dx4 = [-1, 0, 1, 0]
        dy4 = [0, -1, 0, 1]
        new_clusters = -1 * np.ones(self.img.shape[:2]).astype(np.int64)
        elements = []
        for i in xrange(self.width):
            for j in xrange(self.height):
                if new_clusters[j, i] == -1:
                    elements = []
                    elements.append((j, i))
                    # Remember a labelled neighbour to absorb tiny fragments.
                    for dx, dy in zip(dx4, dy4):
                        x = elements[0][1] + dx
                        y = elements[0][0] + dy
                        if (x>=0 and x < self.width and
                                y>=0 and y < self.height and
                                new_clusters[y, x] >=0):
                            adjlabel = new_clusters[y, x]
                    # Flood-fill the connected component of this cluster.
                    count = 1
                    c = 0
                    while c < count:
                        for dx, dy in zip(dx4, dy4):
                            x = elements[c][1] + dx
                            y = elements[c][0] + dy
                            if (x>=0 and x<self.width and y>=0 and y<self.height):
                                if new_clusters[y, x] == -1 and self.clusters[j, i] == self.clusters[y, x]:
                                    elements.append((y, x))
                                    new_clusters[y, x] = label
                                    count+=1
                        c+=1
                    # Components at most a quarter of the nominal size are
                    # relabelled into the adjacent superpixel.
                    if (count <= lims >> 2):
                        for c in range(count):
                            new_clusters[elements[c]] = adjlabel
                        label-=1
                    label+=1
        self.new_clusters = new_clusters

    def _segmentSuperpixels(self):
        # Turn each connected label into a Blob masked from the source image.
        img = self.new_clusters
        limit = np.max(img)
        superpixels = Superpixels()
        for label in range(limit+1):
            clusterimg = Image(255*(img == label).astype(np.uint8))
            blobs = clusterimg.findBlobs()
            if blobs is None:
                continue
            # Largest blob of the mask represents this superpixel.
            blob = blobs[-1]
            blob.image = self.image & clusterimg
            superpixels.append(blob)
        return superpixels
class Superpixels(FeatureSet):
    """
    ** SUMMARY **
    Superpixels is a class extended from FeatureSet which is a class
    extended from Python's list. So, it has all the properties of a list
    as well as all the properties of FeatureSet.
    Each object of this list is a Blob corresponding to the superpixel.
    ** EXAMPLE **
    >>> image = Image("lenna")
    >>> sp = image.segmentSuperpixels(300, 20)
    >>> sp.show()
    >>> sp.centers()
    """
    def __init__(self):
        # Lazily-built rendered images; created on first draw()/colour call.
        self._drawingImage = None
        self.clusterMeanImage = None
        pass

    def append(self, blob):
        # Bypass FeatureSet.append and use the raw list append.
        list.append(self, blob)
        #if len(self) != 1:
            #self.image += blob.image.copy()

    @LazyProperty
    def image(self):
        # Union of all superpixel images, computed once and cached.
        img = None
        for sp in self:
            if img is None:
                img = sp.image
            else:
                img += sp.image
        return img

    def draw(self, color=Color.RED, width=2, alpha=255):
        """
        **SUMMARY**
        Draw all the superpixels, in the given color, to the appropriate layer
        By default, this draws the superpixels boundary. If you
        provide a width, an outline of the exterior and interior contours is drawn.
        **PARAMETERS**
        * *color* -The color to render the blob as a color tuple.
        * *width* - The width of the drawn blob in pixels, if -1 then filled then the polygon is filled.
        * *alpha* - The alpha value of the rendered blob 0=transparent 255=opaque.
        **RETURNS**
        Image with superpixels drawn on it.
        **EXAMPLE**
        >>> image = Image("lenna")
        >>> sp = image.segmentSuperpixels(300, 20)
        >>> sp.draw(color=(255, 0, 255), width=5, alpha=128).show()
        """
        # NOTE(review): `img` below is never used — candidate for removal.
        img = self.image.copy()
        self._drawingImage = Image(self.image.getEmpty(3))
        _mLayers = []
        for sp in self:
            sp.draw(color=color, width=width, alpha=alpha)
            self._drawingImage += sp.image.copy()
            # Collect drawing layers from every blob into one flat list.
            for layer in sp.image._mLayers:
                _mLayers.append(layer)
        self._drawingImage._mLayers = copy(_mLayers)
        return self._drawingImage.copy()

    def show(self, color=Color.RED, width=2, alpha=255):
        """
        **SUMMARY**
        This function automatically draws the superpixels on the drawing image
        and shows it.
        ** RETURNS **
        None
        ** EXAMPLE **
        >>> image = Image("lenna")
        >>> sp = image.segmentSuperpixels(300, 20)
        >>> sp.show(color=(255, 0, 255), width=5, alpha=128)
        """
        # NOTE(review): `type(x) == type(None)` is the dated idiom for
        # `x is None`; kept byte-identical here.
        if type(self._drawingImage) == type(None):
            self.draw(color=color, width=width, alpha=alpha)
        self._drawingImage.show()

    def colorWithClusterMeans(self):
        """
        **SUMMARY**
        This function colors each superpixel with its mean color and
        return an image.
        **RETURNS**
        Image with superpixles drawn in its mean color.
        **EXAMPLE**
        >>> image = Image("lenna")
        >>> sp = image.segmentSuperpixels(300, 20)
        >>> sp.colorWithClusterMeans().show()
        """
        # Return the cached render when it already exists.
        if type(self.clusterMeanImage) != type(None):
            return self.clusterMeanImage
        self.clusterMeanImage = Image(self.image.getEmpty(3))
        _mLayers = []
        for sp in self:
            # meanColor() is reversed — presumably BGR->RGB; TODO confirm.
            color = tuple(reversed(sp.meanColor()))
            sp.draw(color=color, width=-1)
            self.clusterMeanImage += sp.image.copy()
            for layer in sp.image._mLayers:
                _mLayers.append(layer)
        self.clusterMeanImage._mLayers = copy(_mLayers)
        return self.clusterMeanImage
| {
"content_hash": "48a2e1fa6dc79b0d64e8648ae9ebf737",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 104,
"avg_line_length": 36.60535117056856,
"alnum_prop": 0.5229785290086798,
"repo_name": "jayrambhia/SimpleCV2",
"id": "e549d99e215fd558361d3cc34c03b6d0237958c4",
"size": "10945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SimpleCV/Features/Superpixels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "46344"
},
{
"name": "JavaScript",
"bytes": "41038"
},
{
"name": "Perl",
"bytes": "5044"
},
{
"name": "Python",
"bytes": "1698883"
},
{
"name": "Shell",
"bytes": "18995"
}
],
"symlink_target": ""
} |
"""Creates Github links from @user and #issue text.
Bascially a much simplified version of sphinxcontrib.issuetracker
with support for @user.
"""
import re
import sys
from docutils import nodes
from docutils.transforms import Transform
# Format templates passed to str.format(project, capture).
GITHUB_ISSUE_URL = "https://github.com/{0}/issues/{1}"
# User links deliberately ignore argument {0} (the project) and only use the
# captured username.
GITHUB_USER_URL = "https://github.com/{1}"
# Python 2 compatibility: treat text uniformly as unicode.
if sys.version_info[0] == 2:
    str = unicode
class GithubReferences(Transform):
    """Docutils transform that turns #NNN and @user text into Github links."""
    # Run late, after other text transforms.
    default_priority = 999

    def apply(self):
        """Link issue references and user mentions across the document."""
        config = self.document.settings.env.config
        issue_re = re.compile(config.github_issue_pattern)
        mention_re = re.compile(config.github_mention_pattern)
        self._replace_pattern(issue_re, GITHUB_ISSUE_URL)
        self._replace_pattern(mention_re, GITHUB_USER_URL)

    def _replace_pattern(self, pattern, url_format):
        """Replace every match of *pattern* in plain text nodes with a
        reference node whose target is url_format.format(project, capture)."""
        project = self.document.settings.env.config.github_project
        for node in self.document.traverse(nodes.Text):
            parent = node.parent
            # Skip literal/code-like containers so code samples stay intact.
            if isinstance(parent, (nodes.literal, nodes.FixedTextElement)):
                continue
            text = str(node)
            new_nodes = []
            last_ref_end = 0
            for match in pattern.finditer(text):
                # Plain text between the previous match and this one.
                head = text[last_ref_end:match.start()]
                if head:
                    new_nodes.append(nodes.Text(head))
                last_ref_end = match.end()
                ref = url_format.format(project, match.group(1))
                link = nodes.reference(
                    match.group(0),
                    match.group(0),
                    refuri=ref
                )
                new_nodes.append(link)
            # No matches: leave the text node untouched.
            if not new_nodes:
                continue
            tail = text[last_ref_end:]
            if tail:
                new_nodes.append(nodes.Text(tail))
            parent.replace(node, new_nodes)
def setup(app):
    """Sphinx extension entry point: register config values and the transform."""
    for option, default in (("github_project", None),
                            ("github_issue_pattern", r"#(\d+)"),
                            ("github_mention_pattern", r"@(\w+)")):
        app.add_config_value(option, default, "env")
    app.add_transform(GithubReferences)
| {
"content_hash": "e791c31666d22679bd66cf834ee60c5a",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 69,
"avg_line_length": 27.057142857142857,
"alnum_prop": 0.6541710665258712,
"repo_name": "unintended/Cohen",
"id": "1c2583e1bb973c43ae1ca3196125b2f188b5f65f",
"size": "1894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/ext_github.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1155170"
},
{
"name": "Shell",
"bytes": "1345"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import hashlib
import time
import random
from oauth_provider.models import Consumer
class Command(BaseCommand):
    """Django management command generating an OAuth consumer key/secret pair."""
    help = 'Creates a key and secret for clients connecting to the server'
    option_list = BaseCommand.option_list + (
        make_option('--name',
                    dest='consumer_name',
                    default=False,
                    help='A name for the consumer'),
    )

    def handle(self, *args, **options):
        """Create and print a Consumer; prompts for a name if --name is absent."""
        if options['consumer_name']:
            consumer_name = options['consumer_name']
        else:
            # Python 2 only: raw_input. NOTE(review): `random` is not a
            # cryptographic source — consider os.urandom for tokens.
            consumer_name = raw_input('Enter consumer name: ')
        key = hashlib.sha1("{0} - {1}".format(random.random(),
                                              time.time())).hexdigest()
        secret = hashlib.sha1("{0} - {1}".format(random.random(),
                                                 time.time())).hexdigest()
        # This field is for some reason defined as 16 chars, while key is 256.
        secret = secret[:16]
        consumer = Consumer.objects.create(name=consumer_name,
                                           key=key,
                                           secret=secret)
        self.stdout.write("Key: %s\n" % key)
        self.stdout.write("Secret: %s\n" % secret)
| {
"content_hash": "9a5162ecfb5c379bb0ee5bddee04f305",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 36.891891891891895,
"alnum_prop": 0.5384615384615384,
"repo_name": "vegitron/servicemap",
"id": "edbd0d9c9e3b7cec1e270749149e1c9f4089ec58",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servicemap/management/commands/create_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1771"
},
{
"name": "HTML",
"bytes": "7758"
},
{
"name": "Python",
"bytes": "43052"
}
],
"symlink_target": ""
} |
import os
import numpy as np
from tensorflow.python.checkpoint import checkpoint as util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.trackable import autotrackable
from tensorflow.python.trackable import data_structures
from tensorflow.python.util import nest
class InterfaceTests(test.TestCase):
  def testMultipleAssignment(self):
    """Re-tracking a new object under an existing name requires overwrite=True."""
    root = autotrackable.AutoTrackable()
    root.leaf = autotrackable.AutoTrackable()
    # Re-assigning the same object is a no-op and allowed.
    root.leaf = root.leaf
    duplicate_name_dep = autotrackable.AutoTrackable()
    with self.assertRaisesRegex(ValueError, "already declared"):
      root._track_trackable(duplicate_name_dep, name="leaf")
    # No error; we're overriding __setattr__, so we can't really stop people
    # from doing this while maintaining backward compatibility.
    root.leaf = duplicate_name_dep
    root._track_trackable(duplicate_name_dep, name="leaf", overwrite=True)
    self.assertIs(duplicate_name_dep, root._lookup_dependency("leaf"))
    self.assertIs(duplicate_name_dep, root._trackable_children()["leaf"])
  def testRemoveDependency(self):
    """Deleting a tracked attribute also removes its checkpoint dependency."""
    root = autotrackable.AutoTrackable()
    root.a = autotrackable.AutoTrackable()
    self.assertEqual(1, len(root._trackable_children()))
    self.assertEqual(1, len(root._unconditional_checkpoint_dependencies))
    self.assertIs(root.a, root._trackable_children()["a"])
    del root.a
    self.assertFalse(hasattr(root, "a"))
    self.assertEqual(0, len(root._trackable_children()))
    self.assertEqual(0, len(root._unconditional_checkpoint_dependencies))
    # Re-assigning under the same name re-creates the dependency.
    root.a = autotrackable.AutoTrackable()
    self.assertEqual(1, len(root._trackable_children()))
    self.assertEqual(1, len(root._unconditional_checkpoint_dependencies))
    self.assertIs(root.a, root._trackable_children()["a"])
  def testListBasic(self):
    """Trackables stored in a list attribute become (in)direct dependencies."""
    a = autotrackable.AutoTrackable()
    b = autotrackable.AutoTrackable()
    a.l = [b]
    c = autotrackable.AutoTrackable()
    a.l.append(c)
    a_deps = util.list_objects(a)
    self.assertIn(b, a_deps)
    self.assertIn(c, a_deps)
    self.assertIn("l", a._trackable_children())
    direct_a_dep = a._trackable_children()["l"]
    self.assertIn(b, direct_a_dep)
    self.assertIn(c, direct_a_dep)
  @test_util.run_in_graph_and_eager_modes
  def testMutationDirtiesList(self):
    """Inserting into a tracked list makes the checkpoint unsaveable."""
    a = autotrackable.AutoTrackable()
    b = autotrackable.AutoTrackable()
    a.l = [b]
    c = autotrackable.AutoTrackable()
    # insert() shifts existing elements, which breaks name-based tracking.
    a.l.insert(0, c)
    checkpoint = util.Checkpoint(a=a)
    with self.assertRaisesRegex(ValueError, "A list element was replaced"):
      checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testOutOfBandEditDirtiesList(self):
    """Mutating the original list object (bypassing the wrapper) is detected too."""
    a = autotrackable.AutoTrackable()
    b = autotrackable.AutoTrackable()
    held_reference = [b]
    a.l = held_reference
    c = autotrackable.AutoTrackable()
    # Appending through the held reference bypasses the tracking wrapper on a.l.
    held_reference.append(c)
    checkpoint = util.Checkpoint(a=a)
    with self.assertRaisesRegex(ValueError, "The wrapped list was modified"):
        checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testNestedLists(self):
    """Trackables in nested lists are discovered; non-trackables (e.g. ints) are not."""
    a = autotrackable.AutoTrackable()
    a.l = []
    b = autotrackable.AutoTrackable()
    a.l.append([b])
    c = autotrackable.AutoTrackable()
    a.l[0].append(c)
    a_deps = util.list_objects(a)
    self.assertIn(b, a_deps)
    self.assertIn(c, a_deps)
    # Appending a plain int must not create a dependency.
    a.l[0].append(1)
    d = autotrackable.AutoTrackable()
    a.l[0].append(d)
    a_deps = util.list_objects(a)
    self.assertIn(d, a_deps)
    self.assertIn(b, a_deps)
    self.assertIn(c, a_deps)
    self.assertNotIn(1, a_deps)
    e = autotrackable.AutoTrackable()
    f = autotrackable.AutoTrackable()
    a.l1 = [[], [e]]
    a.l1[0].append(f)
    a_deps = util.list_objects(a)
    self.assertIn(e, a_deps)
    self.assertIn(f, a_deps)
    checkpoint = util.Checkpoint(a=a)
    checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
    # NoDependency-wrapped contents may be mutated freely and still save.
    a.l[0].append(data_structures.NoDependency([]))
    a.l[0][-1].append(5)
    checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
    # Dirtying the inner list means the root object is unsaveable.
    a.l[0][1] = 2
    with self.assertRaisesRegex(ValueError, "A list element was replaced"):
        checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testAssertions(self):
    """Wrapped dicts/lists still interoperate with nest and numeric test asserts."""
    a = autotrackable.AutoTrackable()
    a.l = {"k": [np.zeros([2, 2])]}
    self.assertAllEqual(nest.flatten({"k": [np.zeros([2, 2])]}),
                        nest.flatten(a.l))
    self.assertAllClose({"k": [np.zeros([2, 2])]}, a.l)
    nest.map_structure(self.assertAllClose, a.l, {"k": [np.zeros([2, 2])]})
    # Tensor-valued contents compare against their evaluated numpy values.
    a.tensors = {"k": [array_ops.ones([2, 2]), array_ops.zeros([3, 3])]}
    self.assertAllClose({"k": [np.ones([2, 2]), np.zeros([3, 3])]},
                        self.evaluate(a.tensors))
if __name__ == "__main__":
    # Run the TensorFlow test harness when executed as a script.
    test.main()
| {
"content_hash": "f3c68581863eb3baad565ee042e1e332",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 77,
"avg_line_length": 38.42748091603053,
"alnum_prop": 0.6781883194278904,
"repo_name": "tensorflow/tensorflow-pywrap_saved_model",
"id": "bf86b8055b64a25e870e7d21a00668579e4b9cf5",
"size": "5723",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/python/trackable/autotrackable_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1392153"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125860957"
},
{
"name": "CMake",
"bytes": "182324"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2123155"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11347297"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300208"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42738981"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9214"
},
{
"name": "Shell",
"bytes": "621427"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7720442"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from flask import url_for
from .conftest import DEFAULT_GROUP_ID
from .conftest import DEFAULT_GROUP_NAME
from .conftest import create_group
from .conftest import create_package
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_index(db, client):
    """The default index must not list a group whose package is already fixed."""
    resp = client.get(url_for('tracker.index'), follow_redirects=True)
    assert 200 == resp.status_code
    assert 'text/html; charset=utf-8' == resp.content_type
    # The group is fixed (1.2.3-4 installed >= fixed), so it is hidden.
    assert DEFAULT_GROUP_NAME not in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3')
def test_index_vulnerable(db, client):
    """A group with an affected, unfixed package appears on the vulnerable index."""
    resp = client.get(url_for('tracker.index_vulnerable'), follow_redirects=True)
    assert 200 == resp.status_code
    assert DEFAULT_GROUP_NAME in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3')
def test_index_all(db, client):
    """The 'all' index lists open groups as well."""
    resp = client.get(url_for('tracker.index_all'), follow_redirects=True)
    assert 200 == resp.status_code
    assert DEFAULT_GROUP_NAME in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3')
def test_index_json(db, client):
    """The JSON index returns exactly the one open group."""
    # NOTE(review): 'only_vulernable' looks like a misspelling of
    # 'only_vulnerable' -- confirm against the route's parameter name before
    # renaming, since url_for forwards unknown names into the URL verbatim.
    resp = client.get(url_for('tracker.index_json', only_vulernable=False), follow_redirects=True)
    assert 200 == resp.status_code
    data = resp.get_json()
    assert 'application/json; charset=utf-8' == resp.content_type
    assert len(data) == 1
    assert data[0]['name'] == DEFAULT_GROUP_NAME
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3')
def test_index_vulnerable_json(db, client):
    """The vulnerable-only JSON index returns exactly the one open group."""
    resp = client.get(url_for('tracker.index_vulnerable_json'), follow_redirects=True)
    assert 200 == resp.status_code
    data = resp.get_json()
    assert len(data) == 1
    assert data[0]['name'] == DEFAULT_GROUP_NAME
| {
"content_hash": "2da74c9f6c4f326c5f9a56974c2d7965",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 98,
"avg_line_length": 39.61538461538461,
"alnum_prop": 0.6946601941747573,
"repo_name": "jelly/arch-security-tracker",
"id": "255aec49dcee48b2236b27dffc494be8472f4832",
"size": "2060",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9806"
},
{
"name": "HTML",
"bytes": "65601"
},
{
"name": "Makefile",
"bytes": "1441"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "345600"
}
],
"symlink_target": ""
} |
from datetime import timedelta
from functools import update_wrapper
import json
import os
from flask import make_response, request, current_app
import redis
# initialize redis connection for local and CF deployment
def connect_redis_db(redis_service_name='p-redis'):
    """Build a StrictRedis connection for local or Cloud Foundry deployments.

    When the ``VCAP_SERVICES`` environment variable is absent we assume a
    local redis on the default port (db 1); otherwise the credentials are
    read from the bound CF service entry named *redis_service_name* (db 0).
    """
    vcap_services = os.environ.get('VCAP_SERVICES')
    if vcap_services is None:
        # Local development defaults.
        host, port, password, db_index = 'localhost', 6379, '', 1
    else:
        # Running on Cloud Foundry: pull credentials from the bound service.
        credentials = json.loads(vcap_services)[redis_service_name][0]['credentials']
        host = credentials['host']
        port = credentials['port']
        password = credentials['password']
        db_index = 0
    return redis.StrictRedis(host=host,
                             port=port,
                             password=password,
                             db=db_index)
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator factory that adds CORS headers to a Flask view.

    :param origin: allowed origin(s); a string is used as-is, an iterable is
        joined with ', '.
    :param methods: optional iterable of allowed HTTP methods; when None the
        app's default OPTIONS response supplies the method list.
    :param headers: optional allowed request headers (string or iterable).
    :param max_age: preflight cache lifetime in seconds (or a timedelta).
    :param attach_to_all: attach headers to every response, not just OPTIONS.
    :param automatic_options: answer OPTIONS requests with Flask's default
        options response instead of calling the view.
    """
    # BUG FIX: the original used `basestring`, which does not exist on
    # Python 3 and raised NameError there.  The fallback keeps the original
    # Python 2 behaviour (accepting both str and unicode) while making the
    # decorator work on Python 3.
    try:
        string_types = basestring  # noqa: F821 -- Python 2 only
    except NameError:
        string_types = str
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, string_types):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, string_types):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        # Prefer the explicit method list; otherwise ask Flask what the
        # default OPTIONS response would allow.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp

            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            h['Access-Control-Allow-Credentials'] = 'true'
            h['Access-Control-Allow-Headers'] = \
                "Origin, X-Requested-With, Content-Type, Accept, Authorization"
            if headers is not None:
                # Explicit header list overrides the default set above.
                h['Access-Control-Allow-Headers'] = headers
            return resp

        # Tell Flask not to add its own automatic OPTIONS handling.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
| {
"content_hash": "5e10156ad0916b8aa966fd61b5863230",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 36.875,
"alnum_prop": 0.5804143126177025,
"repo_name": "lwalstad-pivotal/unified-demonstration-sentiment_01",
"id": "9b482de6f570187c2a61daf889871cecd3180031",
"size": "2679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentiment-compute-app/helper_functions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15000"
},
{
"name": "FreeMarker",
"bytes": "16494"
},
{
"name": "HTML",
"bytes": "5585"
},
{
"name": "Java",
"bytes": "22727"
},
{
"name": "JavaScript",
"bytes": "128444"
},
{
"name": "Python",
"bytes": "20258"
}
],
"symlink_target": ""
} |
# Package entry point: re-export Runner so callers can write
# ``from mparallel import Runner`` without reaching into the submodule.
# The ``runner`` module name itself also stays importable from the package.
from mparallel import runner

Runner = runner.Runner
| {
"content_hash": "7ec07d8847620ace6e1939663bb256f6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 28,
"avg_line_length": 13.5,
"alnum_prop": 0.7962962962962963,
"repo_name": "andersonvom/mparallel",
"id": "6b761b7d166c27463f8b884910267a37537fd9ee",
"size": "54",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mparallel/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6244"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the "Seriously" documentation.
# Generated by sphinx-quickstart; options left at their Sphinx defaults stay
# commented out, matching the upstream template.
import sys
import os
import shlex
# NOTE(review): sys, os and shlex are only needed by the commented-out
# sys.path tweak below and are otherwise unused here.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.mathjax',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Seriously'
copyright = '2015, Mego'
author = 'Mego'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'Seriouslydoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Seriously.tex', 'Seriously Documentation',
     'Mego', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'seriously', 'Seriously Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Seriously', 'Seriously Documentation',
     author, 'Seriously', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "69bae392abe7ab5a752b70483bfece48",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 79,
"avg_line_length": 33.065934065934066,
"alnum_prop": 0.683394261659466,
"repo_name": "Sherlock9/Seriously",
"id": "7b7242b883464f8143857fa009e3c2cccd7b11ec",
"size": "9485",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65250"
}
],
"symlink_target": ""
} |
"""Tests for compute service."""
import base64
import contextlib
import datetime
import operator
import sys
import time
import traceback
import uuid
from eventlet import greenthread
import mock
import mox
from oslo.config import cfg
from oslo import messaging
from oslo.serialization import jsonutils
from oslo.utils import importutils
from oslo.utils import timeutils
from oslo.utils import units
import six
import testtools
from testtools import matchers as testtools_matchers
import nova
from nova import availability_zones
from nova import block_device
from nova import compute
from nova.compute import api as compute_api
from nova.compute import arch
from nova.compute import flavors
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import manager as conductor_manager
from nova.console import type as ctype
from nova import context
from nova import db
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import policy
from nova import quota
from nova import test
from nova.tests.compute import eventlet_utils
from nova.tests.compute import fake_resource_tracker
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests import fake_network_cache_model
from nova.tests import fake_notifier
from nova.tests import fake_server_actions
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova.tests.objects import test_flavor
from nova.tests.objects import test_migration
from nova.tests.objects import test_network
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import event
from nova.virt import fake
from nova.virt import hardware
from nova.volume import cinder
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
def fake_not_implemented(*args, **kwargs):
    """Stub that unconditionally raises NotImplementedError.

    Accepts (and ignores) any positional or keyword arguments so it can
    stand in for arbitrary driver methods in tests.
    """
    raise NotImplementedError()
def get_primitive_instance_by_uuid(context, instance_uuid):
    """Fetch the instance identified by *instance_uuid* and convert it to
    primitive (JSON-serializable) types via jsonutils.
    """
    return jsonutils.to_primitive(
        db.instance_get_by_uuid(context, instance_uuid))
def unify_instance(instance):
    """Return a dict-like instance for both object-initiated and
    model-initiated sources that can reasonably be compared.

    Datetime values are stripped of tzinfo, and the 'fault' and
    'pci_devices' keys are dropped (see the inline NOTEs).
    """
    newdict = dict()
    # FIX: use items() instead of the Python-2-only iteritems(); the
    # behavior is identical, and the helper now also works on Python 3.
    for k, v in instance.items():
        if isinstance(v, datetime.datetime):
            # NOTE(danms): DB models and Instance objects have different
            # timezone expectations
            v = v.replace(tzinfo=None)
        elif k == 'fault':
            # NOTE(danms): DB models don't have 'fault'
            continue
        elif k == 'pci_devices':
            # NOTE(yonlig.he) pci devices need lazy loading
            # fake db does not support it yet.
            continue
        newdict[k] = v
    return newdict
class FakeSchedulerAPI(object):
    """No-op stand-in for the scheduler RPC API used by these tests."""

    def run_instance(self, ctxt, request_spec, admin_password,
                     injected_files, requested_networks, is_first_time,
                     filter_properties):
        # Intentionally a no-op: tests only need the call to succeed.
        pass

    def live_migration(self, ctxt, block_migration, disk_over_commit,
                       instance, dest):
        pass

    def prep_resize(self, ctxt, instance, instance_type, image, request_spec,
                    filter_properties, reservations):
        pass
class FakeComputeTaskAPI(object):
    """No-op stand-in for the conductor compute-task API."""

    def resize_instance(self, context, instance, extra_instance_updates,
                        scheduler_hint, flavor, reservations):
        # Intentionally a no-op: tests only need the call to succeed.
        pass
class BaseTestCase(test.TestCase):
    """Shared fixture for compute-manager tests.

    Builds a compute manager with stubbed network, image, scheduler and
    conductor services, plus helpers to create fake instances, flavors and
    security groups.
    """

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.flags(network_manager='nova.network.manager.FlatManager')
        fake.set_nodes([NODENAME])
        self.flags(use_local=True, group='conductor')

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.compute = importutils.import_object(CONF.compute_manager)
        # execute power syncing synchronously for testing:
        self.compute._sync_power_pool = eventlet_utils.SyncPool()

        # override tracker with a version that doesn't need the database:
        fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
                self.compute.driver, NODENAME)
        self.compute._resource_tracker_dict[NODENAME] = fake_rt

        def fake_get_compute_nodes_in_db(context, use_slave=False):
            # Static single-node inventory, converted to ComputeNode objects.
            fake_compute_nodes = [{'local_gb': 259,
                                   'vcpus_used': 0,
                                   'deleted': 0,
                                   'hypervisor_type': 'powervm',
                                   'created_at': '2013-04-01T00:27:06.000000',
                                   'local_gb_used': 0,
                                   'updated_at': '2013-04-03T00:35:41.000000',
                                   'hypervisor_hostname': 'fake_phyp1',
                                   'memory_mb_used': 512,
                                   'memory_mb': 131072,
                                   'current_workload': 0,
                                   'vcpus': 16,
                                   'cpu_info': 'ppc64,powervm,3940',
                                   'running_vms': 0,
                                   'free_disk_gb': 259,
                                   'service_id': 7,
                                   'hypervisor_version': 7,
                                   'disk_available_least': 265856,
                                   'deleted_at': None,
                                   'free_ram_mb': 130560,
                                   'metrics': '',
                                   'stats': '',
                                   'numa_topology': '',
                                   'id': 2,
                                   'host_ip': '127.0.0.1'}]
            return [objects.ComputeNode._from_db_object(
                        context, objects.ComputeNode(), cn)
                    for cn in fake_compute_nodes]

        def fake_compute_node_delete(context, compute_node_id):
            # Only the node with id 2 (above) should ever be deleted.
            self.assertEqual(2, compute_node_id)

        self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
                fake_get_compute_nodes_in_db)
        self.stubs.Set(db, 'compute_node_delete',
                fake_compute_node_delete)

        self.compute.update_available_resource(
                context.get_admin_context())

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)
        self.none_quotas = objects.Quotas.from_reservations(
                self.context, None)

        def fake_show(meh, context, id, **kwargs):
            # Minimal glance 'show' result; falsy ids raise ImageNotFound.
            if id:
                return {'id': id, 'min_disk': None, 'min_ram': None,
                        'name': 'fake_name',
                        'status': 'active',
                        'properties': {'kernel_id': 'fake_kernel_id',
                                       'ramdisk_id': 'fake_ramdisk_id',
                                       'something_else': 'meow'}}
            else:
                raise exception.ImageNotFound(image_id=id)

        fake_image.stub_out_image_service(self.stubs)
        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)

        fake_rpcapi = FakeSchedulerAPI()
        fake_taskapi = FakeComputeTaskAPI()
        self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
        self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi)

        fake_network.set_stub_network_methods(self.stubs)
        fake_server_actions.stub_out_action_events(self.stubs)

        def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
            # Network info lookups are expected to run with an admin context.
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)

        def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs):
            # Allocation is expected to run with the (non-admin) user context.
            self.assertFalse(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

        self.stubs.Set(network_api.API, 'allocate_for_instance',
                       fake_allocate_for_instance)
        self.compute_api = compute.API()

        # Just to make long lines short
        self.rt = self.compute._get_resource_tracker(NODENAME)

    def tearDown(self):
        # Destroy every instance created during the test and restore driver
        # node state before the base-class teardown runs.
        timeutils.clear_time_override()
        ctxt = context.get_admin_context()
        fake_image.FakeImageService_reset()
        instances = db.instance_get_all(ctxt)
        for instance in instances:
            db.instance_destroy(ctxt, instance['uuid'])
        fake.restore_nodes()
        super(BaseTestCase, self).tearDown()

    def _create_fake_instance(self, params=None, type_name='m1.tiny',
                              services=False):
        """Create a test instance."""
        if not params:
            params = {}

        def make_fake_sys_meta():
            # Copy the flavor's properties into instance system metadata,
            # as the resize/spawn paths expect.
            sys_meta = params.pop("system_metadata", {})
            inst_type = flavors.get_flavor_by_name(type_name)
            for key in flavors.system_metadata_flavor_props:
                sys_meta['instance_type_%s' % key] = inst_type[key]
            return sys_meta

        inst = {}
        inst['vm_state'] = vm_states.ACTIVE
        inst['task_state'] = None
        inst['image_ref'] = FAKE_IMAGE_REF
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user_id
        inst['project_id'] = self.project_id
        inst['host'] = 'fake_host'
        inst['node'] = NODENAME
        type_id = flavors.get_flavor_by_name(type_name)['id']
        inst['instance_type_id'] = type_id
        inst['ami_launch_index'] = 0
        inst['memory_mb'] = 0
        inst['vcpus'] = 0
        inst['root_gb'] = 0
        inst['ephemeral_gb'] = 0
        inst['architecture'] = arch.X86_64
        inst['os_type'] = 'Linux'
        inst['system_metadata'] = make_fake_sys_meta()
        inst['locked'] = False
        inst['created_at'] = timeutils.utcnow()
        inst['updated_at'] = timeutils.utcnow()
        inst['launched_at'] = timeutils.utcnow()
        inst['security_groups'] = []
        inst.update(params)
        if services:
            _create_service_entries(self.context.elevated(),
                    [['fake_zone', [inst['host']]]])
        return db.instance_create(self.context, inst)

    def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
                                  services=False):
        """Like _create_fake_instance, but returns an Instance object."""
        db_inst = self._create_fake_instance(params, type_name=type_name,
                                             services=services)
        return objects.Instance._from_db_object(
            self.context, objects.Instance(), db_inst,
            expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)

    def _create_instance_type(self, params=None):
        """Create a test instance type."""
        if not params:
            params = {}

        context = self.context.elevated()
        inst = {}
        inst['name'] = 'm1.small'
        inst['memory_mb'] = 1024
        inst['vcpus'] = 1
        inst['root_gb'] = 20
        inst['ephemeral_gb'] = 10
        inst['flavorid'] = '1'
        inst['swap'] = 2048
        inst['rxtx_factor'] = 1
        inst.update(params)
        return db.flavor_create(context, inst)['id']

    def _create_group(self):
        """Create a test security group owned by the fixture's user/project."""
        values = {'name': 'testgroup',
                  'description': 'testgroup',
                  'user_id': self.user_id,
                  'project_id': self.project_id}
        return db.security_group_create(self.context, values)

    def _stub_migrate_server(self):
        """Replace the conductor's migrate_server with a no-op."""
        def _fake_migrate_server(*args, **kwargs):
            pass

        self.stubs.Set(conductor_manager.ComputeTaskManager,
                       'migrate_server', _fake_migrate_server)

    def _init_aggregate_with_host(self, aggr, aggr_name, zone, host):
        """Create the aggregate if needed and add *host* to it."""
        if not aggr:
            aggr = self.api.create_aggregate(self.context, aggr_name, zone)

        aggr = self.api.add_host_to_aggregate(self.context, aggr['id'], host)

        return aggr
class ComputeVolumeTestCase(BaseTestCase):
def setUp(self):
    """Stub every cinder/driver/conductor call the volume paths make.

    All volume-API methods become no-ops; the conductor/db BDM update hooks
    capture the stored connection_info into self.cinfo for assertions.
    """
    super(ComputeVolumeTestCase, self).setUp()
    self.volume_id = 'fake'
    self.fetched_attempts = 0
    self.instance = {
        'id': 'fake',
        'uuid': 'fake',
        'name': 'fake',
        'root_device_name': '/dev/vda',
    }
    self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
        {'source_type': 'volume', 'destination_type': 'volume',
         'volume_id': self.volume_id, 'device_name': '/dev/vdb'})
    self.instance_object = objects.Instance._from_db_object(
        self.context, objects.Instance(),
        fake_instance.fake_db_instance())
    self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
                   {'id': self.volume_id,
                    'attach_status': 'detached'})
    self.stubs.Set(self.compute.driver, 'get_volume_connector',
                   lambda *a, **kw: None)
    self.stubs.Set(self.compute.volume_api, 'initialize_connection',
                   lambda *a, **kw: {})
    self.stubs.Set(self.compute.volume_api, 'terminate_connection',
                   lambda *a, **kw: None)
    self.stubs.Set(self.compute.volume_api, 'attach',
                   lambda *a, **kw: None)
    self.stubs.Set(self.compute.volume_api, 'detach',
                   lambda *a, **kw: None)
    self.stubs.Set(self.compute.volume_api, 'check_attach',
                   lambda *a, **kw: None)
    # Avoid real sleeps in retry loops.
    self.stubs.Set(greenthread, 'sleep',
                   lambda *a, **kw: None)

    def store_cinfo(context, *args, **kwargs):
        # Capture the connection_info being persisted for later assertions.
        self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
        return self.fake_volume

    self.stubs.Set(self.compute.conductor_api,
                   'block_device_mapping_update',
                   store_cinfo)
    self.stubs.Set(self.compute.conductor_api,
                   'block_device_mapping_update_or_create',
                   store_cinfo)
    self.stubs.Set(db, 'block_device_mapping_create', store_cinfo)
    self.stubs.Set(db, 'block_device_mapping_update', store_cinfo)
def test_attach_volume_serial(self):
    """attach_volume records the volume id as the connection 'serial'."""
    fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
    with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
                            return_value={})):
        instance = self._create_fake_instance_obj()
        self.compute.attach_volume(self.context, self.volume_id,
                                   '/dev/vdb', instance, bdm=fake_bdm)
        # self.cinfo was captured by the store_cinfo stub in setUp.
        self.assertEqual(self.cinfo.get('serial'), self.volume_id)
def test_attach_volume_raises(self):
    """If the driver attach fails, the volume is unreserved and the BDM destroyed."""
    fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
    instance = self._create_fake_instance_obj()

    def fake_attach(*args, **kwargs):
        raise test.TestingException

    with contextlib.nested(
        mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                          'attach'),
        mock.patch.object(cinder.API, 'unreserve_volume'),
        mock.patch.object(objects.BlockDeviceMapping,
                          'destroy')
    ) as (mock_attach, mock_unreserve, mock_destroy):
        mock_attach.side_effect = fake_attach
        self.assertRaises(
                test.TestingException, self.compute.attach_volume,
                self.context, 'fake', '/dev/vdb',
                instance, bdm=fake_bdm)
        # Cleanup must have run despite the exception.
        self.assertTrue(mock_unreserve.called)
        self.assertTrue(mock_destroy.called)
    def test_detach_volume_api_raises(self):
        """A cinder detach failure still performs the internal detach.

        Even if volume_api.detach raises, the hypervisor-side detach
        (_detach_volume) must already have been invoked with the BDM.
        """
        fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
        instance = self._create_fake_instance()
        with contextlib.nested(
            mock.patch.object(self.compute, '_detach_volume'),
            mock.patch.object(self.compute.volume_api, 'detach'),
            mock.patch.object(objects.BlockDeviceMapping,
                              'get_by_volume_id'),
            mock.patch.object(fake_bdm, 'destroy')
        ) as (mock_internal_detach, mock_detach, mock_get, mock_destroy):
            mock_detach.side_effect = test.TestingException
            mock_get.return_value = fake_bdm
            self.assertRaises(
                    test.TestingException, self.compute.detach_volume,
                    self.context, 'fake', instance)
            mock_internal_detach.assert_called_once_with(self.context,
                                                         instance,
                                                         fake_bdm)
            self.assertTrue(mock_destroy.called)
    def test_attach_volume_no_bdm(self):
        """attach_volume with bdm=None looks the BDM up by volume id."""
        fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
        instance = self._create_fake_instance_obj()
        with contextlib.nested(
            mock.patch.object(objects.BlockDeviceMapping,
                'get_by_volume_id', return_value=fake_bdm),
            mock.patch.object(self.compute, '_attach_volume')
        ) as (mock_get_by_id, mock_attach):
            self.compute.attach_volume(self.context, 'fake', '/dev/vdb',
                                       instance, bdm=None)
            mock_get_by_id.assert_called_once_with(self.context, 'fake')
            self.assertTrue(mock_attach.called)
    def test_await_block_device_created_too_slow(self):
        """VolumeNotCreated is raised when the volume never leaves 'creating'.

        With only 2 retries allowed and a volume that is always 'creating',
        _await_block_device_map_created must give up and raise.
        """
        self.flags(block_device_allocate_retries=2)
        self.flags(block_device_allocate_retries_interval=0.1)
        def never_get(context, vol_id):
            return {
                'status': 'creating',
                'id': 'blah',
            }
        self.stubs.Set(self.compute.volume_api, 'get', never_get)
        self.assertRaises(exception.VolumeNotCreated,
                          self.compute._await_block_device_map_created,
                          self.context, '1')
    def test_await_block_device_created_slow(self):
        """Polling succeeds once the volume finally becomes 'available'.

        The stub reports 'creating' for the first two polls and 'available'
        on the third, so exactly 3 attempts are expected.
        """
        c = self.compute
        self.flags(block_device_allocate_retries=4)
        self.flags(block_device_allocate_retries_interval=0.1)
        # NOTE(review): self.fetched_attempts is presumably initialized to 0
        # by the fixture's setup — not visible in this chunk; verify there.
        def slow_get(context, vol_id):
            if self.fetched_attempts < 2:
                self.fetched_attempts += 1
                return {
                    'status': 'creating',
                    'id': 'blah',
                }
            return {
                'status': 'available',
                'id': 'blah',
            }
        self.stubs.Set(c.volume_api, 'get', slow_get)
        attempts = c._await_block_device_map_created(self.context, '1')
        self.assertEqual(attempts, 3)
    def test_await_block_device_created_retries_negative(self):
        """A negative retry count still results in at least one poll."""
        c = self.compute
        self.flags(block_device_allocate_retries=-1)
        self.flags(block_device_allocate_retries_interval=0.1)
        def volume_get(context, vol_id):
            return {
                'status': 'available',
                'id': 'blah',
            }
        self.stubs.Set(c.volume_api, 'get', volume_get)
        attempts = c._await_block_device_map_created(self.context, '1')
        self.assertEqual(1, attempts)
def test_await_block_device_created_retries_zero(self):
c = self.compute
self.flags(block_device_allocate_retries=0)
self.flags(block_device_allocate_retries_interval=0.1)
def volume_get(context, vol_id):
return {
'status': 'available',
'id': 'blah',
}
self.stubs.Set(c.volume_api, 'get', volume_get)
attempts = c._await_block_device_map_created(self.context, '1')
self.assertEqual(1, attempts)
    def test_boot_volume_serial(self):
        """_prep_block_device sets the volume id as the connection serial."""
        with (
            mock.patch.object(objects.BlockDeviceMapping, 'save')
        ) as mock_save:
            block_device_mapping = [
            block_device.BlockDeviceDict({
                'id': 1,
                'no_device': None,
                'source_type': 'volume',
                'destination_type': 'volume',
                'snapshot_id': None,
                'volume_id': self.volume_id,
                'device_name': '/dev/vdb',
                'delete_on_termination': False,
            })]
            prepped_bdm = self.compute._prep_block_device(
                    self.context, self.instance, block_device_mapping)
            # The BDM must be persisted exactly once during prep.
            mock_save.assert_called_once_with(self.context)
            volume_driver_bdm = prepped_bdm['block_device_mapping'][0]
            self.assertEqual(volume_driver_bdm['connection_info']['serial'],
                             self.volume_id)
    def test_boot_volume_metadata(self, metadata=True):
        """_get_bdm_image_metadata extracts image metadata from a boot volume.

        Runs against both legacy and new-style BDMs. With metadata=True the
        volume_image_metadata properties (including min_ram/min_disk/size)
        must be surfaced; with metadata=False defaults are returned.
        """
        def volume_api_get(*args, **kwargs):
            if metadata:
                return {
                    'size': 1,
                    'volume_image_metadata': {'vol_test_key': 'vol_test_value',
                                              'min_ram': u'128',
                                              'min_disk': u'256',
                                              'size': u'536870912'
                                              },
                }
            else:
                return {}
        self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get)
        expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {},
                                'size': 0, 'status': 'active'}
        block_device_mapping = [{
            'id': 1,
            'device_name': 'vda',
            'no_device': None,
            'virtual_name': None,
            'snapshot_id': None,
            'volume_id': self.volume_id,
            'delete_on_termination': False,
        }]
        image_meta = self.compute_api._get_bdm_image_metadata(
            self.context, block_device_mapping)
        if metadata:
            self.assertEqual(image_meta['properties']['vol_test_key'],
                             'vol_test_value')
            # String values from the volume metadata are coerced to ints.
            self.assertEqual(128, image_meta['min_ram'])
            self.assertEqual(256, image_meta['min_disk'])
            self.assertEqual(units.Gi, image_meta['size'])
        else:
            self.assertEqual(expected_no_metadata, image_meta)
        # Test it with new-style BDMs
        block_device_mapping = [{
            'boot_index': 0,
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': self.volume_id,
            'delete_on_termination': False,
        }]
        image_meta = self.compute_api._get_bdm_image_metadata(
            self.context, block_device_mapping, legacy_bdm=False)
        if metadata:
            self.assertEqual(image_meta['properties']['vol_test_key'],
                             'vol_test_value')
            self.assertEqual(128, image_meta['min_ram'])
            self.assertEqual(256, image_meta['min_disk'])
            self.assertEqual(units.Gi, image_meta['size'])
        else:
            self.assertEqual(expected_no_metadata, image_meta)
def test_boot_volume_no_metadata(self):
self.test_boot_volume_metadata(metadata=False)
    def test_boot_image_metadata(self, metadata=True):
        """_get_bdm_image_metadata pulls properties from an image-backed BDM.

        With metadata=True the image's properties must be surfaced; with
        metadata=False an empty dict is returned.
        """
        def image_api_get(*args, **kwargs):
            if metadata:
                return {
                    'properties': {'img_test_key': 'img_test_value'}
                }
            else:
                return {}
        self.stubs.Set(self.compute_api.image_api, 'get', image_api_get)
        block_device_mapping = [{
            'boot_index': 0,
            'source_type': 'image',
            'destination_type': 'local',
            'image_id': "fake-image",
            'delete_on_termination': True,
        }]
        image_meta = self.compute_api._get_bdm_image_metadata(
            self.context, block_device_mapping, legacy_bdm=False)
        if metadata:
            self.assertEqual('img_test_value',
                             image_meta['properties']['img_test_key'])
        else:
            self.assertEqual(image_meta, {})
def test_boot_image_no_metadata(self):
self.test_boot_image_metadata(metadata=False)
    def test_poll_bandwidth_usage_disabled(self):
        """With bandwidth_poll_interval=0 the periodic task does nothing.

        The mox stub records no expected calls, so ReplayAll/verification
        would fail if _poll_bandwidth_usage touched the audit-period helper.
        """
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        # None of the mocks should be called.
        self.mox.ReplayAll()
        self.flags(bandwidth_poll_interval=0)
        self.compute._poll_bandwidth_usage(ctxt)
        self.mox.UnsetStubs()
    def test_poll_bandwidth_usage_not_implemented(self):
        """A driver without bandwidth counters disables further polling.

        After get_all_bw_counters raises NotImplementedError, subsequent
        calls to _poll_bandwidth_usage must not hit the driver again.
        """
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(self.compute.driver, 'get_all_bw_counters')
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        self.mox.StubOutWithMock(time, 'time')
        self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
        # Following methods will be called
        utils.last_completed_audit_period().AndReturn((0, 0))
        time.time().AndReturn(10)
        # Note - time called two more times from Log
        time.time().AndReturn(20)
        time.time().AndReturn(21)
        objects.InstanceList.get_by_host(ctxt, 'fake-mini',
                                         use_slave=True).AndReturn([])
        self.compute.driver.get_all_bw_counters([]).AndRaise(
            NotImplementedError)
        self.mox.ReplayAll()
        self.flags(bandwidth_poll_interval=1)
        self.compute._poll_bandwidth_usage(ctxt)
        # A second call won't call the stubs again as the bandwidth
        # poll is now disabled
        self.compute._poll_bandwidth_usage(ctxt)
        self.mox.UnsetStubs()
    @mock.patch.object(objects.InstanceList, 'get_by_host')
    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host):
        """_get_host_volume_bdms returns only volume-backed BDMs per instance.

        Non-volume BDMs (is_volume=False) must be filtered out of the
        per-instance 'instance_bdms' list.
        """
        fake_instance = mock.Mock(uuid='fake-instance-uuid')
        mock_get_by_host.return_value = [fake_instance]
        volume_bdm = mock.Mock(id=1, is_volume=True)
        not_volume_bdm = mock.Mock(id=2, is_volume=False)
        mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm]
        expected_host_bdms = [{'instance': fake_instance,
                               'instance_bdms': [volume_bdm]}]
        got_host_bdms = self.compute._get_host_volume_bdms('fake-context')
        mock_get_by_host.assert_called_once_with('fake-context',
                                                 self.compute.host)
        mock_get_by_inst.assert_called_once_with('fake-context',
                                                 'fake-instance-uuid',
                                                 use_slave=False)
        self.assertEqual(expected_host_bdms, got_host_bdms)
    def test_poll_volume_usage_disabled(self):
        """With volume_usage_poll_interval=0 the periodic task is a no-op."""
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        # None of the mocks should be called.
        self.mox.ReplayAll()
        self.flags(volume_usage_poll_interval=0)
        self.compute._poll_volume_usage(ctxt)
        self.mox.UnsetStubs()
    def test_poll_volume_usage_returns_no_vols(self):
        """With no volume BDMs on the host the driver is never queried."""
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
        # Following methods are called.
        utils.last_completed_audit_period().AndReturn((0, 0))
        self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([])
        self.mox.ReplayAll()
        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(ctxt)
        self.mox.UnsetStubs()
    def test_poll_volume_usage_with_data(self):
        """Driver usage data is forwarded to _update_volume_usage_cache."""
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache')
        self.stubs.Set(self.compute.driver, 'get_all_volume_usage',
                       lambda x, y: [3, 4])
        # All the mocks are called
        utils.last_completed_audit_period().AndReturn((10, 20))
        self.compute._get_host_volume_bdms(ctxt,
                                           use_slave=True).AndReturn([1, 2])
        self.compute._update_volume_usage_cache(ctxt, [3, 4])
        self.mox.ReplayAll()
        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(ctxt)
        self.mox.UnsetStubs()
    def test_detach_volume_usage(self):
        # Test that detach volume update the volume usage cache table correctly
        instance = self._create_fake_instance_obj()
        bdm = fake_block_device.FakeDbBlockDeviceDict(
            {'id': 1, 'device_name': '/dev/vdb',
             'connection_info': '{}', 'instance_uuid': instance['uuid'],
             'source_type': 'volume', 'destination_type': 'volume',
             'volume_id': 1})
        host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb',
               'connection_info': '{}', 'instance_uuid': instance['uuid'],
               'volume_id': 1}
        self.mox.StubOutWithMock(db, 'block_device_mapping_get_by_volume_id')
        self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
        # The following methods will be called
        db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
                AndReturn(bdm)
        # block_stats returns (rd_req, rd_bytes, wr_req, wr_bytes, errs);
        # these become the totals rolled over on detach.
        self.compute.driver.block_stats(instance['name'], 'vdb').\
                AndReturn([1L, 30L, 1L, 20L, None])
        self.compute._get_host_volume_bdms(self.context,
                                           use_slave=True).AndReturn(
                                               host_volume_bdms)
        self.compute.driver.get_all_volume_usage(
                self.context, host_volume_bdms).AndReturn(
                        [{'volume': 1,
                          'rd_req': 1,
                          'rd_bytes': 10,
                          'wr_req': 1,
                          'wr_bytes': 5,
                          'instance': instance}])
        db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
                AndReturn(bdm)
        self.mox.ReplayAll()
        def fake_get_volume_encryption_metadata(self, context, volume_id):
            return {}
        self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
                       fake_get_volume_encryption_metadata)
        self.compute.attach_volume(self.context, 1, '/dev/vdb', instance)
        # Poll volume usage & then detach the volume. This will update the
        # total fields in the volume usage cache.
        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(self.context)
        # Check that a volume.usage and volume.attach notification was sent
        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
        self.compute.detach_volume(self.context, 1, instance)
        # Check that volume.attach, 2 volume.usage, and volume.detach
        # notifications were sent
        self.assertEqual(4, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('compute.instance.volume.attach', msg.event_type)
        msg = fake_notifier.NOTIFICATIONS[2]
        self.assertEqual('volume.usage', msg.event_type)
        payload = msg.payload
        self.assertEqual(instance['uuid'], payload['instance_id'])
        self.assertEqual('fake', payload['user_id'])
        self.assertEqual('fake', payload['tenant_id'])
        self.assertEqual(1, payload['reads'])
        self.assertEqual(30, payload['read_bytes'])
        self.assertEqual(1, payload['writes'])
        self.assertEqual(20, payload['write_bytes'])
        self.assertIsNone(payload['availability_zone'])
        msg = fake_notifier.NOTIFICATIONS[3]
        self.assertEqual('compute.instance.volume.detach', msg.event_type)
        # Check the database for the usage record: current counters reset,
        # totals rolled over from the block_stats values above.
        volume_usages = db.vol_get_usage_by_time(self.context, 0)
        self.assertEqual(1, len(volume_usages))
        volume_usage = volume_usages[0]
        self.assertEqual(0, volume_usage['curr_reads'])
        self.assertEqual(0, volume_usage['curr_read_bytes'])
        self.assertEqual(0, volume_usage['curr_writes'])
        self.assertEqual(0, volume_usage['curr_write_bytes'])
        self.assertEqual(1, volume_usage['tot_reads'])
        self.assertEqual(30, volume_usage['tot_read_bytes'])
        self.assertEqual(1, volume_usage['tot_writes'])
        self.assertEqual(20, volume_usage['tot_write_bytes'])
    def test_prepare_image_mapping(self):
        """_prepare_image_mapping converts legacy virtual-name mappings.

        'swap' and 'ephemeralN' entries become new-style blank/local BDMs
        sized from the flavor; 'ami'/'root' entries are dropped.
        """
        swap_size = 1
        ephemeral_size = 1
        instance_type = {'swap': swap_size,
                         'ephemeral_gb': ephemeral_size}
        mappings = [
                {'virtual': 'ami', 'device': 'sda1'},
                {'virtual': 'root', 'device': '/dev/sda1'},
                {'virtual': 'swap', 'device': 'sdb4'},
                {'virtual': 'ephemeral0', 'device': 'sdc1'},
                {'virtual': 'ephemeral1', 'device': 'sdc2'},
        ]
        preped_bdm = self.compute_api._prepare_image_mapping(
            instance_type, mappings)
        expected_result = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': swap_size
            },
            {
                'device_name': '/dev/sdc1',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': CONF.default_ephemeral_format,
                'boot_index': -1,
                'volume_size': ephemeral_size
            },
            {
                'device_name': '/dev/sdc2',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': CONF.default_ephemeral_format,
                'boot_index': -1,
                'volume_size': ephemeral_size
            }
        ]
        for expected, got in zip(expected_result, preped_bdm):
            self.assertThat(expected, matchers.IsSubDictOf(got))
    def test_validate_bdm(self):
        """Exercise the validation rules enforced by _validate_bdm.

        Covers: a valid mapping set passes; duplicate boot indexes raise
        InvalidBDMBootSequence; too many local devices raise
        InvalidBDMLocalsLimit; oversized ephemerals/swap raise their
        respective size errors; more than one swap raises InvalidBDMFormat;
        an image->volume BDM without a size raises InvalidBDM.
        Note: the test mutates 'mappings' in place and restores it between
        the checks, so the order of assertions matters.
        """
        def fake_get(self, context, res_id):
            return {'id': res_id}
        def fake_check_attach(*args, **kwargs):
            pass
        self.stubs.Set(cinder.API, 'get', fake_get)
        self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
        self.stubs.Set(cinder.API, 'check_attach',
                       fake_check_attach)
        volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
        snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
        image_id = '77777777-aaaa-bbbb-cccc-555555555555'
        instance = self._create_fake_instance()
        instance_type = {'swap': 1, 'ephemeral_gb': 2}
        mappings = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            },
            {
                'device_name': '/dev/sda1',
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_type': 'disk',
                'volume_id': volume_id,
                'guest_format': None,
                'boot_index': 1,
                'volume_size': 6
            },
            {
                'device_name': '/dev/sda2',
                'source_type': 'snapshot',
                'destination_type': 'volume',
                'snapshot_id': snapshot_id,
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 0,
                'volume_size': 4
            },
            {
                'device_name': '/dev/sda3',
                'source_type': 'image',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 2,
                'volume_size': 1
            }
        ]
        # Make sure it passes at first
        self.compute_api._validate_bdm(self.context, instance,
                                       instance_type, mappings)
        # Boot sequence
        mappings[2]['boot_index'] = 2
        self.assertRaises(exception.InvalidBDMBootSequence,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings)
        mappings[2]['boot_index'] = 0
        # number of local block_devices
        self.flags(max_local_block_devices=1)
        self.assertRaises(exception.InvalidBDMLocalsLimit,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings)
        ephemerals = [
            {
                'device_name': '/dev/vdb',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'volume_id': volume_id,
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 1
            },
            {
                'device_name': '/dev/vdc',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'volume_id': volume_id,
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 1
            }]
        self.flags(max_local_block_devices=4)
        # More ephemerals are OK as long as they are not over the size limit
        self.compute_api._validate_bdm(self.context, instance,
                                       instance_type, mappings + ephemerals)
        # Ephemerals over the size limit
        ephemerals[0]['volume_size'] = 3
        self.assertRaises(exception.InvalidBDMEphemeralSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings + ephemerals)
        self.assertRaises(exception.InvalidBDMEphemeralSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings + [ephemerals[0]])
        # Swap over the size limit
        mappings[0]['volume_size'] = 3
        self.assertRaises(exception.InvalidBDMSwapSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings)
        mappings[0]['volume_size'] = 1
        additional_swap = [
            {
                'device_name': '/dev/vdb',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            }]
        # More than one swap
        self.assertRaises(exception.InvalidBDMFormat,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings + additional_swap)
        image_no_size = [
            {
                'device_name': '/dev/sda4',
                'source_type': 'image',
                'image_id': image_id,
                'destination_type': 'volume',
                'boot_index': -1,
                'volume_size': None,
            }]
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings + image_no_size)
def test_validate_bdm_media_service_exceptions(self):
instance_type = {'swap': 1, 'ephemeral_gb': 1}
all_mappings = [{'id': 1,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': self.volume_id,
'device_name': 'vda',
'boot_index': 0,
'delete_on_termination': False}]
# First we test a list of invalid status values that should result
# in an InvalidVolume exception being raised.
status_values = (
# First two check that the status is 'available'.
('creating', 'detached'),
('error', 'detached'),
# Checks that the attach_status is 'detached'.
('available', 'attached')
)
for status, attach_status in status_values:
def fake_volume_get(self, ctxt, volume_id):
return {'id': volume_id,
'status': status,
'attach_status': attach_status}
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
instance_type, all_mappings)
# Now we test a 404 case that results in InvalidBDMVolume.
def fake_volume_get_not_found(self, context, volume_id):
raise exception.VolumeNotFound(volume_id)
self.stubs.Set(cinder.API, 'get', fake_volume_get_not_found)
self.assertRaises(exception.InvalidBDMVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
instance_type, all_mappings)
# Check that the volume status is 'available' and attach_status is
# 'detached' and accept the request if so
def fake_volume_get_ok(self, context, volume_id):
return {'id': volume_id,
'status': 'available',
'attach_status': 'detached'}
self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)
self.compute_api._validate_bdm(self.context, self.instance,
instance_type, all_mappings)
def test_volume_snapshot_create(self):
self.assertRaises(messaging.ExpectedException,
self.compute.volume_snapshot_create, self.context,
self.instance_object, 'fake_id', {})
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.volume_snapshot_create, self.context,
self.instance_object, 'fake_id', {})
def test_volume_snapshot_delete(self):
self.assertRaises(messaging.ExpectedException,
self.compute.volume_snapshot_delete, self.context,
self.instance_object, 'fake_id', 'fake_id2', {})
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.volume_snapshot_delete, self.context,
self.instance_object, 'fake_id', 'fake_id2', {})
    @mock.patch.object(cinder.API, 'create',
                       side_effect=exception.OverQuota(overs='volumes'))
    def test_prep_block_device_over_quota_failure(self, mock_create):
        """A cinder volume-quota failure during prep surfaces as InvalidBDM."""
        instance = self._create_fake_instance()
        bdms = [
            block_device.BlockDeviceDict({
                'boot_index': 0,
                'guest_format': None,
                'connection_info': None,
                'device_type': u'disk',
                'source_type': 'image',
                'destination_type': 'volume',
                'volume_size': 1,
                'image_id': 1,
                'device_name': '/dev/vdb',
            })]
        self.assertRaises(exception.InvalidBDM,
                          compute_manager.ComputeManager()._prep_block_device,
                          self.context, instance, bdms)
        self.assertTrue(mock_create.called)
    @mock.patch.object(nova.virt.block_device, 'get_swap')
    @mock.patch.object(nova.virt.block_device, 'convert_blanks')
    @mock.patch.object(nova.virt.block_device, 'convert_images')
    @mock.patch.object(nova.virt.block_device, 'convert_snapshots')
    @mock.patch.object(nova.virt.block_device, 'convert_volumes')
    @mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
    @mock.patch.object(nova.virt.block_device, 'convert_swap')
    @mock.patch.object(nova.virt.block_device, 'attach_block_devices')
    def test_prep_block_device_with_blanks(self, attach_block_devices,
                                           convert_swap, convert_ephemerals,
                                           convert_volumes, convert_snapshots,
                                           convert_images, convert_blanks,
                                           get_swap):
        """_prep_block_device attaches blank volumes alongside the root.

        All convert_* helpers are stubbed so the BDM list passes through
        unchanged; every BDM (2 blanks + 1 root image, plus the get_swap
        pass) must be attached, giving 4 attach calls.
        """
        instance = self._create_fake_instance()
        instance['root_device_name'] = '/dev/vda'
        root_volume = objects.BlockDeviceMapping(
             **fake_block_device.FakeDbBlockDeviceDict({
                'instance_uuid': 'fake-instance',
                'source_type': 'image',
                'destination_type': 'volume',
                'image_id': 'fake-image-id-1',
                'volume_size': 1,
                'boot_index': 0}))
        blank_volume1 = objects.BlockDeviceMapping(
             **fake_block_device.FakeDbBlockDeviceDict({
                'instance_uuid': 'fake-instance',
                'source_type': 'blank',
                'destination_type': 'volume',
                'volume_size': 1,
                'boot_index': 1}))
        blank_volume2 = objects.BlockDeviceMapping(
             **fake_block_device.FakeDbBlockDeviceDict({
                'instance_uuid': 'fake-instance',
                'source_type': 'blank',
                'destination_type': 'volume',
                'volume_size': 1,
                'boot_index': 2}))
        bdms = [blank_volume1, blank_volume2, root_volume]
        def fake_attach_block_devices(bdm, *args, **kwargs):
            return bdm
        convert_swap.return_value = []
        convert_ephemerals.return_value = []
        convert_volumes.return_value = [blank_volume1, blank_volume2]
        convert_snapshots.return_value = []
        convert_images.return_value = [root_volume]
        convert_blanks.return_value = []
        attach_block_devices.side_effect = fake_attach_block_devices
        get_swap.return_value = []
        expected_block_device_info = {
            'root_device_name': '/dev/vda',
            'swap': [],
            'ephemerals': [],
            'block_device_mapping': bdms
        }
        manager = compute_manager.ComputeManager()
        manager.use_legacy_block_device_info = False
        block_device_info = manager._prep_block_device(self.context, instance,
                                                       bdms)
        convert_swap.assert_called_once_with(bdms)
        convert_ephemerals.assert_called_once_with(bdms)
        convert_volumes.assert_called_once_with(bdms)
        convert_snapshots.assert_called_once_with(bdms)
        convert_images.assert_called_once_with(bdms)
        convert_blanks.assert_called_once_with(bdms)
        self.assertEqual(expected_block_device_info, block_device_info)
        self.assertEqual(4, attach_block_devices.call_count)
        get_swap.assert_called_once_with([])
class ComputeTestCase(BaseTestCase):
def test_wrap_instance_fault(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise NotImplementedError()
self.assertRaises(NotImplementedError, failer,
self.compute, self.context, instance=inst)
self.assertTrue(called['fault_added'])
def test_wrap_instance_fault_instance_in_args(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise NotImplementedError()
self.assertRaises(NotImplementedError, failer,
self.compute, self.context, inst)
self.assertTrue(called['fault_added'])
def test_wrap_instance_fault_no_instance(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.assertRaises(exception.InstanceNotFound, failer,
self.compute, self.context, inst)
self.assertFalse(called['fault_added'])
    @mock.patch.object(objects.InstanceActionEvent, 'event_start')
    @mock.patch.object(objects.InstanceActionEvent,
                       'event_finish_with_failure')
    def test_wrap_instance_event(self, mock_finish, mock_start):
        """wrap_instance_event records start and finish action events."""
        inst = {"uuid": "fake_uuid"}
        @compute_manager.wrap_instance_event
        def fake_event(self, context, instance):
            pass
        fake_event(self.compute, self.context, instance=inst)
        self.assertTrue(mock_start.called)
        self.assertTrue(mock_finish.called)
    @mock.patch.object(objects.InstanceActionEvent, 'event_start')
    @mock.patch.object(objects.InstanceActionEvent,
                       'event_finish_with_failure')
    def test_wrap_instance_event_return(self, mock_finish, mock_start):
        """wrap_instance_event passes through the wrapped return value."""
        inst = {"uuid": "fake_uuid"}
        @compute_manager.wrap_instance_event
        def fake_event(self, context, instance):
            return True
        retval = fake_event(self.compute, self.context, instance=inst)
        self.assertTrue(retval)
        self.assertTrue(mock_start.called)
        self.assertTrue(mock_finish.called)
    @mock.patch.object(objects.InstanceActionEvent, 'event_start')
    @mock.patch.object(objects.InstanceActionEvent,
                       'event_finish_with_failure')
    def test_wrap_instance_event_log_exception(self, mock_finish, mock_start):
        """A raised exception is recorded on the finish event and re-raised."""
        inst = {"uuid": "fake_uuid"}
        @compute_manager.wrap_instance_event
        def fake_event(self2, context, instance):
            raise exception.NovaException()
        self.assertRaises(exception.NovaException, fake_event,
                          self.compute, self.context, instance=inst)
        self.assertTrue(mock_start.called)
        self.assertTrue(mock_finish.called)
        args, kwargs = mock_finish.call_args
        # The original exception instance must be handed to event_finish.
        self.assertIsInstance(kwargs['exc_val'], exception.NovaException)
    def test_object_compat(self):
        """object_compat converts a DB-dict instance into objects.Instance."""
        db_inst = fake_instance.fake_db_instance()
        @compute_manager.object_compat
        def test_fn(_self, context, instance):
            self.assertIsInstance(instance, objects.Instance)
            self.assertEqual(instance.uuid, db_inst['uuid'])
        test_fn(None, self.context, instance=db_inst)
    def test_object_compat_more_positional_args(self):
        """object_compat preserves extra positional args after the instance."""
        db_inst = fake_instance.fake_db_instance()
        @compute_manager.object_compat
        def test_fn(_self, context, instance, pos_arg_1, pos_arg_2):
            self.assertIsInstance(instance, objects.Instance)
            self.assertEqual(instance.uuid, db_inst['uuid'])
            self.assertEqual(pos_arg_1, 'fake_pos_arg1')
            self.assertEqual(pos_arg_2, 'fake_pos_arg2')
        test_fn(None, self.context, db_inst, 'fake_pos_arg1', 'fake_pos_arg2')
    def test_create_instance_with_img_ref_associates_config_drive(self):
        # Make sure create associates a config drive.
        # NOTE(review): this body is identical to
        # test_create_instance_associates_config_drive below — looks like a
        # historical duplicate; confirm before consolidating.
        instance = self._create_fake_instance_obj(
                        params={'config_drive': '1234', })
        try:
            self.compute.run_instance(self.context, instance, {}, {},
                    [], None, None, True, None, False)
            instances = db.instance_get_all(self.context)
            instance = instances[0]
            self.assertTrue(instance['config_drive'])
        finally:
            db.instance_destroy(self.context, instance['uuid'])
    def test_create_instance_associates_config_drive(self):
        # Make sure create associates a config drive.
        instance = self._create_fake_instance_obj(
                        params={'config_drive': '1234', })
        try:
            self.compute.run_instance(self.context, instance, {}, {},
                    [], None, None, True, None, False)
            instances = db.instance_get_all(self.context)
            instance = instances[0]
            self.assertTrue(instance['config_drive'])
        finally:
            # Always clean up the instance record, even on assertion failure.
            db.instance_destroy(self.context, instance['uuid'])
    def test_create_instance_unlimited_memory(self):
        # Default of memory limit=None is unlimited.
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())
        params = {"memory_mb": 999999999999}
        filter_properties = {'limits': {'memory_mb': None}}
        instance = self._create_fake_instance_obj(params)
        self.compute.run_instance(self.context, instance, {},
                filter_properties, [], None, None, True, None, False)
        # The absurdly large claim succeeds because the limit is None.
        self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
    def test_create_instance_unlimited_disk(self):
        """A disk_gb limit of None allows arbitrarily large disk claims."""
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())
        params = {"root_gb": 999999999999,
                  "ephemeral_gb": 99999999999}
        filter_properties = {'limits': {'disk_gb': None}}
        instance = self._create_fake_instance_obj(params)
        self.compute.run_instance(self.context, instance, {},
                filter_properties, [], None, None, True, None, False)
    def test_create_multiple_instances_then_starve(self):
        """Resource claims accumulate until the limits are exhausted.

        Two instances fit inside the memory/disk limits; the third exceeds
        them and must fail with ComputeResourcesUnavailable.
        """
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())
        filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}}
        params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
        instance = self._create_fake_instance_obj(params)
        self.compute.run_instance(self.context, instance, {},
                filter_properties, [], None, None, True, None, False)
        self.assertEqual(1024, self.rt.compute_node['memory_mb_used'])
        self.assertEqual(256, self.rt.compute_node['local_gb_used'])
        params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
        instance = self._create_fake_instance_obj(params)
        self.compute.run_instance(self.context, instance, {},
                filter_properties, [], None, None, True, None, False)
        self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
        self.assertEqual(768, self.rt.compute_node['local_gb_used'])
        params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
        instance = self._create_fake_instance_obj(params)
        self.assertRaises(exception.ComputeResourcesUnavailable,
                self.compute.run_instance, self.context, instance,
                {}, filter_properties, [], None, None, True, None, False)
def test_create_multiple_instance_with_neutron_port(self):
instance_type = flavors.get_default_flavor()
def fake_is_neutron():
return True
self.stubs.Set(utils, 'is_neutron', fake_is_neutron)
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='adadds')])
self.assertRaises(exception.MultiplePortsNotApplicable,
self.compute_api.create,
self.context,
instance_type=instance_type,
image_href=None,
max_count=2,
requested_networks=requested_networks)
    def test_create_instance_with_oversubscribed_ram(self):
        # Test passing of oversubscribed ram policy from the scheduler.
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())
        # get total memory as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        total_mem_mb = resources['memory_mb']
        oversub_limit_mb = total_mem_mb * 1.5
        instance_mb = int(total_mem_mb * 1.45)
        # build an instance, specifying an amount of memory that exceeds
        # total_mem_mb, but is less than the oversubscribed limit:
        params = {"memory_mb": instance_mb, "root_gb": 128,
                  "ephemeral_gb": 128}
        instance = self._create_fake_instance_obj(params)
        limits = {'memory_mb': oversub_limit_mb}
        filter_properties = {'limits': limits}
        self.compute.run_instance(self.context, instance, {},
                filter_properties, [], None, None, True, None, False)
        self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
def test_create_instance_with_oversubscribed_ram_fail(self):
    """Test passing of oversubscribed ram policy from the scheduler, but
    with insufficient memory.
    """
    self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
    self.rt.update_available_resource(self.context.elevated())

    # get total memory as reported by virt driver:
    resources = self.compute.driver.get_available_resource(NODENAME)
    total_mem_mb = resources['memory_mb']

    oversub_limit_mb = total_mem_mb * 1.5
    # 1.55x exceeds even the 1.5x oversubscribe limit, so the resource
    # claim must be rejected:
    instance_mb = int(total_mem_mb * 1.55)

    # build an instance, specifying an amount of memory that exceeds
    # both total_mem_mb and the oversubscribed limit:
    params = {"memory_mb": instance_mb, "root_gb": 128,
              "ephemeral_gb": 128}

    # NOTE(review): sibling tests use _create_fake_instance_obj here;
    # presumably the dict-based variant is intentional — confirm.
    instance = self._create_fake_instance(params)

    filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}

    self.assertRaises(exception.ComputeResourcesUnavailable,
                      self.compute.run_instance, self.context, instance, {},
                      filter_properties, [], None, None, True, None, False)
def test_create_instance_with_oversubscribed_cpu(self):
    # Test passing of oversubscribed cpu policy from the scheduler.
    self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
    self.rt.update_available_resource(self.context.elevated())
    limits = {'vcpu': 3}
    filter_properties = {'limits': limits}

    # get total vcpus as reported by virt driver:
    resources = self.compute.driver.get_available_resource(NODENAME)
    self.assertEqual(1, resources['vcpus'])

    # build an instance, specifying an amount of vcpus that exceeds
    # the physical count, but is less than the oversubscribed limit:
    params = {"memory_mb": 10, "root_gb": 1,
              "ephemeral_gb": 1, "vcpus": 2}
    instance = self._create_fake_instance_obj(params)
    self.compute.run_instance(self.context, instance, {},
                              filter_properties, [], None, None, True, None,
                              False)

    self.assertEqual(2, self.rt.compute_node['vcpus_used'])

    # create one more instance:
    params = {"memory_mb": 10, "root_gb": 1,
              "ephemeral_gb": 1, "vcpus": 1}
    instance = self._create_fake_instance_obj(params)
    self.compute.run_instance(self.context, instance, {},
                              filter_properties, [], None, None, True, None,
                              False)

    self.assertEqual(3, self.rt.compute_node['vcpus_used'])

    # delete the instance:
    instance['vm_state'] = vm_states.DELETED
    self.rt.update_usage(self.context,
                         instance=instance)

    self.assertEqual(2, self.rt.compute_node['vcpus_used'])

    # now oversubscribe vcpus and fail:
    params = {"memory_mb": 10, "root_gb": 1,
              "ephemeral_gb": 1, "vcpus": 2}
    instance = self._create_fake_instance_obj(params)

    # 2 requested + 2 in use would exceed the limit of 3:
    limits = {'vcpu': 3}
    filter_properties = {'limits': limits}
    self.assertRaises(exception.ComputeResourcesUnavailable,
                      self.compute.run_instance, self.context, instance, {},
                      filter_properties, [], None, None, True, None, False)
def test_create_instance_with_oversubscribed_disk(self):
    # Test passing of oversubscribed disk policy from the scheduler.
    self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
    self.rt.update_available_resource(self.context.elevated())

    # get total disk as reported by virt driver:
    resources = self.compute.driver.get_available_resource(NODENAME)
    total_disk_gb = resources['local_gb']

    oversub_limit_gb = total_disk_gb * 1.5
    instance_gb = int(total_disk_gb * 1.45)

    # build an instance, specifying an amount of disk that exceeds
    # total_disk_gb, but is less than the oversubscribed limit:
    params = {"root_gb": instance_gb, "memory_mb": 10}
    instance = self._create_fake_instance_obj(params)

    limits = {'disk_gb': oversub_limit_gb}
    filter_properties = {'limits': limits}
    self.compute.run_instance(self.context, instance, {},
                              filter_properties, [], None, None, True, None,
                              False)

    self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
def test_create_instance_with_oversubscribed_disk_fail(self):
    """Test passing of oversubscribed disk policy from the scheduler, but
    with insufficient disk.
    """
    self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
    self.rt.update_available_resource(self.context.elevated())

    # get total disk as reported by virt driver:
    resources = self.compute.driver.get_available_resource(NODENAME)
    total_disk_gb = resources['local_gb']

    oversub_limit_gb = total_disk_gb * 1.5
    # 1.55x exceeds even the 1.5x oversubscribe limit:
    instance_gb = int(total_disk_gb * 1.55)

    # build an instance, specifying an amount of disk that exceeds
    # both total_disk_gb and the oversubscribed limit:
    params = {"root_gb": instance_gb, "memory_mb": 10}

    # NOTE(review): sibling tests use _create_fake_instance_obj here;
    # presumably the dict-based variant is intentional — confirm.
    instance = self._create_fake_instance(params)

    limits = {'disk_gb': oversub_limit_gb}
    filter_properties = {'limits': limits}
    self.assertRaises(exception.ComputeResourcesUnavailable,
                      self.compute.run_instance, self.context, instance, {},
                      filter_properties, [], None, None, True, None, False)
def test_create_instance_without_node_param(self):
    # When no node is supplied, the compute manager must fill in its
    # own node name on the created instance.
    instance = self._create_fake_instance_obj({'node': None})

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    instances = db.instance_get_all(self.context)
    instance = instances[0]

    self.assertEqual(NODENAME, instance['node'])
def test_create_instance_no_image(self):
    # Create instance with no image provided.
    params = {'image_ref': ''}
    instance = self._create_fake_instance_obj(params)
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    # The boot must still reach ACTIVE even without an image.
    self._assert_state({'vm_state': vm_states.ACTIVE,
                        'task_state': None})
def test_default_access_ip(self):
    # With default_access_ip_network_name set, the IPs of the named
    # network are copied into access_ip_v4/v6 on the instance.
    self.flags(default_access_ip_network_name='test1')
    fake_network.unset_stub_network_methods(self.stubs)
    instance = self._create_fake_instance_obj()

    orig_update = self.compute._instance_update

    # Make sure the access_ip_* updates happen in the same DB
    # update as the set to ACTIVE.
    def _instance_update(ctxt, instance_uuid, **kwargs):
        if kwargs.get('vm_state', None) == vm_states.ACTIVE:
            self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
            self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
        return orig_update(ctxt, instance_uuid, **kwargs)
    self.stubs.Set(self.compute, '_instance_update', _instance_update)

    try:
        self.compute.run_instance(self.context, instance, {}, {}, [], None,
                                  None, True, None, False)
        instances = db.instance_get_all(self.context)
        instance = instances[0]

        self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
        # NOTE(review): this v6 address differs from the one asserted in
        # the stub above — presumably the fake network reports both;
        # confirm against the fake_network fixture.
        self.assertEqual(instance['access_ip_v6'],
                         '2001:db8:0:1:dcad:beff:feef:1')
    finally:
        db.instance_destroy(self.context, instance['uuid'])
def test_no_default_access_ip(self):
    # Without default_access_ip_network_name, access_ip_* stay unset.
    instance = self._create_fake_instance_obj()

    try:
        self.compute.run_instance(self.context, instance, {}, {}, [], None,
                                  None, True, None, False)
        instances = db.instance_get_all(self.context)
        instance = instances[0]

        self.assertFalse(instance['access_ip_v4'])
        self.assertFalse(instance['access_ip_v6'])
    finally:
        db.instance_destroy(self.context, instance['uuid'])
def test_fail_to_schedule_persists(self):
    # check the persistence of the ERROR(scheduling) state.
    params = {'vm_state': vm_states.ERROR,
              'task_state': task_states.SCHEDULING}
    self._create_fake_instance(params=params)
    # check state is failed even after the periodic poll
    self.compute.periodic_tasks(context.get_admin_context())
    self._assert_state({'vm_state': vm_states.ERROR,
                        'task_state': task_states.SCHEDULING})
def test_run_instance_setup_block_device_mapping_fail(self):
    """Block device mapping failure test.

    Make sure that when there is a block device mapping problem,
    the instance goes to ERROR state, keeping the task state.
    """
    def fake(*args, **kwargs):
        raise exception.InvalidBDM()
    self.stubs.Set(nova.compute.manager.ComputeManager,
                   '_prep_block_device', fake)
    instance = self._create_fake_instance()
    self.assertRaises(exception.InvalidBDM, self.compute.run_instance,
                      self.context, instance=instance, request_spec={},
                      filter_properties={}, requested_networks=[],
                      injected_files=None, admin_password=None,
                      is_first_time=True, node=None,
                      legacy_bdm_in_spec=False)
    # check state is failed even after the periodic poll
    self._assert_state({'vm_state': vm_states.ERROR,
                        'task_state': None})
    self.compute.periodic_tasks(context.get_admin_context())
    self._assert_state({'vm_state': vm_states.ERROR,
                        'task_state': None})
@mock.patch('nova.compute.manager.ComputeManager._prep_block_device',
            side_effect=exception.OverQuota(overs='volumes'))
def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev):
    """Block device mapping over-quota failure test.

    Make sure that when we are over volume quota according to the Cinder
    client, the appropriate exception is raised and the instance goes to
    ERROR state, keeping the task state.
    """
    instance = self._create_fake_instance()
    self.assertRaises(exception.OverQuota, self.compute.run_instance,
                      self.context, instance=instance, request_spec={},
                      filter_properties={}, requested_networks=[],
                      injected_files=None, admin_password=None,
                      is_first_time=True, node=None,
                      legacy_bdm_in_spec=False)
    # check state is failed even after the periodic poll
    self._assert_state({'vm_state': vm_states.ERROR,
                        'task_state': None})
    self.compute.periodic_tasks(context.get_admin_context())
    self._assert_state({'vm_state': vm_states.ERROR,
                        'task_state': None})
    self.assertTrue(mock_prep_block_dev.called)
def test_run_instance_spawn_fail(self):
    """Spawn failure test.

    Make sure that when there is a spawning problem,
    the instance goes to ERROR state, keeping the task state.
    """
    def fake(*args, **kwargs):
        raise test.TestingException()
    self.stubs.Set(self.compute.driver, 'spawn', fake)
    instance = self._create_fake_instance_obj()
    self.assertRaises(test.TestingException, self.compute.run_instance,
                      self.context, instance=instance, request_spec={},
                      filter_properties={}, requested_networks=[],
                      injected_files=None, admin_password=None,
                      is_first_time=True, node=None,
                      legacy_bdm_in_spec=False)
    # check state is failed even after the periodic poll
    self._assert_state({'vm_state': vm_states.ERROR,
                        'task_state': None})
    self.compute.periodic_tasks(context.get_admin_context())
    self._assert_state({'vm_state': vm_states.ERROR,
                        'task_state': None})
def test_run_instance_dealloc_network_instance_not_found(self):
    """Spawn network deallocate test.

    Make sure that when an instance is not found during spawn
    that the network is deallocated.
    """
    instance = self._create_fake_instance_obj()

    def fake(*args, **kwargs):
        raise exception.InstanceNotFound(instance_id="fake")
    self.stubs.Set(self.compute.driver, 'spawn', fake)
    # Record the expected _deallocate_network call; ReplayAll makes the
    # mock verify it actually happens.
    self.mox.StubOutWithMock(self.compute, '_deallocate_network')
    self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
def test_run_instance_bails_on_missing_instance(self):
    # Make sure that run_instance() will quickly ignore a deleted instance
    called = {}
    instance = self._create_fake_instance()

    def fake_instance_update(self, *a, **args):
        called['instance_update'] = True
        raise exception.InstanceNotFound(instance_id='foo')
    self.stubs.Set(self.compute, '_instance_update', fake_instance_update)

    # Must not raise: InstanceNotFound is swallowed by run_instance.
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    self.assertIn('instance_update', called)
def test_run_instance_bails_on_deleting_instance(self):
    # Make sure that run_instance() will quickly ignore a deleting instance
    called = {}
    instance = self._create_fake_instance()

    def fake_instance_update(self, *a, **args):
        called['instance_update'] = True
        raise exception.UnexpectedDeletingTaskStateError(
            expected='scheduling', actual='deleting')
    self.stubs.Set(self.compute, '_instance_update', fake_instance_update)

    # Must not raise: the deleting race is swallowed by run_instance.
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    self.assertIn('instance_update', called)
def test_run_instance_bails_on_missing_instance_2(self):
    # Make sure that run_instance() will quickly ignore a deleted instance
    # when the InstanceNotFound surfaces later, from
    # _default_block_device_names rather than _instance_update.
    called = {}
    instance = self._create_fake_instance()

    def fake_default_block_device_names(self, *a, **args):
        called['default_block_device_names'] = True
        raise exception.InstanceNotFound(instance_id='foo')
    self.stubs.Set(self.compute, '_default_block_device_names',
                   fake_default_block_device_names)

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    self.assertIn('default_block_device_names', called)
def test_can_terminate_on_error_state(self):
    # Make sure that the instance can be terminated in ERROR state.
    # check failed to schedule --> terminate
    params = {'vm_state': vm_states.ERROR}
    instance = self._create_fake_instance_obj(params=params)
    self.compute.terminate_instance(self.context, instance, [], [])
    self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                      self.context, instance['uuid'])
    # Double check it's not there for admins, either.
    self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                      self.context.elevated(), instance['uuid'])
def test_run_terminate(self):
    # Make sure it is possible to run and terminate instance.
    instance = self._create_fake_instance_obj()

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    instances = db.instance_get_all(self.context)
    LOG.info("Running instances: %s", instances)
    self.assertEqual(len(instances), 1)

    self.compute.terminate_instance(self.context, instance, [], [])

    instances = db.instance_get_all(self.context)
    LOG.info("After terminating instances: %s", instances)
    self.assertEqual(len(instances), 0)

    # The soft-deleted row must still be visible with read_deleted and
    # carry the DELETED vm_state with no task_state.
    admin_deleted_context = context.get_admin_context(
        read_deleted="only")
    instance = db.instance_get_by_uuid(admin_deleted_context,
                                       instance['uuid'])
    self.assertEqual(instance['vm_state'], vm_states.DELETED)
    self.assertIsNone(instance['task_state'])
def test_run_terminate_with_vol_attached(self):
    """Make sure it is possible to run and terminate instance with volume
    attached.
    """
    instance = self._create_fake_instance_obj()

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    instances = db.instance_get_all(self.context)
    LOG.info("Running instances: %s", instances)
    self.assertEqual(len(instances), 1)

    # Stub out every cinder interaction so attach/detach are pure no-ops.
    def fake_check_attach(*args, **kwargs):
        pass

    def fake_reserve_volume(*args, **kwargs):
        pass

    def fake_volume_get(self, context, volume_id):
        return {'id': volume_id}

    def fake_terminate_connection(self, context, volume_id, connector):
        pass

    def fake_detach(self, context, volume_id):
        pass

    bdms = []

    def fake_rpc_reserve_block_device_name(self, context, instance, device,
                                           volume_id, **kwargs):
        # Create a real BDM row so terminate has something to clean up.
        bdm = objects.BlockDeviceMapping(
            **{'source_type': 'volume',
               'destination_type': 'volume',
               'volume_id': 1,
               'instance_uuid': instance['uuid'],
               'device_name': '/dev/vdc'})
        bdm.create(context)
        bdms.append(bdm)
        return bdm

    self.stubs.Set(cinder.API, 'get', fake_volume_get)
    self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
    self.stubs.Set(cinder.API, 'reserve_volume',
                   fake_reserve_volume)
    self.stubs.Set(cinder.API, 'terminate_connection',
                   fake_terminate_connection)
    self.stubs.Set(cinder.API, 'detach', fake_detach)
    self.stubs.Set(compute_rpcapi.ComputeAPI,
                   'reserve_block_device_name',
                   fake_rpc_reserve_block_device_name)

    self.compute_api.attach_volume(self.context, instance, 1,
                                   '/dev/vdc')

    self.compute.terminate_instance(self.context,
                                    instance, bdms, [])

    instances = db.instance_get_all(self.context)
    LOG.info("After terminating instances: %s", instances)
    self.assertEqual(len(instances), 0)
    # Terminating must also have removed the BDM rows.
    bdms = db.block_device_mapping_get_all_by_instance(self.context,
                                                       instance['uuid'])
    self.assertEqual(len(bdms), 0)
def test_run_terminate_no_image(self):
    """Make sure instance started without image (from volume)
    can be terminated without issues.
    """
    params = {'image_ref': ''}
    instance = self._create_fake_instance_obj(params)
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    self._assert_state({'vm_state': vm_states.ACTIVE,
                        'task_state': None})

    self.compute.terminate_instance(self.context, instance, [], [])
    instances = db.instance_get_all(self.context)
    self.assertEqual(len(instances), 0)
def test_terminate_no_network(self):
    # This is as reported in LP bug 1008875
    instance = self._create_fake_instance_obj()

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    instances = db.instance_get_all(self.context)
    LOG.info("Running instances: %s", instances)
    self.assertEqual(len(instances), 1)
    # No expectations recorded: ReplayAll just asserts nothing
    # unexpected is called on any stubbed mock during terminate.
    self.mox.ReplayAll()

    self.compute.terminate_instance(self.context, instance, [], [])

    instances = db.instance_get_all(self.context)
    LOG.info("After terminating instances: %s", instances)
    self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
    # Make sure timestamps are set for launched and destroyed.
    instance = self._create_fake_instance_obj()
    instance['launched_at'] = None
    self.assertIsNone(instance['launched_at'])
    self.assertIsNone(instance['deleted_at'])

    launch = timeutils.utcnow()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    instance.refresh()
    # launched_at must be stamped after we initiated the boot.
    self.assertTrue(instance['launched_at'].replace(tzinfo=None) > launch)
    self.assertIsNone(instance['deleted_at'])

    terminate = timeutils.utcnow()
    self.compute.terminate_instance(self.context, instance, [], [])

    # Re-read the soft-deleted row to inspect deleted_at.
    with utils.temporary_mutation(self.context, read_deleted='only'):
        instance = db.instance_get_by_uuid(self.context,
                                           instance['uuid'])
    self.assertTrue(instance['launched_at'].replace(
        tzinfo=None) < terminate)
    self.assertTrue(instance['deleted_at'].replace(
        tzinfo=None) > terminate)
def test_run_terminate_deallocate_net_failure_sets_error_state(self):
    # A failure during network deallocation on terminate must leave the
    # instance in the ERROR vm_state.
    instance = self._create_fake_instance_obj()

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    instances = db.instance_get_all(self.context)
    LOG.info("Running instances: %s", instances)
    self.assertEqual(len(instances), 1)

    def _fake_deallocate_network(*args, **kwargs):
        raise test.TestingException()

    self.stubs.Set(self.compute, '_deallocate_network',
                   _fake_deallocate_network)

    try:
        self.compute.terminate_instance(self.context, instance, [], [])
    except test.TestingException:
        # Expected: terminate re-raises after setting ERROR state.
        pass

    instance = db.instance_get_by_uuid(self.context, instance['uuid'])
    self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_stop(self):
    # Ensure instance can be stopped.
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    # stop_instance expects the POWERING_OFF task state to be set.
    db.instance_update(self.context, instance['uuid'],
                       {"task_state": task_states.POWERING_OFF})
    inst_uuid = instance['uuid']
    extra = ['system_metadata', 'metadata']
    inst_obj = objects.Instance.get_by_uuid(self.context,
                                            inst_uuid,
                                            expected_attrs=extra)
    self.compute.stop_instance(self.context, instance=inst_obj)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_start(self):
    # Ensure instance can be started.
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    # Stop first so there is something to start.
    db.instance_update(self.context, instance['uuid'],
                       {"task_state": task_states.POWERING_OFF})
    extra = ['system_metadata', 'metadata']
    inst_uuid = instance['uuid']
    inst_obj = objects.Instance.get_by_uuid(self.context,
                                            inst_uuid,
                                            expected_attrs=extra)
    self.compute.stop_instance(self.context, instance=inst_obj)
    inst_obj.task_state = task_states.POWERING_ON
    inst_obj.save(self.context)
    self.compute.start_instance(self.context, instance=inst_obj)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_stop_start_no_image(self):
    # Stop/start must also work for an instance booted without an image
    # (e.g. boot-from-volume).
    params = {'image_ref': ''}
    instance = self._create_fake_instance_obj(params)
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    db.instance_update(self.context, instance['uuid'],
                       {"task_state": task_states.POWERING_OFF})
    extra = ['system_metadata', 'metadata']
    inst_uuid = instance['uuid']
    inst_obj = objects.Instance.get_by_uuid(self.context,
                                            inst_uuid,
                                            expected_attrs=extra)
    self.compute.stop_instance(self.context, instance=inst_obj)
    inst_obj.task_state = task_states.POWERING_ON
    inst_obj.save(self.context)
    self.compute.start_instance(self.context, instance=inst_obj)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_rescue(self):
    # Ensure instance can be rescued and unrescued.

    called = {'rescued': False,
              'unrescued': False}

    def fake_rescue(self, context, instance_ref, network_info, image_meta,
                    rescue_password):
        called['rescued'] = True

    self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)

    def fake_unrescue(self, instance_ref, network_info):
        called['unrescued'] = True

    self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
                   fake_unrescue)

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    instance.task_state = task_states.RESCUING
    instance.save()
    self.compute.rescue_instance(self.context, instance, None)
    self.assertTrue(called['rescued'])

    instance.task_state = task_states.UNRESCUING
    instance.save()
    self.compute.unrescue_instance(self.context, instance)
    self.assertTrue(called['unrescued'])

    self.compute.terminate_instance(self.context, instance, [], [])
def test_rescue_notifications(self):
    # Ensure notifications on instance rescue.
    def fake_rescue(self, context, instance_ref, network_info, image_meta,
                    rescue_password):
        pass
    self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    # Only capture notifications emitted by the rescue itself.
    fake_notifier.NOTIFICATIONS = []
    instance.task_state = task_states.RESCUING
    instance.save()
    self.compute.rescue_instance(self.context, instance, None)

    expected_notifications = ['compute.instance.rescue.start',
                              'compute.instance.exists',
                              'compute.instance.rescue.end']
    self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
                     expected_notifications)
    # Every notification must carry the standard instance payload.
    for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
        self.assertEqual(msg.event_type, expected_notifications[n])
        self.assertEqual(msg.priority, 'INFO')
        payload = msg.payload
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['instance_id'], instance.uuid)
        self.assertEqual(payload['instance_type'], 'm1.tiny')
        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
        self.assertEqual(str(payload['instance_type_id']), str(type_id))
        self.assertIn('display_name', payload)
        self.assertIn('created_at', payload)
        self.assertIn('launched_at', payload)
        image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
        self.assertEqual(payload['image_ref_url'], image_ref_url)
    # The rescue.start notification additionally names the rescue image.
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertIn('rescue_image_name', msg.payload)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_unrescue_notifications(self):
    # Ensure notifications on instance unrescue.
    def fake_unrescue(self, instance_ref, network_info):
        pass
    self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
                   fake_unrescue)

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    # Only capture notifications emitted by the unrescue itself.
    fake_notifier.NOTIFICATIONS = []
    instance.task_state = task_states.UNRESCUING
    instance.save()
    self.compute.unrescue_instance(self.context, instance)

    expected_notifications = ['compute.instance.unrescue.start',
                              'compute.instance.unrescue.end']
    self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
                     expected_notifications)
    # Every notification must carry the standard instance payload.
    for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
        self.assertEqual(msg.event_type, expected_notifications[n])
        self.assertEqual(msg.priority, 'INFO')
        payload = msg.payload
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['instance_id'], instance.uuid)
        self.assertEqual(payload['instance_type'], 'm1.tiny')
        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
        self.assertEqual(str(payload['instance_type_id']), str(type_id))
        self.assertIn('display_name', payload)
        self.assertIn('created_at', payload)
        self.assertIn('launched_at', payload)
        image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
        self.assertEqual(payload['image_ref_url'], image_ref_url)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_rescue_handle_err(self):
    # If the driver fails to rescue, instance state should remain the same
    # and the exception should be converted to InstanceNotRescuable
    inst_obj = self._create_fake_instance_obj()
    self.mox.StubOutWithMock(self.compute, '_get_rescue_image')
    self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')

    self.compute._get_rescue_image(
        mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn({})
    nova.virt.fake.FakeDriver.rescue(
        mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password'
    ).AndRaise(RuntimeError("Try again later"))

    self.mox.ReplayAll()

    expected_message = ('Instance %s cannot be rescued: '
                        'Driver Error: Try again later' % inst_obj.uuid)
    # Use a sentinel state to prove rescue_instance does not touch it.
    inst_obj.vm_state = 'some_random_state'

    with testtools.ExpectedException(
            exception.InstanceNotRescuable, expected_message):
        self.compute.rescue_instance(
            self.context, instance=inst_obj,
            rescue_password='password')

    self.assertEqual('some_random_state', inst_obj.vm_state)
@mock.patch.object(nova.compute.utils, "get_image_metadata")
@mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
def test_rescue_with_image_specified(self, mock_rescue,
                                     mock_get_image_metadata):
    # An explicitly supplied rescue_image_ref must be the image whose
    # metadata is fetched and passed to the driver.
    image_ref = "image-ref"
    rescue_image_meta = {}
    params = {"task_state": task_states.RESCUING}
    instance = self._create_fake_instance_obj(params=params)

    ctxt = context.get_admin_context()
    mock_context = mock.Mock()
    mock_context.elevated.return_value = ctxt

    mock_get_image_metadata.return_value = rescue_image_meta

    self.compute.rescue_instance(mock_context, instance=instance,
                                 rescue_password="password",
                                 rescue_image_ref=image_ref)

    mock_get_image_metadata.assert_called_with(ctxt,
                                               self.compute.image_api,
                                               image_ref, instance)
    mock_rescue.assert_called_with(ctxt, instance, [],
                                   rescue_image_meta, 'password')
    self.compute.terminate_instance(ctxt, instance, [], [])
@mock.patch.object(nova.compute.utils, "get_image_metadata")
@mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
def test_rescue_with_base_image_when_image_not_specified(self,
        mock_rescue, mock_get_image_metadata):
    # When no rescue image is supplied, the instance's base image
    # (system_metadata image_base_image_ref) is used instead.
    image_ref = "image-ref"
    system_meta = {"image_base_image_ref": image_ref}
    rescue_image_meta = {}
    params = {"task_state": task_states.RESCUING,
              "system_metadata": system_meta}
    instance = self._create_fake_instance_obj(params=params)

    ctxt = context.get_admin_context()
    mock_context = mock.Mock()
    mock_context.elevated.return_value = ctxt

    mock_get_image_metadata.return_value = rescue_image_meta

    self.compute.rescue_instance(mock_context, instance=instance,
                                 rescue_password="password")

    mock_get_image_metadata.assert_called_with(ctxt,
                                               self.compute.image_api,
                                               image_ref, instance)
    mock_rescue.assert_called_with(ctxt, instance, [],
                                   rescue_image_meta, 'password')
    # NOTE(review): the sibling test terminates with ctxt; this one uses
    # self.context — presumably equivalent here, but confirm.
    self.compute.terminate_instance(self.context, instance, [], [])
def test_power_on(self):
    # Ensure instance can be powered on.

    called = {'power_on': False}

    def fake_driver_power_on(self, context, instance, network_info,
                             block_device_info):
        called['power_on'] = True

    self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
                   fake_driver_power_on)

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    extra = ['system_metadata', 'metadata']
    inst_obj = objects.Instance.get_by_uuid(self.context,
                                            instance['uuid'],
                                            expected_attrs=extra)
    inst_obj.task_state = task_states.POWERING_ON
    inst_obj.save(self.context)
    self.compute.start_instance(self.context, instance=inst_obj)
    self.assertTrue(called['power_on'])
    self.compute.terminate_instance(self.context, inst_obj, [], [])
def test_power_off(self):
    # Ensure instance can be powered off.

    called = {'power_off': False}

    def fake_driver_power_off(self, instance,
                              shutdown_timeout, shutdown_attempts):
        called['power_off'] = True

    self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
                   fake_driver_power_off)

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    extra = ['system_metadata', 'metadata']
    inst_obj = objects.Instance.get_by_uuid(self.context,
                                            instance['uuid'],
                                            expected_attrs=extra)
    inst_obj.task_state = task_states.POWERING_OFF
    inst_obj.save(self.context)
    self.compute.stop_instance(self.context, instance=inst_obj)
    self.assertTrue(called['power_off'])
    self.compute.terminate_instance(self.context, inst_obj, [], [])
def test_pause(self):
    # Ensure instance can be paused and unpaused.
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None, None, True,
                              None, False)
    instance.task_state = task_states.PAUSING
    instance.save()
    # Reset captured notifications so only pause events are checked.
    fake_notifier.NOTIFICATIONS = []
    self.compute.pause_instance(self.context, instance=instance)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type,
                     'compute.instance.pause.start')
    msg = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg.event_type,
                     'compute.instance.pause.end')
    instance.task_state = task_states.UNPAUSING
    instance.save()
    fake_notifier.NOTIFICATIONS = []
    self.compute.unpause_instance(self.context, instance=instance)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type,
                     'compute.instance.unpause.start')
    msg = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg.event_type,
                     'compute.instance.unpause.end')
    self.compute.terminate_instance(self.context, instance, [], [])
def test_suspend(self):
    # ensure instance can be suspended and resumed.
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    instance.task_state = task_states.SUSPENDING
    instance.save()
    self.compute.suspend_instance(self.context, instance)
    instance.task_state = task_states.RESUMING
    instance.save()
    self.compute.resume_instance(self.context, instance)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_suspend_error(self):
    # Ensure vm_state is ERROR when suspend error occurs.
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    with mock.patch.object(self.compute.driver, 'suspend',
                           side_effect=test.TestingException):
        self.assertRaises(test.TestingException,
                          self.compute.suspend_instance,
                          self.context,
                          instance=instance)

        instance = db.instance_get_by_uuid(self.context, instance.uuid)
        self.assertEqual(vm_states.ERROR, instance.vm_state)
def test_suspend_not_implemented(self):
    # Ensure expected exception is raised and the vm_state of instance
    # restore to original value if suspend is not implemented by driver
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    with mock.patch.object(self.compute.driver, 'suspend',
                           side_effect=NotImplementedError('suspend test')):
        self.assertRaises(NotImplementedError,
                          self.compute.suspend_instance,
                          self.context,
                          instance=instance)

        # Unlike a generic failure, NotImplementedError must leave the
        # instance ACTIVE rather than flipping it to ERROR.
        instance = db.instance_get_by_uuid(self.context, instance.uuid)
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
def test_suspend_rescued(self):
    # ensure rescued instance can be suspended and resumed.
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    instance.vm_state = vm_states.RESCUED
    instance.task_state = task_states.SUSPENDING
    instance.save()

    self.compute.suspend_instance(self.context, instance)
    self.assertEqual(instance.vm_state, vm_states.SUSPENDED)

    instance.task_state = task_states.RESUMING
    instance.save()
    self.compute.resume_instance(self.context, instance)
    # Resume must restore the pre-suspend RESCUED state, not ACTIVE.
    self.assertEqual(instance.vm_state, vm_states.RESCUED)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_resume_no_old_state(self):
    """Resuming without a recorded old_vm_state must land on ACTIVE."""
    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, inst, {}, {}, [], None,
                              None, True, None, False)

    # Mark as suspended directly, without going through suspend_instance,
    # so no old_vm_state is stashed in system metadata.
    inst.vm_state = vm_states.SUSPENDED
    inst.task_state = task_states.RESUMING
    inst.save()

    self.compute.resume_instance(self.context, inst)
    self.assertEqual(vm_states.ACTIVE, inst.vm_state)

    self.compute.terminate_instance(self.context, inst, [], [])
def test_rebuild(self):
    """Happy-path rebuild of a running instance onto the same image."""
    # Ensure instance can be rebuilt.
    instance = self._create_fake_instance_obj()
    image_ref = instance['image_ref']
    sys_metadata = db.instance_system_metadata_get(self.context,
                                                   instance['uuid'])
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    # rebuild_instance requires the REBUILDING task state to be set first.
    db.instance_update(self.context, instance['uuid'],
                       {"task_state": task_states.REBUILDING})
    self.compute.rebuild_instance(self.context, instance,
                                  image_ref, image_ref,
                                  injected_files=[],
                                  new_pass="new_password",
                                  orig_sys_metadata=sys_metadata,
                                  bdms=[], recreate=False,
                                  on_shared_storage=False)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_rebuild_driver(self):
    """A driver-provided rebuild() overrides the manager's default path."""
    # Make sure virt drivers can override default rebuild
    called = {'rebuild': False}

    def fake(**kwargs):
        # Mimic the task-state transitions a real driver rebuild performs,
        # so the manager's expected_task_state checks still pass.
        instance = kwargs['instance']
        instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
        instance.save(expected_task_state=[task_states.REBUILDING])
        instance.task_state = task_states.REBUILD_SPAWNING
        instance.save(
            expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
        called['rebuild'] = True

    self.stubs.Set(self.compute.driver, 'rebuild', fake)
    instance = self._create_fake_instance_obj()
    image_ref = instance['image_ref']
    sys_metadata = db.instance_system_metadata_get(self.context,
                                                   instance['uuid'])
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    db.instance_update(self.context, instance['uuid'],
                       {"task_state": task_states.REBUILDING})
    self.compute.rebuild_instance(self.context, instance,
                                  image_ref, image_ref,
                                  injected_files=[],
                                  new_pass="new_password",
                                  orig_sys_metadata=sys_metadata,
                                  bdms=[], recreate=False,
                                  on_shared_storage=False)
    self.assertTrue(called['rebuild'])
    self.compute.terminate_instance(self.context, instance, [], [])
def test_rebuild_no_image(self):
    """Rebuild must also work for an instance booted without an image."""
    # Ensure instance can be rebuilt when started with no image.
    params = {'image_ref': ''}
    instance = self._create_fake_instance_obj(params)
    sys_metadata = db.instance_system_metadata_get(self.context,
                                                   instance['uuid'])
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    db.instance_update(self.context, instance['uuid'],
                       {"task_state": task_states.REBUILDING})
    # Empty image refs mirror the empty image_ref the instance booted with.
    self.compute.rebuild_instance(self.context, instance,
                                  '', '', injected_files=[],
                                  new_pass="new_password",
                                  orig_sys_metadata=sys_metadata, bdms=[],
                                  recreate=False, on_shared_storage=False)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_rebuild_launched_at_time(self):
    """Rebuilding must refresh launched_at to the time of the rebuild."""
    # Ensure instance can be rebuilt.
    old_time = datetime.datetime(2012, 4, 1)
    cur_time = datetime.datetime(2012, 12, 21, 12, 21)
    timeutils.set_time_override(old_time)
    # Guarantee the global time override is removed even if an assertion
    # below fails; otherwise the fake clock leaks into subsequent tests.
    self.addCleanup(timeutils.clear_time_override)
    instance = self._create_fake_instance_obj()
    image_ref = instance['image_ref']

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)
    # Advance the fake clock so launched_at visibly changes on rebuild.
    timeutils.set_time_override(cur_time)
    db.instance_update(self.context, instance['uuid'],
                       {"task_state": task_states.REBUILDING})
    self.compute.rebuild_instance(self.context, instance,
                                  image_ref, image_ref,
                                  injected_files=[],
                                  new_pass="new_password",
                                  orig_sys_metadata={},
                                  bdms=[], recreate=False,
                                  on_shared_storage=False)
    instance.refresh()
    self.assertEqual(cur_time,
                     instance['launched_at'].replace(tzinfo=None))
    self.compute.terminate_instance(self.context, instance, [], [])
def test_rebuild_with_injected_files(self):
    """Injected files must reach driver.spawn base64-decoded on rebuild."""
    # Ensure instance can be rebuilt with injected files.
    injected_files = [
        ('/a/b/c', base64.b64encode('foobarbaz')),
    ]

    self.decoded_files = [
        ('/a/b/c', 'foobarbaz'),
    ]

    def _spawn(context, instance, image_meta, injected_files,
               admin_password, network_info, block_device_info):
        # The manager decodes the base64 payload before calling spawn.
        self.assertEqual(self.decoded_files, injected_files)

    self.stubs.Set(self.compute.driver, 'spawn', _spawn)
    instance = self._create_fake_instance_obj()
    image_ref = instance['image_ref']
    sys_metadata = db.instance_system_metadata_get(self.context,
                                                   instance['uuid'])
    db.instance_update(self.context, instance['uuid'],
                       {"task_state": task_states.REBUILDING})
    self.compute.rebuild_instance(self.context, instance,
                                  image_ref, image_ref,
                                  injected_files=injected_files,
                                  new_pass="new_password",
                                  orig_sys_metadata=sys_metadata,
                                  bdms=[], recreate=False,
                                  on_shared_storage=False)
    self.compute.terminate_instance(self.context, instance, [], [])
def _test_reboot(self, soft,
                 test_delete=False, test_unrescue=False,
                 fail_reboot=False, fail_running=False):
    """Drive reboot_instance against mox-recorded expectations.

    :param soft: True for a SOFT reboot, False for HARD
    :param test_delete: the final DB update raises InstanceNotFound,
        simulating the instance being deleted mid-reboot
    :param test_unrescue: start the instance in the RESCUED vm_state
    :param fail_reboot: driver.reboot raises InstanceNotFound
    :param fail_running: combined with fail_reboot, the instance is
        still reported RUNNING afterwards, so the failure is tolerated
    """
    reboot_type = soft and 'SOFT' or 'HARD'
    task_pending = (soft and task_states.REBOOT_PENDING
                    or task_states.REBOOT_PENDING_HARD)
    task_started = (soft and task_states.REBOOT_STARTED
                    or task_states.REBOOT_STARTED_HARD)
    expected_task = (soft and task_states.REBOOTING
                     or task_states.REBOOTING_HARD)
    expected_tasks = (soft and (task_states.REBOOTING,
                                task_states.REBOOT_PENDING,
                                task_states.REBOOT_STARTED)
                      or (task_states.REBOOTING_HARD,
                          task_states.REBOOT_PENDING_HARD,
                          task_states.REBOOT_STARTED_HARD))

    # This is a true unit test, so we don't need the network stubs.
    fake_network.unset_stub_network_methods(self.stubs)

    self.mox.StubOutWithMock(self.compute,
                             '_get_instance_block_device_info')
    self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
    self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
    self.mox.StubOutWithMock(self.compute, '_instance_update')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(self.compute, '_get_power_state')
    self.mox.StubOutWithMock(self.compute.driver, 'reboot')

    # FIXME(comstud): I don't feel like the context needs to
    # be elevated at all. Hopefully remove elevated from
    # reboot_instance and remove the stub here in a future patch.
    # econtext would just become self.context below then.
    econtext = self.context.elevated()

    db_instance = fake_instance.fake_db_instance(
        **dict(uuid='fake-instance',
               power_state=power_state.NOSTATE,
               vm_state=vm_states.ACTIVE,
               task_state=expected_task,
               launched_at=timeutils.utcnow()))
    instance = objects.Instance._from_db_object(econtext,
                                                objects.Instance(),
                                                db_instance)
    updated_dbinstance1 = fake_instance.fake_db_instance(
        **dict(uuid='updated-instance1',
               power_state=10003,
               vm_state=vm_states.ACTIVE,
               task_state=expected_task,
               launched_at=timeutils.utcnow()))
    updated_dbinstance2 = fake_instance.fake_db_instance(
        **dict(uuid='updated-instance2',
               power_state=10003,
               vm_state=vm_states.ACTIVE,
               task_state=expected_task,
               launched_at=timeutils.utcnow()))

    if test_unrescue:
        instance.vm_state = vm_states.RESCUED
    instance.obj_reset_changes()

    fake_nw_model = network_model.NetworkInfo()

    fake_block_dev_info = 'fake_block_dev_info'
    fake_power_state1 = 10001
    fake_power_state2 = power_state.RUNNING
    fake_power_state3 = 10002

    # Beginning of calls we expect.
    self.mox.StubOutWithMock(self.context, 'elevated')
    self.context.elevated().AndReturn(econtext)

    self.compute._get_instance_block_device_info(
        econtext, instance).AndReturn(fake_block_dev_info)
    self.compute._get_instance_nw_info(econtext,
                                       instance).AndReturn(
                                           fake_nw_model)
    self.compute._notify_about_instance_usage(econtext,
                                              instance,
                                              'reboot.start')
    self.compute._get_power_state(econtext,
                                  instance).AndReturn(fake_power_state1)
    db.instance_update_and_get_original(econtext, instance['uuid'],
                                        {'task_state': task_pending,
                                         'expected_task_state': expected_tasks,
                                         'power_state': fake_power_state1},
                                        update_cells=False,
                                        columns_to_join=['system_metadata']
                                        ).AndReturn((None,
                                                     updated_dbinstance1))
    expected_nw_info = fake_nw_model
    db.instance_update_and_get_original(econtext,
                                        updated_dbinstance1['uuid'],
                                        {'task_state': task_started,
                                         'expected_task_state': task_pending},
                                        update_cells=False,
                                        columns_to_join=['system_metadata']
                                        ).AndReturn((None,
                                                     updated_dbinstance1))

    # Annoying. driver.reboot is wrapped in a try/except, and
    # doesn't re-raise. It eats exception generated by mox if
    # this is called with the wrong args, so we have to hack
    # around it.
    reboot_call_info = {}
    expected_call_info = {
        'args': (econtext, instance, expected_nw_info,
                 reboot_type),
        'kwargs': {'block_device_info': fake_block_dev_info}}
    fault = exception.InstanceNotFound(instance_id='instance-0000')

    def fake_reboot(*args, **kwargs):
        reboot_call_info['args'] = args
        reboot_call_info['kwargs'] = kwargs

        # NOTE(sirp): Since `bad_volumes_callback` is a function defined
        # within `reboot_instance`, we don't have access to its value and
        # can't stub it out, thus we skip that comparison.
        kwargs.pop('bad_volumes_callback')
        if fail_reboot:
            raise fault

    self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)

    # Power state should be updated again
    if not fail_reboot or fail_running:
        new_power_state = fake_power_state2
        self.compute._get_power_state(econtext,
                                      instance).AndReturn(fake_power_state2)
    else:
        new_power_state = fake_power_state3
        self.compute._get_power_state(econtext,
                                      instance).AndReturn(fake_power_state3)

    if test_delete:
        fault = exception.InstanceNotFound(
            instance_id=instance['uuid'])
        db.instance_update_and_get_original(
            econtext, updated_dbinstance1['uuid'],
            {'power_state': new_power_state,
             'task_state': None,
             'vm_state': vm_states.ACTIVE},
            update_cells=False,
            columns_to_join=['system_metadata'],
            ).AndRaise(fault)
        self.compute._notify_about_instance_usage(
            econtext,
            instance,
            'reboot.end')
    elif fail_reboot and not fail_running:
        db.instance_update_and_get_original(
            econtext, updated_dbinstance1['uuid'],
            {'vm_state': vm_states.ERROR},
            update_cells=False,
            columns_to_join=['system_metadata'],
            ).AndRaise(fault)
    else:
        db.instance_update_and_get_original(
            econtext, updated_dbinstance1['uuid'],
            {'power_state': new_power_state,
             'task_state': None,
             'vm_state': vm_states.ACTIVE},
            update_cells=False,
            columns_to_join=['system_metadata'],
            ).AndReturn((None, updated_dbinstance2))
        if fail_running:
            self.compute._notify_about_instance_usage(econtext, instance,
                                                      'reboot.error',
                                                      fault=fault)
        self.compute._notify_about_instance_usage(
            econtext,
            instance,
            'reboot.end')
    self.mox.ReplayAll()

    if not fail_reboot or fail_running:
        self.compute.reboot_instance(self.context, instance=instance,
                                     block_device_info=None,
                                     reboot_type=reboot_type)
    else:
        self.assertRaises(exception.InstanceNotFound,
                          self.compute.reboot_instance,
                          self.context, instance=instance,
                          block_device_info=None,
                          reboot_type=reboot_type)

    # driver.reboot couldn't be mox-verified (see above), so compare the
    # captured call by hand.
    self.assertEqual(expected_call_info, reboot_call_info)
def test_reboot_soft(self):
    """Soft reboot, happy path."""
    self._test_reboot(True)
def test_reboot_soft_and_delete(self):
    """Soft reboot racing with a concurrent delete."""
    self._test_reboot(True, True)
def test_reboot_soft_and_rescued(self):
    """Soft reboot of a rescued instance."""
    self._test_reboot(True, False, True)
def test_reboot_soft_and_delete_and_rescued(self):
    """Soft reboot of a rescued instance racing with a delete."""
    self._test_reboot(True, True, True)
def test_reboot_hard(self):
    """Hard reboot, happy path."""
    self._test_reboot(False)
def test_reboot_hard_and_delete(self):
    """Hard reboot racing with a concurrent delete."""
    self._test_reboot(False, True)
def test_reboot_hard_and_rescued(self):
    """Hard reboot of a rescued instance."""
    self._test_reboot(False, False, True)
def test_reboot_hard_and_delete_and_rescued(self):
    """Hard reboot of a rescued instance racing with a delete."""
    self._test_reboot(False, True, True)
def test_reboot_fail(self):
    """driver.reboot fails and the instance is not running afterwards."""
    self._test_reboot(False, fail_reboot=True)
def test_reboot_fail_running(self):
    """driver.reboot fails but the instance is still running afterwards."""
    self._test_reboot(False, fail_reboot=True,
                      fail_running=True)
def test_get_instance_block_device_info_source_image(self):
    """An image-backed volume BDM is translated into the driver format."""
    bdms = block_device_obj.block_device_make_list(self.context,
            [fake_block_device.FakeDbBlockDeviceDict({
                'id': 3,
                'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
                'instance_uuid': 'fake-instance',
                'device_name': '/dev/vda',
                'connection_info': '{"driver_volume_type": "rbd"}',
                'source_type': 'image',
                'destination_type': 'volume',
                'image_id': 'fake-image-id-1',
                'boot_index': 0
            })])

    with (mock.patch.object(
            objects.BlockDeviceMappingList,
            'get_by_instance_uuid',
            return_value=bdms)
    ) as mock_get_by_instance:
        block_device_info = (
            self.compute._get_instance_block_device_info(
                self.context, self._create_fake_instance())
        )
        # The JSON connection_info is decoded and only driver-relevant
        # fields survive the translation.
        expected = {
            'swap': None,
            'ephemerals': [],
            'block_device_mapping': [{
                'connection_info': {
                    'driver_volume_type': 'rbd'
                },
                'mount_device': '/dev/vda',
                'delete_on_termination': False
            }]
        }
        # No bdms were passed in, so the list is fetched from the DB.
        self.assertTrue(mock_get_by_instance.called)
        self.assertEqual(block_device_info, expected)
def test_get_instance_block_device_info_passed_bdms(self):
    """Explicitly passed bdms must be used without hitting the DB."""
    bdms = block_device_obj.block_device_make_list(self.context,
            [fake_block_device.FakeDbBlockDeviceDict({
                'id': 3,
                'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
                'device_name': '/dev/vdd',
                'connection_info': '{"driver_volume_type": "rbd"}',
                'source_type': 'volume',
                'destination_type': 'volume'})
            ])
    with (mock.patch.object(
            objects.BlockDeviceMappingList,
            'get_by_instance_uuid')) as mock_get_by_instance:
        block_device_info = (
            self.compute._get_instance_block_device_info(
                self.context, self._create_fake_instance(), bdms=bdms)
        )
        expected = {
            'swap': None,
            'ephemerals': [],
            'block_device_mapping': [{
                'connection_info': {
                    'driver_volume_type': 'rbd'
                },
                'mount_device': '/dev/vdd',
                'delete_on_termination': False
            }]
        }
        # bdms were supplied by the caller, so no DB lookup happens.
        self.assertFalse(mock_get_by_instance.called)
        self.assertEqual(block_device_info, expected)
def test_get_instance_block_device_info_swap_and_ephemerals(self):
    """Blank-local BDMs split into 'swap' and 'ephemerals' legacy keys."""
    instance = self._create_fake_instance()

    ephemeral0 = fake_block_device.FakeDbBlockDeviceDict({
        'id': 1, 'instance_uuid': 'fake-instance',
        'device_name': '/dev/vdb',
        'source_type': 'blank',
        'destination_type': 'local',
        'device_type': 'disk',
        'disk_bus': 'virtio',
        'delete_on_termination': True,
        'guest_format': None,
        'volume_size': 1,
        'boot_index': -1
    })
    ephemeral1 = fake_block_device.FakeDbBlockDeviceDict({
        'id': 2, 'instance_uuid': 'fake-instance',
        'device_name': '/dev/vdc',
        'source_type': 'blank',
        'destination_type': 'local',
        'device_type': 'disk',
        'disk_bus': 'virtio',
        'delete_on_termination': True,
        'guest_format': None,
        'volume_size': 2,
        'boot_index': -1
    })
    # guest_format == 'swap' is what routes this BDM into the 'swap' slot.
    swap = fake_block_device.FakeDbBlockDeviceDict({
        'id': 3, 'instance_uuid': 'fake-instance',
        'device_name': '/dev/vdd',
        'source_type': 'blank',
        'destination_type': 'local',
        'device_type': 'disk',
        'disk_bus': 'virtio',
        'delete_on_termination': True,
        'guest_format': 'swap',
        'volume_size': 1,
        'boot_index': -1
    })

    bdms = block_device_obj.block_device_make_list(self.context,
        [swap, ephemeral0, ephemeral1])

    with (
        mock.patch.object(objects.BlockDeviceMappingList,
                          'get_by_instance_uuid', return_value=bdms)
    ) as mock_get_by_instance_uuid:
        expected_block_device_info = {
            'swap': {'device_name': '/dev/vdd', 'swap_size': 1},
            'ephemerals': [{'device_name': '/dev/vdb', 'num': 0, 'size': 1,
                            'virtual_name': 'ephemeral0'},
                           {'device_name': '/dev/vdc', 'num': 1, 'size': 2,
                            'virtual_name': 'ephemeral1'}],
            'block_device_mapping': []
        }

        block_device_info = (
            self.compute._get_instance_block_device_info(
                self.context, instance)
        )

        mock_get_by_instance_uuid.assert_called_once_with(self.context,
                                                          instance['uuid'])
        self.assertEqual(expected_block_device_info, block_device_info)
def test_inject_network_info(self):
    """The manager must delegate inject_network_info to the driver."""
    calls = {'inject': False}

    def record_inject(self, instance, network_info):
        calls['inject'] = True

    self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
                   record_inject)
    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, inst, {}, {}, [], None,
                              None, True, None, False)
    self.compute.inject_network_info(self.context, instance=inst)
    self.assertTrue(calls['inject'])
    self.compute.terminate_instance(self.context,
                                    inst, [], [])
def test_reset_network(self):
    """The manager must call the driver's reset_network exactly once."""
    calls = {'count': 0}

    def record_reset(self, instance):
        calls['count'] += 1

    self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
                   record_reset)
    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, inst, {}, {}, [], None,
                              None, True, None, False)
    self.compute.reset_network(self.context, inst)
    self.assertEqual(1, calls['count'])
    self.compute.terminate_instance(self.context, inst, [], [])
def _get_snapshotting_instance(self):
    """Boot an instance and move it into IMAGE_SNAPSHOT_PENDING."""
    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, inst, {}, {}, [], None,
                              None, True, None, False)
    # snapshot_instance expects this task state to already be set.
    inst.task_state = task_states.IMAGE_SNAPSHOT_PENDING
    inst.save()
    return inst
def test_snapshot(self):
    """Snapshotting a running instance succeeds."""
    inst_obj = self._get_snapshotting_instance()
    self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                   instance=inst_obj)
def test_snapshot_no_image(self):
    """Snapshotting works even when the instance has no image_ref."""
    inst_obj = self._get_snapshotting_instance()
    inst_obj.image_ref = ''
    inst_obj.save()
    self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                   instance=inst_obj)
def _test_snapshot_fails(self, raise_during_cleanup, method,
                         expected_state=True):
    """Common scenario: driver.snapshot raises during snapshot/backup.

    :param raise_during_cleanup: the image-service delete (cleanup of the
        half-created snapshot image) also raises
    :param method: 'snapshot' or 'backup' — which manager entry point to hit
    :param expected_state: whether the image delete is expected to have
        been attempted
    """
    def fake_snapshot(*args, **kwargs):
        raise test.TestingException()

    self.fake_image_delete_called = False

    def fake_delete(self_, context, image_id):
        self.fake_image_delete_called = True
        if raise_during_cleanup:
            raise Exception()

    self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
    fake_image.stub_out_image_service(self.stubs)
    self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
    inst_obj = self._get_snapshotting_instance()
    if method == 'snapshot':
        self.assertRaises(test.TestingException,
                          self.compute.snapshot_instance,
                          self.context, image_id='fakesnap',
                          instance=inst_obj)
    else:
        self.assertRaises(test.TestingException,
                          self.compute.backup_instance,
                          self.context, image_id='fakesnap',
                          instance=inst_obj, backup_type='fake',
                          rotation=1)

    self.assertEqual(expected_state, self.fake_image_delete_called)
    # The failure must still reset the task state.
    self._assert_state({'task_state': None})
@mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
def test_backup_fails(self, mock_rotate):
    """A failing backup cleans up its image and resets task state."""
    self._test_snapshot_fails(False, 'backup')
@mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
def test_backup_fails_cleanup_ignores_exception(self, mock_rotate):
    """A failing image-delete during backup cleanup is swallowed."""
    self._test_snapshot_fails(True, 'backup')
@mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
@mock.patch.object(nova.compute.manager.ComputeManager,
                   '_do_snapshot_instance')
def test_backup_fails_rotate_backup(self, mock_snap, mock_rotate):
    """A failure in backup rotation behaves like a snapshot failure."""
    mock_rotate.side_effect = test.TestingException()
    # expected_state=False: the snapshot itself is mocked out, so no
    # image delete is attempted.
    self._test_snapshot_fails(True, 'backup', False)
def test_snapshot_fails(self):
    """A failing snapshot cleans up its image and resets task state."""
    self._test_snapshot_fails(False, 'snapshot')
def test_snapshot_fails_cleanup_ignores_exception(self):
    """A failing image-delete during snapshot cleanup is swallowed."""
    self._test_snapshot_fails(True, 'snapshot')
def _test_snapshot_deletes_image_on_failure(self, status, exc):
    """Common scenario: driver.snapshot raises *exc* while the snapshot
    image is in *status*; callers then assert whether the image was
    deleted via self.fake_image_delete_called.
    """
    self.fake_image_delete_called = False

    def fake_show(self_, context, image_id, **kwargs):
        self.assertEqual('fakesnap', image_id)
        image = {'id': image_id,
                 'status': status}
        return image

    self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)

    def fake_delete(self_, context, image_id):
        self.fake_image_delete_called = True
        self.assertEqual('fakesnap', image_id)

    self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)

    def fake_snapshot(*args, **kwargs):
        raise exc

    self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)

    fake_image.stub_out_image_service(self.stubs)

    inst_obj = self._get_snapshotting_instance()

    self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                   instance=inst_obj)
def test_snapshot_fails_with_glance_error(self):
    """ImageNotFound during snapshot must not trigger an image delete."""
    image_not_found = exception.ImageNotFound(image_id='fakesnap')
    self._test_snapshot_deletes_image_on_failure('error', image_not_found)
    self.assertFalse(self.fake_image_delete_called)
    self._assert_state({'task_state': None})
def test_snapshot_fails_with_task_state_error(self):
    """An unexpected DELETING task state deletes the image only when the
    image is still in the 'error' state, not once it is 'active'.
    """
    deleting_state_error = exception.UnexpectedDeletingTaskStateError(
        expected=task_states.IMAGE_SNAPSHOT, actual=task_states.DELETING)
    self._test_snapshot_deletes_image_on_failure(
        'error', deleting_state_error)
    self.assertTrue(self.fake_image_delete_called)
    self._test_snapshot_deletes_image_on_failure(
        'active', deleting_state_error)
    self.assertFalse(self.fake_image_delete_called)
def test_snapshot_fails_with_instance_not_found(self):
    """InstanceNotFound deletes the image only while it is in 'error'."""
    instance_not_found = exception.InstanceNotFound(instance_id='uuid')
    self._test_snapshot_deletes_image_on_failure(
        'error', instance_not_found)
    self.assertTrue(self.fake_image_delete_called)
    self._test_snapshot_deletes_image_on_failure(
        'active', instance_not_found)
    self.assertFalse(self.fake_image_delete_called)
def test_snapshot_handles_cases_when_instance_is_deleted(self):
    """Snapshotting an instance already in DELETING must not blow up."""
    inst = self._get_snapshotting_instance()
    inst.task_state = task_states.DELETING
    inst.save()
    self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                   instance=inst)
def test_snapshot_handles_cases_when_instance_is_not_found(self):
    """Snapshotting a just-destroyed instance must not blow up."""
    inst = self._get_snapshotting_instance()
    # Destroy through a second object so `inst` still holds stale data.
    doomed = objects.Instance.get_by_uuid(self.context, inst.uuid)
    doomed.destroy()
    self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                   instance=inst)
def _assert_state(self, state_dict):
    """Assert the single DB instance matches the given state fields.

    Only keys present in state_dict ('vm_state', 'task_state',
    'power_state') are checked.
    """
    instances = db.instance_get_all(self.context)
    self.assertEqual(len(instances), 1)
    actual = instances[0]
    for field in ('vm_state', 'task_state', 'power_state'):
        if field in state_dict:
            self.assertEqual(state_dict[field], actual[field])
def test_console_output(self):
    """Full console output is returned when no tail length is given."""
    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              inst, {}, {}, [], None,
                              None, True, None, False)

    console_log = self.compute.get_console_output(self.context,
                                                  instance=inst,
                                                  tail_length=None)
    self.assertEqual('FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE', console_log)
    self.compute.terminate_instance(self.context, inst, [], [])
def test_console_output_tail(self):
    """tail_length limits console output to the last N lines."""
    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              inst, {}, {}, [], None,
                              None, True, None, False)

    console_log = self.compute.get_console_output(self.context,
                                                  instance=inst,
                                                  tail_length=2)
    self.assertEqual('ANOTHER\nLAST LINE', console_log)
    self.compute.terminate_instance(self.context, inst, [], [])
def test_console_output_not_implemented(self):
    """NotImplementedError from the driver is surfaced over RPC."""
    def fake_not_implemented(*args, **kwargs):
        raise NotImplementedError()

    self.stubs.Set(self.compute.driver, 'get_console_output',
                   fake_not_implemented)

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None,
                              None, True, None, False)

    # Over RPC the error arrives wrapped as an ExpectedException...
    self.assertRaises(messaging.ExpectedException,
                      self.compute.get_console_output, self.context,
                      instance, 0)

    # ...ExceptionHelper unwraps it to the original exception type.
    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(NotImplementedError,
                      self.compute.get_console_output, self.context,
                      instance, 0)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_console_output_instance_not_found(self):
    """InstanceNotFound from the driver is surfaced over RPC."""
    def fake_not_found(*args, **kwargs):
        raise exception.InstanceNotFound(instance_id='fake-instance')

    self.stubs.Set(self.compute.driver, 'get_console_output',
                   fake_not_found)

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None,
                              None, True, None, False)

    # Over RPC the error arrives wrapped as an ExpectedException...
    self.assertRaises(messaging.ExpectedException,
                      self.compute.get_console_output, self.context,
                      instance, 0)

    # ...ExceptionHelper unwraps it to the original exception type.
    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.InstanceNotFound,
                      self.compute.get_console_output, self.context,
                      instance, 0)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_novnc_vnc_console(self):
    """A noVNC console can be obtained when VNC is enabled."""
    self.flags(vnc_enabled=True)
    self.flags(enabled=False, group='spice')

    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              inst, {}, {}, [], None,
                              None, True, None, False)

    # Try with the full instance
    vnc_console = self.compute.get_vnc_console(self.context, 'novnc',
                                               instance=inst)
    self.assertTrue(vnc_console)

    self.compute.terminate_instance(self.context, inst, [], [])
def test_validate_console_port_vnc(self):
    """validate_console_port accepts the port reported by the driver."""
    self.flags(vnc_enabled=True)
    self.flags(enabled=True, group='spice')
    instance = self._create_fake_instance_obj()

    def fake_driver_get_console(*args, **kwargs):
        return ctype.ConsoleVNC(host="fake_host", port=5900)

    self.stubs.Set(self.compute.driver, "get_vnc_console",
                   fake_driver_get_console)

    self.assertTrue(self.compute.validate_console_port(
        context=self.context, instance=instance, port=5900,
        console_type="novnc"))
def test_validate_console_port_spice(self):
    """validate_console_port matches the SPICE console's main port."""
    self.flags(vnc_enabled=True)
    self.flags(enabled=True, group='spice')
    instance = self._create_fake_instance_obj()

    def fake_driver_get_console(*args, **kwargs):
        return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)

    self.stubs.Set(self.compute.driver, "get_spice_console",
                   fake_driver_get_console)

    self.assertTrue(self.compute.validate_console_port(
        context=self.context, instance=instance, port=5900,
        console_type="spice-html5"))
def test_validate_console_port_rdp(self):
    """validate_console_port matches the RDP console's port."""
    self.flags(enabled=True, group='rdp')
    instance = self._create_fake_instance_obj()

    def fake_driver_get_console(*args, **kwargs):
        return ctype.ConsoleRDP(host="fake_host", port=5900)

    self.stubs.Set(self.compute.driver, "get_rdp_console",
                   fake_driver_get_console)

    self.assertTrue(self.compute.validate_console_port(
        context=self.context, instance=instance, port=5900,
        console_type="rdp-html5"))
def test_validate_console_port_wrong_port(self):
    """A port that does not match the console's port is rejected."""
    self.flags(vnc_enabled=True)
    self.flags(enabled=True, group='spice')
    instance = self._create_fake_instance_obj()

    def fake_driver_get_console(*args, **kwargs):
        return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)

    self.stubs.Set(self.compute.driver, "get_vnc_console",
                   fake_driver_get_console)

    self.assertFalse(self.compute.validate_console_port(
        context=self.context, instance=instance, port="wrongport",
        console_type="spice-html5"))
def test_xvpvnc_vnc_console(self):
    """An xvpvnc console can be obtained when VNC is enabled."""
    self.flags(vnc_enabled=True)
    self.flags(enabled=False, group='spice')

    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              inst, {}, {}, [], None,
                              None, True, None, False)

    vnc_console = self.compute.get_vnc_console(self.context, 'xvpvnc',
                                               instance=inst)
    self.assertTrue(vnc_console)
    self.compute.terminate_instance(self.context, inst, [], [])
def test_invalid_vnc_console_type(self):
    """An unrecognised VNC console type raises ConsoleTypeInvalid."""
    # Raise useful error if console type is an unrecognised string.
    self.flags(vnc_enabled=True)
    self.flags(enabled=False, group='spice')

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None,
                              None, True, None, False)

    # Wrapped as ExpectedException over RPC; unwrapped by ExceptionHelper.
    self.assertRaises(messaging.ExpectedException,
                      self.compute.get_vnc_console,
                      self.context, 'invalid', instance=instance)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.ConsoleTypeInvalid,
                      self.compute.get_vnc_console,
                      self.context, 'invalid', instance=instance)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_missing_vnc_console_type(self):
    """A None VNC console type raises ConsoleTypeInvalid."""
    # Raise useful error is console type is None.
    self.flags(vnc_enabled=True)
    self.flags(enabled=False, group='spice')

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None,
                              None, True, None, False)

    # Wrapped as ExpectedException over RPC; unwrapped by ExceptionHelper.
    self.assertRaises(messaging.ExpectedException,
                      self.compute.get_vnc_console,
                      self.context, None, instance=instance)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.ConsoleTypeInvalid,
                      self.compute.get_vnc_console,
                      self.context, None, instance=instance)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_get_vnc_console_not_implemented(self):
    """NotImplementedError from the driver is surfaced over RPC."""
    self.stubs.Set(self.compute.driver, 'get_vnc_console',
                   fake_not_implemented)

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None,
                              None, True, None, False)

    # Wrapped as ExpectedException over RPC; unwrapped by ExceptionHelper.
    self.assertRaises(messaging.ExpectedException,
                      self.compute.get_vnc_console,
                      self.context, 'novnc', instance=instance)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(NotImplementedError,
                      self.compute.get_vnc_console,
                      self.context, 'novnc', instance=instance)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_spicehtml5_spice_console(self):
    """A spice-html5 console can be obtained when SPICE is enabled."""
    self.flags(vnc_enabled=False)
    self.flags(enabled=True, group='spice')

    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              inst, {}, {}, [], None,
                              None, True, None, False)

    # Try with the full instance
    spice_console = self.compute.get_spice_console(self.context,
                                                   'spice-html5',
                                                   instance=inst)
    self.assertTrue(spice_console)

    self.compute.terminate_instance(self.context, inst, [], [])
def test_invalid_spice_console_type(self):
    """An unrecognised SPICE console type raises ConsoleTypeInvalid."""
    # Raise useful error if console type is an unrecognised string
    self.flags(vnc_enabled=False)
    self.flags(enabled=True, group='spice')

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None,
                              None, True, None, False)

    # Wrapped as ExpectedException over RPC; unwrapped by ExceptionHelper.
    self.assertRaises(messaging.ExpectedException,
                      self.compute.get_spice_console,
                      self.context, 'invalid', instance=instance)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.ConsoleTypeInvalid,
                      self.compute.get_spice_console,
                      self.context, 'invalid', instance=instance)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_missing_spice_console_type(self):
    """A None SPICE console type raises ConsoleTypeInvalid."""
    # Raise useful error is console type is None
    self.flags(vnc_enabled=False)
    self.flags(enabled=True, group='spice')

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None,
                              None, True, None, False)

    # Wrapped as ExpectedException over RPC; unwrapped by ExceptionHelper.
    self.assertRaises(messaging.ExpectedException,
                      self.compute.get_spice_console,
                      self.context, None, instance=instance)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.ConsoleTypeInvalid,
                      self.compute.get_spice_console,
                      self.context, None, instance=instance)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_rdphtml5_rdp_console(self):
    """An rdp-html5 console can be obtained when RDP is enabled."""
    self.flags(vnc_enabled=False)
    self.flags(enabled=True, group='rdp')

    inst = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              inst, {}, {}, [], None,
                              None, True, None, False)

    # Try with the full instance
    rdp_console = self.compute.get_rdp_console(self.context, 'rdp-html5',
                                               instance=inst)
    self.assertTrue(rdp_console)

    self.compute.terminate_instance(self.context, inst, [], [])
def test_invalid_rdp_console_type(self):
    """An unrecognised RDP console type raises ConsoleTypeInvalid."""
    # Raise useful error if console type is an unrecognised string
    self.flags(vnc_enabled=False)
    self.flags(enabled=True, group='rdp')

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None,
                              None, True, None, False)

    # Wrapped as ExpectedException over RPC; unwrapped by ExceptionHelper.
    self.assertRaises(messaging.ExpectedException,
                      self.compute.get_rdp_console,
                      self.context, 'invalid', instance=instance)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.ConsoleTypeInvalid,
                      self.compute.get_rdp_console,
                      self.context, 'invalid', instance=instance)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_missing_rdp_console_type(self):
    """A None RDP console type raises ConsoleTypeInvalid."""
    # Raise useful error is console type is None
    self.flags(vnc_enabled=False)
    self.flags(enabled=True, group='rdp')

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
                              instance, {}, {}, [], None,
                              None, True, None, False)

    # Wrapped as ExpectedException over RPC; unwrapped by ExceptionHelper.
    self.assertRaises(messaging.ExpectedException,
                      self.compute.get_rdp_console,
                      self.context, None, instance=instance)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.ConsoleTypeInvalid,
                      self.compute.get_rdp_console,
                      self.context, None, instance=instance)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_vnc_console_instance_not_ready(self):
    """Driver raising InstanceNotFound while the instance is still
    BUILDING must surface as InstanceNotReady to the caller.
    """
    self.flags(vnc_enabled=True)
    self.flags(enabled=False, group='spice')
    instance = self._create_fake_instance_obj(
            params={'vm_state': vm_states.BUILDING})

    def fake_driver_get_console(*args, **kwargs):
        raise exception.InstanceNotFound(instance_id=instance['uuid'])

    self.stubs.Set(self.compute.driver, "get_vnc_console",
                   fake_driver_get_console)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.InstanceNotReady,
            self.compute.get_vnc_console, self.context, 'novnc',
            instance=instance)
def test_spice_console_instance_not_ready(self):
    """SPICE variant: InstanceNotFound from the driver while BUILDING
    becomes InstanceNotReady.
    """
    self.flags(vnc_enabled=False)
    self.flags(enabled=True, group='spice')
    instance = self._create_fake_instance_obj(
            params={'vm_state': vm_states.BUILDING})

    def fake_driver_get_console(*args, **kwargs):
        raise exception.InstanceNotFound(instance_id=instance['uuid'])

    self.stubs.Set(self.compute.driver, "get_spice_console",
                   fake_driver_get_console)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.InstanceNotReady,
            self.compute.get_spice_console, self.context, 'spice-html5',
            instance=instance)
def test_rdp_console_instance_not_ready(self):
    """RDP variant: InstanceNotFound from the driver while BUILDING
    becomes InstanceNotReady.
    """
    self.flags(vnc_enabled=False)
    self.flags(enabled=True, group='rdp')
    instance = self._create_fake_instance_obj(
            params={'vm_state': vm_states.BUILDING})

    def fake_driver_get_console(*args, **kwargs):
        raise exception.InstanceNotFound(instance_id=instance['uuid'])

    self.stubs.Set(self.compute.driver, "get_rdp_console",
                   fake_driver_get_console)

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.InstanceNotReady,
            self.compute.get_rdp_console, self.context, 'rdp-html5',
            instance=instance)
def test_vnc_console_disabled(self):
    """Requesting a VNC console with vnc_enabled=False raises
    ConsoleTypeUnavailable.
    """
    self.flags(vnc_enabled=False)
    instance = self._create_fake_instance_obj(
            params={'vm_state': vm_states.BUILDING})

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.ConsoleTypeUnavailable,
            self.compute.get_vnc_console, self.context, 'novnc',
            instance=instance)
def test_spice_console_disabled(self):
    """Requesting a SPICE console with the spice group disabled raises
    ConsoleTypeUnavailable.
    """
    self.flags(enabled=False, group='spice')
    instance = self._create_fake_instance_obj(
            params={'vm_state': vm_states.BUILDING})

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.ConsoleTypeUnavailable,
            self.compute.get_spice_console, self.context, 'spice-html5',
            instance=instance)
def test_rdp_console_disabled(self):
    """Requesting an RDP console with the rdp group disabled raises
    ConsoleTypeUnavailable.
    """
    self.flags(enabled=False, group='rdp')
    instance = self._create_fake_instance_obj(
            params={'vm_state': vm_states.BUILDING})

    self.compute = utils.ExceptionHelper(self.compute)

    self.assertRaises(exception.ConsoleTypeUnavailable,
            self.compute.get_rdp_console, self.context, 'rdp-html5',
            instance=instance)
def test_diagnostics(self):
    # Make sure we can get diagnostics for an instance.
    # Expected values come from the fake virt driver's canned diagnostics.
    expected_diagnostic = {'cpu0_time': 17300000000,
                           'memory': 524288,
                           'vda_errors': -1,
                           'vda_read': 262144,
                           'vda_read_req': 112,
                           'vda_write': 5778432,
                           'vda_write_req': 488,
                           'vnet1_rx': 2070139,
                           'vnet1_rx_drop': 0,
                           'vnet1_rx_errors': 0,
                           'vnet1_rx_packets': 26701,
                           'vnet1_tx': 140208,
                           'vnet1_tx_drop': 0,
                           'vnet1_tx_errors': 0,
                           'vnet1_tx_packets': 662,
                           }
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context,
            instance, {}, {}, [], None,
            None, True, None, False)

    diagnostics = self.compute.get_diagnostics(self.context,
            instance=instance)
    self.assertEqual(diagnostics, expected_diagnostic)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_instance_diagnostics(self):
    # Make sure we can get diagnostics for an instance.
    # Uses the versioned (v1.0) diagnostics API; expected payload comes
    # from the fake virt driver.
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
            None, True, None, False)

    diagnostics = self.compute.get_instance_diagnostics(self.context,
            instance=instance)
    expected = {'config_drive': True,
                'cpu_details': [{'time': 17300000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': 'fake-disk-id',
                                  'read_bytes': 262144,
                                  'read_requests': 112,
                                  'write_bytes': 5778432,
                                  'write_requests': 488}],
                'driver': 'fake',
                'hypervisor_os': 'fake-os',
                'memory_details': {'maximum': 524288, 'used': 0},
                'nic_details': [{'mac_address': '01:23:45:67:89:ab',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 2070139,
                                 'rx_packets': 26701,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 140208,
                                 'tx_packets': 662}],
                'state': 'running',
                'uptime': 46664,
                'version': '1.0'}
    self.assertEqual(expected, diagnostics)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_add_fixed_ip_usage_notification(self):
    """add_fixed_ip_to_instance emits a start and an end notification."""
    def dummy(*args, **kwargs):
        pass

    # Stub out the network call and the follow-up network refresh so
    # only the notification behavior is exercised.
    self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance',
                   dummy)
    self.stubs.Set(nova.compute.manager.ComputeManager,
                   'inject_network_info', dummy)
    self.stubs.Set(nova.compute.manager.ComputeManager,
                   'reset_network', dummy)
    instance = self._create_fake_instance_obj()

    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
    self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
                                          instance=instance)

    # One .start and one .end notification.
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_remove_fixed_ip_usage_notification(self):
    """remove_fixed_ip_from_instance emits a start and an end
    notification.
    """
    def dummy(*args, **kwargs):
        pass

    # Stub out the network call and the follow-up network refresh so
    # only the notification behavior is exercised.
    self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance',
                   dummy)
    self.stubs.Set(nova.compute.manager.ComputeManager,
                   'inject_network_info', dummy)
    self.stubs.Set(nova.compute.manager.ComputeManager,
                   'reset_network', dummy)
    instance = self._create_fake_instance_obj()

    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
    self.compute.remove_fixed_ip_from_instance(self.context, 1,
                                               instance=instance)

    # One .start and one .end notification.
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_run_instance_usage_notification(self, request_spec=None):
    # Ensure run instance generates appropriate usage notification.
    # Also reused by the image/volume-metadata variants below, which
    # pass a request_spec to vary the expected image name.
    request_spec = request_spec or {}
    instance = self._create_fake_instance_obj()
    expected_image_name = request_spec.get('image', {}).get('name', '')
    self.compute.run_instance(self.context, instance, request_spec,
                              {}, [], None, None, True, None, False)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    instance.refresh()
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type, 'compute.instance.create.start')
    # The last event is the one with the sugar in it.
    msg = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg.priority, 'INFO')
    self.assertEqual(msg.event_type, 'compute.instance.create.end')
    payload = msg.payload
    self.assertEqual(payload['tenant_id'], self.project_id)
    self.assertEqual(expected_image_name, payload['image_name'])
    self.assertEqual(payload['user_id'], self.user_id)
    self.assertEqual(payload['instance_id'], instance['uuid'])
    self.assertEqual(payload['instance_type'], 'm1.tiny')
    type_id = flavors.get_flavor_by_name('m1.tiny')['id']
    self.assertEqual(str(payload['instance_type_id']), str(type_id))
    flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
    self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
    self.assertEqual(payload['state'], 'active')
    self.assertIn('display_name', payload)
    self.assertIn('created_at', payload)
    self.assertIn('launched_at', payload)
    self.assertIn('fixed_ips', payload)
    self.assertTrue(payload['launched_at'])
    image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
    self.assertEqual(payload['image_ref_url'], image_ref_url)
    self.assertEqual('Success', payload['message'])
    self.compute.terminate_instance(self.context, instance, [], [])
def test_run_instance_image_usage_notification(self):
    """The usage notification carries the image name from the spec."""
    spec = {'image': {'name': 'fake_name', 'key': 'value'}}
    self.test_run_instance_usage_notification(request_spec=spec)
def test_run_instance_usage_notification_volume_meta(self):
    # Volume's image metadata won't contain the image name
    spec = {'image': {'key': 'value'}}
    self.test_run_instance_usage_notification(request_spec=spec)
def test_run_instance_end_notification_on_abort(self):
    # Test that an end notif is sent if the build is aborted
    instance = self._create_fake_instance_obj()
    instance_uuid = instance['uuid']

    def build_inst_abort(*args, **kwargs):
        raise exception.BuildAbortException(reason="already deleted",
                instance_uuid=instance_uuid)

    self.stubs.Set(self.compute, '_build_instance', build_inst_abort)

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
            None, True, None, False)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type, 'compute.instance.create.start')
    msg = fake_notifier.NOTIFICATIONS[1]

    # An abort still yields a normal-priority .end event whose message
    # carries the abort reason.
    self.assertEqual(msg.event_type, 'compute.instance.create.end')
    self.assertEqual('INFO', msg.priority)
    payload = msg.payload
    message = payload['message']
    self.assertNotEqual(-1, message.find("already deleted"))
def test_run_instance_error_notification_on_reschedule(self):
    # Test that error notif is sent if the build got rescheduled
    instance = self._create_fake_instance_obj()
    instance_uuid = instance['uuid']

    def build_inst_fail(*args, **kwargs):
        raise exception.RescheduledException(instance_uuid=instance_uuid,
                reason="something bad happened")

    self.stubs.Set(self.compute, '_build_instance', build_inst_fail)

    self.compute.run_instance(self.context, instance, {}, {}, [], None,
            None, True, None, False)

    self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type, 'compute.instance.create.start')
    msg = fake_notifier.NOTIFICATIONS[1]

    # A reschedule produces an ERROR-priority .error event with the
    # reschedule reason in the message.
    self.assertEqual(msg.event_type, 'compute.instance.create.error')
    self.assertEqual('ERROR', msg.priority)
    payload = msg.payload
    message = payload['message']
    self.assertNotEqual(-1, message.find("something bad happened"))
def test_run_instance_error_notification_on_failure(self):
    # Test that error notif is sent if build fails hard
    instance = self._create_fake_instance_obj()

    def build_inst_fail(*args, **kwargs):
        raise test.TestingException("i'm dying")

    self.stubs.Set(self.compute, '_build_instance', build_inst_fail)

    # Unlike abort/reschedule, a hard failure propagates to the caller.
    self.assertRaises(test.TestingException, self.compute.run_instance,
            self.context, instance, {}, {}, [], None, None, True, None,
            False)

    self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type, 'compute.instance.create.start')
    msg = fake_notifier.NOTIFICATIONS[1]

    self.assertEqual(msg.event_type, 'compute.instance.create.error')
    self.assertEqual('ERROR', msg.priority)
    payload = msg.payload
    message = payload['message']
    self.assertNotEqual(-1, message.find("i'm dying"))
def test_terminate_usage_notification(self):
    # Ensure terminate_instance generates correct usage notification.
    # Time is overridden so terminated_at/deleted_at can be compared
    # against a known value.
    old_time = datetime.datetime(2012, 4, 1)
    cur_time = datetime.datetime(2012, 12, 21, 12, 21)
    timeutils.set_time_override(old_time)

    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
            None, True, None, False)
    # Drop the create-time notifications; only delete is under test.
    fake_notifier.NOTIFICATIONS = []
    timeutils.set_time_override(cur_time)
    self.compute.terminate_instance(self.context, instance, [], [])

    # delete.start, shutdown.start, shutdown.end, delete.end
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)

    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.priority, 'INFO')
    self.assertEqual(msg.event_type, 'compute.instance.delete.start')
    msg1 = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg1.event_type, 'compute.instance.shutdown.start')
    msg1 = fake_notifier.NOTIFICATIONS[2]
    self.assertEqual(msg1.event_type, 'compute.instance.shutdown.end')
    msg1 = fake_notifier.NOTIFICATIONS[3]
    self.assertEqual(msg1.event_type, 'compute.instance.delete.end')
    payload = msg1.payload
    self.assertEqual(payload['tenant_id'], self.project_id)
    self.assertEqual(payload['user_id'], self.user_id)
    self.assertEqual(payload['instance_id'], instance['uuid'])
    self.assertEqual(payload['instance_type'], 'm1.tiny')
    type_id = flavors.get_flavor_by_name('m1.tiny')['id']
    self.assertEqual(str(payload['instance_type_id']), str(type_id))
    flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
    self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
    self.assertIn('display_name', payload)
    self.assertIn('created_at', payload)
    self.assertIn('launched_at', payload)
    self.assertIn('terminated_at', payload)
    self.assertIn('deleted_at', payload)
    self.assertEqual(payload['terminated_at'], timeutils.strtime(cur_time))
    self.assertEqual(payload['deleted_at'], timeutils.strtime(cur_time))
    image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
    self.assertEqual(payload['image_ref_url'], image_ref_url)
def test_run_instance_existing(self):
    # Ensure failure when running an instance that already exists.
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
            None, True, None, False)
    self.assertRaises(exception.InstanceExists,
                      self.compute.run_instance,
                      self.context, instance, {}, {}, [], None, None, True,
                      None, False)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_run_instance_queries_macs(self):
    # run_instance should ask the driver for node mac addresses and pass
    # that to the network_api in use.
    fake_network.unset_stub_network_methods(self.stubs)
    instance = self._create_fake_instance_obj()

    macs = set(['01:23:45:67:89:ab'])

    # mox record phase: allocate_for_instance must be called with the
    # macs the driver reported.
    self.mox.StubOutWithMock(self.compute.network_api,
                             "allocate_for_instance")
    self.compute.network_api.allocate_for_instance(
        mox.IgnoreArg(),
        mox.IgnoreArg(),
        requested_networks=None,
        vpn=False, macs=macs,
        security_groups=[], dhcp_options=None).AndReturn(
            fake_network.fake_get_instance_nw_info(self.stubs, 1, 1))

    self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
    self.compute.driver.macs_for_instance(
        mox.IsA(instance_obj.Instance)).AndReturn(macs)
    self.mox.ReplayAll()

    self.compute.run_instance(self.context, instance, {}, {}, None, None,
            None, True, None, False)
def _create_server_group(self):
    """Create an anti-affinity server group containing one instance on
    this compute host, asserting the servergroup.create notification,
    and return the group.
    """
    group_instance = self._create_fake_instance_obj(
            params=dict(host=self.compute.host))

    instance_group = objects.InstanceGroup(self.context)
    instance_group.user_id = self.user_id
    instance_group.project_id = self.project_id
    instance_group.name = 'messi'
    instance_group.uuid = str(uuid.uuid4())
    instance_group.members = [group_instance.uuid]
    instance_group.policies = ['anti-affinity']
    fake_notifier.NOTIFICATIONS = []
    instance_group.create()
    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(instance_group.name, msg.payload['name'])
    self.assertEqual(instance_group.members, msg.payload['members'])
    self.assertEqual(instance_group.policies, msg.payload['policies'])
    self.assertEqual(instance_group.project_id, msg.payload['project_id'])
    self.assertEqual(instance_group.uuid, msg.payload['uuid'])
    self.assertEqual('servergroup.create', msg.event_type)
    return instance_group
def _run_instance_reschedules_on_anti_affinity_violation(self, group,
                                                         hint):
    """Building with a group scheduler hint that violates anti-affinity
    must raise RescheduledException.

    NOTE(review): the ``group`` parameter is unused here; it is kept so
    callers can pass the group they derived the hint from.
    """
    instance = self._create_fake_instance_obj()
    filter_properties = {'scheduler_hints': {'group': hint}}
    self.assertRaises(exception.RescheduledException,
                      self.compute._build_instance,
                      self.context, {}, filter_properties,
                      [], None, None, True, None, instance,
                      None, False)
def test_run_instance_reschedules_on_anti_affinity_violation_by_name(self):
    """Anti-affinity violation detected when the hint is the group name."""
    group = self._create_server_group()
    self._run_instance_reschedules_on_anti_affinity_violation(group,
            group.name)
def test_run_instance_reschedules_on_anti_affinity_violation_by_uuid(self):
    """Anti-affinity violation detected when the hint is the group uuid."""
    group = self._create_server_group()
    self._run_instance_reschedules_on_anti_affinity_violation(group,
            group.uuid)
def test_instance_set_to_error_on_uncaught_exception(self):
    # Test that instance is set to error state when exception is raised.
    instance = self._create_fake_instance_obj()

    self.mox.StubOutWithMock(self.compute.network_api,
                             "allocate_for_instance")
    self.mox.StubOutWithMock(self.compute.network_api,
                             "deallocate_for_instance")
    # Allocation blows up with a RemoteError...
    self.compute.network_api.allocate_for_instance(
            mox.IgnoreArg(),
            mox.IgnoreArg(),
            requested_networks=None,
            vpn=False, macs=None,
            security_groups=[], dhcp_options=None
            ).AndRaise(messaging.RemoteError())
    # ...and cleanup must still deallocate (possibly more than once).
    self.compute.network_api.deallocate_for_instance(
            mox.IgnoreArg(),
            mox.IgnoreArg(),
            requested_networks=None).MultipleTimes()

    fake_network.unset_stub_network_methods(self.stubs)

    self.mox.ReplayAll()

    self.assertRaises(messaging.RemoteError,
                      self.compute.run_instance,
                      self.context, instance, {}, {}, None, None, None,
                      True, None, False)

    instance.refresh()
    self.assertEqual(vm_states.ERROR, instance.vm_state)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_delete_instance_keeps_net_on_power_off_fail(self):
    """If driver.destroy fails with InstancePowerOffFailure, network
    resources must NOT be deallocated (the instance may come back).
    """
    self.mox.StubOutWithMock(self.compute.driver, 'destroy')
    self.mox.StubOutWithMock(self.compute, '_deallocate_network')
    exp = exception.InstancePowerOffFailure(reason='')
    self.compute.driver.destroy(mox.IgnoreArg(),
                                mox.IgnoreArg(),
                                mox.IgnoreArg(),
                                mox.IgnoreArg()).AndRaise(exp)
    # mox will detect if _deallocate_network gets called unexpectedly
    self.mox.ReplayAll()
    instance = self._create_fake_instance_obj()
    self.assertRaises(exception.InstancePowerOffFailure,
                      self.compute._delete_instance,
                      self.context,
                      instance,
                      [],
                      self.none_quotas)
def test_delete_instance_loses_net_on_other_fail(self):
    """Any other destroy failure still deallocates the network."""
    self.mox.StubOutWithMock(self.compute.driver, 'destroy')
    self.mox.StubOutWithMock(self.compute, '_deallocate_network')
    exp = test.TestingException()
    self.compute.driver.destroy(mox.IgnoreArg(),
                                mox.IgnoreArg(),
                                mox.IgnoreArg(),
                                mox.IgnoreArg()).AndRaise(exp)
    # Unlike the power-off case, deallocation IS expected here.
    self.compute._deallocate_network(mox.IgnoreArg(),
                                     mox.IgnoreArg(),
                                     mox.IgnoreArg())
    self.mox.ReplayAll()
    instance = self._create_fake_instance_obj()
    self.assertRaises(test.TestingException,
                      self.compute._delete_instance,
                      self.context,
                      instance,
                      [],
                      self.none_quotas)
def test_delete_instance_deletes_console_auth_tokens(self):
    """Deleting an instance removes its console auth tokens via the
    consoleauth RPC API.
    """
    instance = self._create_fake_instance_obj()
    self.flags(vnc_enabled=True)

    self.tokens_deleted = False

    def fake_delete_tokens(*args, **kwargs):
        self.tokens_deleted = True

    cauth_rpcapi = self.compute.consoleauth_rpcapi
    self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance',
                   fake_delete_tokens)

    self.compute._delete_instance(self.context, instance, [],
                                  self.none_quotas)

    self.assertTrue(self.tokens_deleted)
def test_delete_instance_deletes_console_auth_tokens_cells(self):
    """With cells enabled, console auth token deletion goes through the
    cells RPC API instead of consoleauth directly.
    """
    instance = self._create_fake_instance_obj()
    self.flags(vnc_enabled=True)
    self.flags(enable=True, group='cells')

    self.tokens_deleted = False

    def fake_delete_tokens(*args, **kwargs):
        self.tokens_deleted = True

    cells_rpcapi = self.compute.cells_rpcapi
    self.stubs.Set(cells_rpcapi, 'consoleauth_delete_tokens',
                   fake_delete_tokens)

    self.compute._delete_instance(self.context, instance,
                                  [], self.none_quotas)

    self.assertTrue(self.tokens_deleted)
def test_instance_termination_exception_sets_error(self):
    """Test that we handle InstanceTerminationFailure
    which is propagated up from the underlying driver.
    """
    instance = self._create_fake_instance_obj()

    def fake_delete_instance(context, instance, bdms,
                             reservations=None):
        raise exception.InstanceTerminationFailure(reason='')

    self.stubs.Set(self.compute, '_delete_instance',
                   fake_delete_instance)
    self.assertRaises(exception.InstanceTerminationFailure,
                      self.compute.terminate_instance,
                      self.context,
                      instance, [], [])
    # The failure must leave the instance in the ERROR state.
    instance = db.instance_get_by_uuid(self.context, instance['uuid'])
    self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_network_is_deallocated_on_spawn_failure(self):
    # When a spawn fails the network must be deallocated.
    instance = self._create_fake_instance_obj()

    self.mox.StubOutWithMock(self.compute, "_prep_block_device")
    self.compute._prep_block_device(
            mox.IgnoreArg(), mox.IgnoreArg(),
            mox.IgnoreArg()).AndRaise(messaging.RemoteError('', '', ''))

    self.mox.ReplayAll()

    self.assertRaises(messaging.RemoteError,
                      self.compute.run_instance,
                      self.context, instance, {}, {}, None, None, None,
                      True, None, False)

    self.compute.terminate_instance(self.context, instance, [], [])
def test_lock(self):
    # FIXME(comstud): This test is such crap. This is testing
    # compute API lock functionality in a test class for the compute
    # manager by running an instance. Hello? We should just have
    # unit tests in test_compute_api that test the check_instance_lock
    # decorator and make sure that appropriate compute_api methods
    # have the decorator.
    instance = self._create_fake_instance_obj()
    instance_uuid = instance['uuid']
    self.compute.run_instance(self.context, instance, {}, {}, None, None,
            None, True, None, False)

    non_admin_context = context.RequestContext(None,
                                               None,
                                               is_admin=False)

    def check_task_state(task_state):
        instance = db.instance_get_by_uuid(self.context, instance_uuid)
        self.assertEqual(instance['task_state'], task_state)

    instance.refresh()

    # should fail with locked nonadmin context
    self.compute_api.lock(self.context, instance)
    self.assertRaises(exception.InstanceIsLocked,
                      self.compute_api.reboot,
                      non_admin_context, instance, 'SOFT')
    check_task_state(None)

    # should fail with invalid task state
    self.compute_api.unlock(self.context, instance)
    instance.task_state = task_states.REBOOTING
    instance.save()
    self.assertRaises(exception.InstanceInvalidState,
                      self.compute_api.reboot,
                      non_admin_context, instance, 'SOFT')
    check_task_state(task_states.REBOOTING)

    # should succeed with admin context
    instance.task_state = None
    instance.save()
    self.compute_api.reboot(self.context, instance, 'SOFT')
    check_task_state(task_states.REBOOTING)
    self.compute.terminate_instance(self.context, instance, [], [])
def _check_locked_by(self, instance_uuid, locked_by):
    """Assert the DB lock state for the instance and return its record.

    ``locked_by`` of None means the instance must be unlocked.
    """
    instance = db.instance_get_by_uuid(self.context, instance_uuid)
    expect_locked = locked_by is not None
    self.assertEqual(instance['locked'], expect_locked)
    self.assertEqual(instance['locked_by'], locked_by)
    return instance
def test_override_owner_lock(self):
    # FIXME(comstud): This test is such crap. This is testing
    # compute API lock functionality in a test class for the compute
    # manager by running an instance. Hello? We should just have
    # unit tests in test_compute_api that test the check_instance_lock
    # decorator and make sure that appropriate compute_api methods
    # have the decorator.
    admin_context = context.RequestContext('admin-user',
                                           'admin-project',
                                           is_admin=True)

    instance = self._create_fake_instance_obj()
    instance_uuid = instance['uuid']
    self.compute.run_instance(self.context, instance, {}, {}, None, None,
            None, True, None, False)

    # Ensure that an admin can override the owner lock
    self.compute_api.lock(self.context, instance)
    self._check_locked_by(instance_uuid, 'owner')
    self.compute_api.unlock(admin_context, instance)
    self._check_locked_by(instance_uuid, None)
def test_upgrade_owner_lock(self):
    # FIXME(comstud): This test is such crap. This is testing
    # compute API lock functionality in a test class for the compute
    # manager by running an instance. Hello? We should just have
    # unit tests in test_compute_api that test the check_instance_lock
    # decorator and make sure that appropriate compute_api methods
    # have the decorator.
    admin_context = context.RequestContext('admin-user',
                                           'admin-project',
                                           is_admin=True)

    instance = self._create_fake_instance_obj()
    instance_uuid = instance['uuid']
    self.compute.run_instance(self.context, instance, {}, {}, None, None,
            None, True, None, False)

    # Ensure that an admin can upgrade the lock and that
    # the owner can no longer unlock
    self.compute_api.lock(self.context, instance)
    self.compute_api.lock(admin_context, instance)
    self._check_locked_by(instance_uuid, 'admin')
    instance.refresh()
    self.assertRaises(exception.PolicyNotAuthorized,
                      self.compute_api.unlock,
                      self.context, instance)
    self._check_locked_by(instance_uuid, 'admin')
    self.compute_api.unlock(admin_context, instance)
    self._check_locked_by(instance_uuid, None)
def _test_state_revert(self, instance, operation, pre_task_state,
                       kwargs=None, vm_state=None):
    """Run a compute manager operation that is forced to fail and verify
    the instance's task_state is reverted to None (and, optionally,
    that vm_state matches the expected failure state).
    """
    if kwargs is None:
        kwargs = {}

    # The API would have set task_state, so do that here to test
    # that the state gets reverted on failure
    db.instance_update(self.context, instance['uuid'],
                       {"task_state": pre_task_state})

    orig_elevated = self.context.elevated
    orig_notify = self.compute._notify_about_instance_usage

    def _get_an_exception(*args, **kwargs):
        raise test.TestingException()

    # Force a failure early in every operation by breaking elevated()
    # and the usage notifier.
    self.stubs.Set(self.context, 'elevated', _get_an_exception)
    self.stubs.Set(self.compute,
                   '_notify_about_instance_usage', _get_an_exception)

    func = getattr(self.compute, operation)

    self.assertRaises(test.TestingException,
            func, self.context, instance=instance, **kwargs)
    # self.context.elevated() is called in tearDown()
    self.stubs.Set(self.context, 'elevated', orig_elevated)
    self.stubs.Set(self.compute,
                   '_notify_about_instance_usage', orig_notify)

    # Fetch the instance's task_state and make sure it reverted to None.
    instance = db.instance_get_by_uuid(self.context, instance['uuid'])
    if vm_state:
        self.assertEqual(instance.vm_state, vm_state)
    self.assertIsNone(instance["task_state"])
def test_state_revert(self):
    # ensure that task_state is reverted after a failed operation.
    # Each tuple: (manager method, task_state the API would have set,
    # optional kwargs, optional expected vm_state after failure).
    migration = objects.Migration()
    migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
    migration.new_instance_type_id = '1'

    actions = [
        ("reboot_instance", task_states.REBOOTING,
                            {'block_device_info': [],
                             'reboot_type': 'SOFT'}),
        ("stop_instance", task_states.POWERING_OFF),
        ("start_instance", task_states.POWERING_ON),
        ("terminate_instance", task_states.DELETING,
                               {'bdms': [],
                                'reservations': []},
                               vm_states.ERROR),
        ("soft_delete_instance", task_states.SOFT_DELETING,
                                 {'reservations': []}),
        ("restore_instance", task_states.RESTORING),
        ("rebuild_instance", task_states.REBUILDING,
                             {'orig_image_ref': None,
                              'image_ref': None,
                              'injected_files': [],
                              'new_pass': '',
                              'orig_sys_metadata': {},
                              'bdms': [],
                              'recreate': False,
                              'on_shared_storage': False}),
        ("set_admin_password", task_states.UPDATING_PASSWORD,
                               {'new_pass': None}),
        ("rescue_instance", task_states.RESCUING,
                            {'rescue_password': None}),
        ("unrescue_instance", task_states.UNRESCUING),
        ("revert_resize", task_states.RESIZE_REVERTING,
                          {'migration': migration,
                           'reservations': []}),
        ("prep_resize", task_states.RESIZE_PREP,
                        {'image': {},
                         'instance_type': {},
                         'reservations': [],
                         'request_spec': {},
                         'filter_properties': {},
                         'node': None}),
        ("resize_instance", task_states.RESIZE_PREP,
                            {'migration': migration,
                             'image': {},
                             'reservations': [],
                             'instance_type': {}}),
        ("pause_instance", task_states.PAUSING),
        ("unpause_instance", task_states.UNPAUSING),
        ("suspend_instance", task_states.SUSPENDING),
        ("resume_instance", task_states.RESUMING),
        ]

    self._stub_out_resize_network_methods()
    instance = self._create_fake_instance_obj()
    for operation in actions:
        self._test_state_revert(instance, *operation)
def _ensure_quota_reservations_committed(self, instance):
    """Mock up commit of quota reservations."""
    # NOTE: list('fake_res') deliberately yields a list of single
    # characters -- any list-of-strings works as a fake reservation set.
    reservations = list('fake_res')
    self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
    nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations,
                             project_id=instance['project_id'],
                             user_id=instance['user_id'])
    # NOTE: this also triggers mox replay for the whole test.
    self.mox.ReplayAll()
    return reservations
def _ensure_quota_reservations_rolledback(self, instance):
    """Mock up rollback of quota reservations."""
    # NOTE: list('fake_res') deliberately yields a list of single
    # characters -- any list-of-strings works as a fake reservation set.
    reservations = list('fake_res')
    self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
    nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations,
                               project_id=instance['project_id'],
                               user_id=instance['user_id'])
    # NOTE: this also triggers mox replay for the whole test.
    self.mox.ReplayAll()
    return reservations
def test_quotas_successful_delete(self):
    """A successful terminate commits the quota reservations."""
    instance = self._create_fake_instance_obj()
    resvs = self._ensure_quota_reservations_committed(instance)
    self.compute.terminate_instance(self.context, instance,
                                    bdms=[], reservations=resvs)
def test_quotas_failed_delete(self):
    """A failed terminate rolls the quota reservations back."""
    instance = self._create_fake_instance_obj()

    def fake_shutdown_instance(*args, **kwargs):
        raise test.TestingException()

    self.stubs.Set(self.compute, '_shutdown_instance',
                   fake_shutdown_instance)

    resvs = self._ensure_quota_reservations_rolledback(instance)
    self.assertRaises(test.TestingException,
                      self.compute.terminate_instance,
                      self.context, instance,
                      bdms=[], reservations=resvs)
def test_quotas_successful_soft_delete(self):
    """A successful soft delete commits the quota reservations."""
    instance = self._create_fake_instance_obj(
            params=dict(task_state=task_states.SOFT_DELETING))
    resvs = self._ensure_quota_reservations_committed(instance)
    self.compute.soft_delete_instance(self.context, instance,
                                      reservations=resvs)
def test_quotas_failed_soft_delete(self):
    """A failed soft delete rolls the quota reservations back."""
    instance = self._create_fake_instance_obj(
        params=dict(task_state=task_states.SOFT_DELETING))

    def fake_soft_delete(*args, **kwargs):
        raise test.TestingException()

    self.stubs.Set(self.compute.driver, 'soft_delete',
                   fake_soft_delete)

    resvs = self._ensure_quota_reservations_rolledback(instance)
    self.assertRaises(test.TestingException,
                      self.compute.soft_delete_instance,
                      self.context, instance,
                      reservations=resvs)
def test_quotas_destroy_of_soft_deleted_instance(self):
    instance = self._create_fake_instance_obj(
        params=dict(vm_state=vm_states.SOFT_DELETED))
    # Termination should be successful, but quota reservations
    # rolled back because the instance was in SOFT_DELETED state.
    resvs = self._ensure_quota_reservations_rolledback(instance)
    self.compute.terminate_instance(self.context, instance,
                                    bdms=[], reservations=resvs)
def _stub_out_resize_network_methods(self):
    """Replace the resize-related network API calls with no-ops."""
    def _noop(cls, ctxt, instance, *args, **kwargs):
        pass

    for method_name in ('setup_networks_on_host',
                        'migrate_instance_start',
                        'migrate_instance_finish'):
        self.stubs.Set(network_api.API, method_name, _noop)
def _test_finish_resize(self, power_on):
# Contrived test to ensure finish_resize doesn't raise anything and
# also tests resize from ACTIVE or STOPPED state which determines
# if the resized instance is powered on or not.
vm_state = None
if power_on:
vm_state = vm_states.ACTIVE
else:
vm_state = vm_states.STOPPED
params = {'vm_state': vm_state}
instance = self._create_fake_instance_obj(params)
image = 'fake-image'
disk_info = 'fake-disk-info'
instance_type = flavors.get_default_flavor()
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={}, reservations=[], request_spec={},
filter_properties={}, node=None)
instance.task_state = task_states.RESIZE_MIGRATED
instance.save()
# NOTE(mriedem): make sure prep_resize set old_vm_state correctly
sys_meta = instance.system_metadata
self.assertIn('old_vm_state', sys_meta)
if power_on:
self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
else:
self.assertEqual(vm_states.STOPPED, sys_meta['old_vm_state'])
migration = objects.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
orig_mig_save = migration.save
orig_inst_save = instance.save
network_api = self.compute.network_api
self.mox.StubOutWithMock(network_api, 'setup_networks_on_host')
self.mox.StubOutWithMock(network_api,
'migrate_instance_finish')
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute,
'_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'finish_migration')
self.mox.StubOutWithMock(self.compute,
'_get_instance_block_device_info')
self.mox.StubOutWithMock(migration, 'save')
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.context, 'elevated')
def _mig_save(context):
self.assertEqual(migration.status, 'finished')
self.assertEqual(vm_state, instance.vm_state)
self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
orig_mig_save()
def _instance_save1():
self.assertEqual(instance_type['id'],
instance.instance_type_id)
orig_inst_save()
def _instance_save2(expected_task_state=None):
self.assertEqual(task_states.RESIZE_MIGRATED,
expected_task_state)
self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
orig_inst_save(expected_task_state=expected_task_state)
def _instance_save3(expected_task_state=None):
self.assertEqual(task_states.RESIZE_FINISH,
expected_task_state)
self.assertEqual(vm_states.RESIZED, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIn('launched_at', instance.obj_what_changed())
orig_inst_save(expected_task_state=expected_task_state)
# First save to update flavor
instance.save().WithSideEffects(_instance_save1)
network_api.setup_networks_on_host(self.context, instance,
'fake-mini')
network_api.migrate_instance_finish(self.context,
mox.IsA(dict),
mox.IsA(dict))
self.compute._get_instance_nw_info(
self.context, instance).AndReturn('fake-nwinfo1')
# 2nd save to update task state
exp_kwargs = dict(expected_task_state=task_states.RESIZE_MIGRATED)
instance.save(**exp_kwargs).WithSideEffects(_instance_save2)
self.compute._notify_about_instance_usage(
self.context, instance, 'finish_resize.start',
network_info='fake-nwinfo1')
self.compute._get_instance_block_device_info(
self.context, instance,
refresh_conn_info=True).AndReturn('fake-bdminfo')
# nova.conf sets the default flavor to m1.small and the test
# sets the default flavor to m1.tiny so they should be different
# which makes this a resize
self.compute.driver.finish_migration(self.context, migration,
instance, disk_info,
'fake-nwinfo1',
image, True,
'fake-bdminfo', power_on)
# Ensure instance status updates is after the migration finish
self.context.elevated().AndReturn(self.context)
migration.save(self.context).WithSideEffects(_mig_save)
exp_kwargs = dict(expected_task_state=task_states.RESIZE_FINISH)
instance.save(**exp_kwargs).WithSideEffects(_instance_save3)
self.compute._notify_about_instance_usage(
self.context, instance, 'finish_resize.end',
network_info='fake-nwinfo1')
# NOTE(comstud): This actually does the mox.ReplayAll()
reservations = self._ensure_quota_reservations_committed(instance)
self.compute.finish_resize(self.context,
migration=migration,
disk_info=disk_info, image=image, instance=instance,
reservations=reservations)
def test_finish_resize_from_active(self):
    """Finish a resize for an instance that started out powered on."""
    power_on = True
    self._test_finish_resize(power_on=power_on)
def test_finish_resize_from_stopped(self):
    """Finish a resize for an instance that started out stopped."""
    power_on = False
    self._test_finish_resize(power_on=power_on)
def test_finish_resize_with_volumes(self):
    """Contrived test to ensure finish_resize doesn't raise anything.

    Attaches a volume to an instance, walks it through
    prep_resize/resize_instance/finish_resize, and checks that the
    block device mapping's connection_info is preserved across the
    resize and refreshed by finish_resize.
    """
    # create instance
    instance = self._create_fake_instance_obj()

    # create volume
    volume_id = 'fake'
    volume = {'instance_uuid': None,
              'device_name': None,
              'id': volume_id,
              'attach_status': 'detached'}
    bdm = objects.BlockDeviceMapping(
        **{'source_type': 'volume',
           'destination_type': 'volume',
           'volume_id': volume_id,
           'instance_uuid': instance['uuid'],
           'device_name': '/dev/vdc'})
    bdm.create(self.context)

    # stub out volume attach
    def fake_volume_get(self, context, volume_id):
        return volume
    self.stubs.Set(cinder.API, "get", fake_volume_get)

    def fake_volume_check_attach(self, context, volume_id, instance):
        pass
    self.stubs.Set(cinder.API, "check_attach", fake_volume_check_attach)

    def fake_get_volume_encryption_metadata(self, context, volume_id):
        return {}
    self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
                   fake_get_volume_encryption_metadata)

    orig_connection_data = {
        'target_discovered': True,
        'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % volume_id,
        'target_portal': '127.0.0.0.1:3260',
        'volume_id': volume_id,
    }
    connection_info = {
        'driver_volume_type': 'iscsi',
        'data': orig_connection_data,
    }

    def fake_init_conn(self, context, volume_id, session):
        return connection_info
    self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)

    def fake_attach(self, context, volume_id, instance_uuid, device_name,
                    mode='rw'):
        volume['instance_uuid'] = instance_uuid
        volume['device_name'] = device_name
    self.stubs.Set(cinder.API, "attach", fake_attach)

    # stub out virt driver attach
    def fake_get_volume_connector(*args, **kwargs):
        return {}
    self.stubs.Set(self.compute.driver, 'get_volume_connector',
                   fake_get_volume_connector)

    def fake_attach_volume(*args, **kwargs):
        pass
    self.stubs.Set(self.compute.driver, 'attach_volume',
                   fake_attach_volume)

    # attach volume to instance
    self.compute.attach_volume(self.context, volume['id'],
                               '/dev/vdc', instance, bdm=bdm)

    # assert volume attached correctly
    self.assertEqual(volume['device_name'], '/dev/vdc')
    disk_info = db.block_device_mapping_get_all_by_instance(
        self.context, instance.uuid)
    self.assertEqual(len(disk_info), 1)
    for bdm in disk_info:
        self.assertEqual(bdm['device_name'], volume['device_name'])
        self.assertEqual(bdm['connection_info'],
                         jsonutils.dumps(connection_info))

    # begin resize
    instance_type = flavors.get_default_flavor()
    instance.task_state = task_states.RESIZE_PREP
    instance.save()
    self.compute.prep_resize(self.context, instance=instance,
                             instance_type=instance_type,
                             image={}, reservations=[], request_spec={},
                             filter_properties={}, node=None)

    # fake out detach for prep_resize (and later terminate)
    def fake_terminate_connection(self, context, volume, connector):
        connection_info['data'] = None
    self.stubs.Set(cinder.API, "terminate_connection",
                   fake_terminate_connection)

    self._stub_out_resize_network_methods()

    migration = objects.Migration.get_by_instance_and_status(
        self.context.elevated(),
        instance.uuid, 'pre-migrating')
    self.compute.resize_instance(self.context, instance=instance,
                                 migration=migration, image={},
                                 reservations=[],
                                 instance_type=jsonutils.to_primitive(
                                     instance_type))

    # assert bdm is unchanged
    disk_info = db.block_device_mapping_get_all_by_instance(
        self.context, instance.uuid)
    self.assertEqual(len(disk_info), 1)
    for bdm in disk_info:
        self.assertEqual(bdm['device_name'], volume['device_name'])
        cached_connection_info = jsonutils.loads(bdm['connection_info'])
        self.assertEqual(cached_connection_info['data'],
                         orig_connection_data)
    # but connection was terminated
    self.assertIsNone(connection_info['data'])

    # stub out virt driver finish_migration
    def fake(*args, **kwargs):
        pass
    self.stubs.Set(self.compute.driver, 'finish_migration', fake)

    instance.task_state = task_states.RESIZE_MIGRATED
    instance.save()

    reservations = self._ensure_quota_reservations_committed(instance)

    # new initialize connection
    new_connection_data = dict(orig_connection_data)
    # BUG FIX: a stray trailing comma previously made new_iqn a 1-tuple
    # instead of a string, so the refreshed target_iqn stored below was
    # never the intended iSCSI IQN string.
    new_iqn = 'iqn.2010-10.org.openstack:%s.2' % volume_id
    new_connection_data['target_iqn'] = new_iqn

    def fake_init_conn_with_data(self, context, volume, session):
        connection_info['data'] = new_connection_data
        return connection_info
    self.stubs.Set(cinder.API, "initialize_connection",
                   fake_init_conn_with_data)

    self.compute.finish_resize(self.context,
                               migration=migration,
                               disk_info={}, image={}, instance=instance,
                               reservations=reservations)

    # assert volume attached correctly
    disk_info = db.block_device_mapping_get_all_by_instance(
        self.context, instance['uuid'])
    self.assertEqual(len(disk_info), 1)
    for bdm in disk_info:
        self.assertEqual(bdm['connection_info'],
                         jsonutils.dumps(connection_info))

    # stub out detach
    def fake_detach(self, context, volume_uuid):
        volume['device_path'] = None
        volume['instance_uuid'] = None
    self.stubs.Set(cinder.API, "detach", fake_detach)

    # clean up
    self.compute.terminate_instance(self.context, instance, [], [])
def test_finish_resize_handles_error(self):
    """Make sure we don't leave the instance in RESIZE on error.

    finish_migration raises in the virt driver; the instance must end
    up in ERROR with the old flavor's sizing still applied, and quota
    reservations must be rolled back.
    """
    def throw_up(*args, **kwargs):
        raise test.TestingException()

    # NOTE: an unused no-op `fake` helper was removed here; only the
    # raising stub is needed.
    self.stubs.Set(self.compute.driver, 'finish_migration', throw_up)
    self._stub_out_resize_network_methods()

    old_flavor_name = 'm1.tiny'
    instance = self._create_fake_instance_obj(type_name=old_flavor_name)
    reservations = self._ensure_quota_reservations_rolledback(instance)

    instance_type = flavors.get_flavor_by_name('m1.small')

    self.compute.prep_resize(self.context, instance=instance,
                             instance_type=instance_type,
                             image={}, reservations=reservations,
                             request_spec={}, filter_properties={},
                             node=None)
    migration = objects.Migration.get_by_instance_and_status(
            self.context.elevated(),
            instance.uuid, 'pre-migrating')

    instance.refresh()
    instance.task_state = task_states.RESIZE_MIGRATED
    instance.save()
    self.assertRaises(test.TestingException, self.compute.finish_resize,
                      self.context,
                      migration=migration,
                      disk_info={}, image={}, instance=instance,
                      reservations=reservations)
    instance.refresh()
    self.assertEqual(vm_states.ERROR, instance.vm_state)

    # The failed resize must leave the old flavor applied, not m1.small.
    old_flavor = flavors.get_flavor_by_name(old_flavor_name)
    self.assertEqual(old_flavor['memory_mb'], instance.memory_mb)
    self.assertEqual(old_flavor['vcpus'], instance.vcpus)
    self.assertEqual(old_flavor['root_gb'], instance.root_gb)
    self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb)
    self.assertEqual(old_flavor['id'], instance.instance_type_id)
    self.assertNotEqual(instance_type['id'], instance.instance_type_id)
def test_save_instance_info(self):
    """_save_instance_info copies the new flavor's sizing fields and
    flavor id onto the instance.
    """
    old_flavor_name = 'm1.tiny'
    new_flavor_name = 'm1.small'
    instance = self._create_fake_instance_obj(type_name=old_flavor_name)
    new_flavor = flavors.get_flavor_by_name(new_flavor_name)

    self.compute._save_instance_info(instance, new_flavor,
                                     instance.system_metadata)

    self.assertEqual(new_flavor['memory_mb'], instance.memory_mb)
    self.assertEqual(new_flavor['vcpus'], instance.vcpus)
    self.assertEqual(new_flavor['root_gb'], instance.root_gb)
    self.assertEqual(new_flavor['ephemeral_gb'], instance.ephemeral_gb)
    # (A verbatim duplicate of this assertion was removed.)
    self.assertEqual(new_flavor['id'], instance.instance_type_id)
def test_rebuild_instance_notification(self):
    """Rebuild emits exists, rebuild.start and rebuild.end notifications
    with the expected payload fields.
    """
    # Freeze time so the launched_at payload value is predictable.
    old_time = datetime.datetime(2012, 4, 1)
    cur_time = datetime.datetime(2012, 12, 21, 12, 21)
    timeutils.set_time_override(old_time)
    inst_ref = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, inst_ref, {}, {}, None, None,
                              None, True, None, False)
    timeutils.set_time_override(cur_time)

    # Discard run_instance notifications; only rebuild's are checked.
    fake_notifier.NOTIFICATIONS = []
    instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
    orig_sys_metadata = db.instance_system_metadata_get(self.context,
            inst_ref['uuid'])
    image_ref = instance["image_ref"]
    # Rebuild onto a different image so start/end payloads differ from
    # the 'exists' payload.
    new_image_ref = image_ref + '-new_image_ref'
    db.instance_update(self.context, inst_ref['uuid'],
                       {'image_ref': new_image_ref})

    password = "new_password"

    inst_ref.task_state = task_states.REBUILDING
    inst_ref.save()
    self.compute.rebuild_instance(self.context,
                                  inst_ref,
                                  image_ref, new_image_ref,
                                  injected_files=[],
                                  new_pass=password,
                                  orig_sys_metadata=orig_sys_metadata,
                                  bdms=[], recreate=False,
                                  on_shared_storage=False)

    inst_ref.refresh()

    image_ref_url = glance.generate_image_url(image_ref)
    new_image_ref_url = glance.generate_image_url(new_image_ref)

    # Expected order: exists (old image), rebuild.start, rebuild.end
    # (both with the new image).
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type,
                     'compute.instance.exists')
    self.assertEqual(msg.payload['image_ref_url'], image_ref_url)
    msg = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg.event_type,
                     'compute.instance.rebuild.start')
    self.assertEqual(msg.payload['image_ref_url'], new_image_ref_url)
    self.assertEqual(msg.payload['image_name'], 'fake_name')
    msg = fake_notifier.NOTIFICATIONS[2]
    self.assertEqual(msg.event_type,
                     'compute.instance.rebuild.end')
    self.assertEqual(msg.priority, 'INFO')
    payload = msg.payload
    self.assertEqual(payload['image_name'], 'fake_name')
    self.assertEqual(payload['tenant_id'], self.project_id)
    self.assertEqual(payload['user_id'], self.user_id)
    self.assertEqual(payload['instance_id'], inst_ref['uuid'])
    self.assertEqual(payload['instance_type'], 'm1.tiny')
    type_id = flavors.get_flavor_by_name('m1.tiny')['id']
    self.assertEqual(str(payload['instance_type_id']), str(type_id))
    flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
    self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
    self.assertIn('display_name', payload)
    self.assertIn('created_at', payload)
    self.assertIn('launched_at', payload)
    self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
    self.assertEqual(payload['image_ref_url'], new_image_ref_url)
    self.compute.terminate_instance(self.context, inst_ref, [], [])
def test_finish_resize_instance_notification(self):
    """finish_resize emits finish_resize.start/.end notifications; the
    .end payload describes the new (m1.small) flavor.
    """
    # Freeze time so the launched_at payload value is predictable.
    old_time = datetime.datetime(2012, 4, 1)
    cur_time = datetime.datetime(2012, 12, 21, 12, 21)
    timeutils.set_time_override(old_time)
    instance = self._create_fake_instance_obj()
    new_type = flavors.get_flavor_by_name('m1.small')
    new_type = jsonutils.to_primitive(new_type)
    new_type_id = new_type['id']
    flavor_id = new_type['flavorid']
    instance_p = obj_base.obj_to_primitive(instance)
    self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                              None, True, None, False)

    # Move the instance "off" this host so the resize is not rejected
    # as a same-host resize (see test_resize_same_source_fails).
    instance.host = 'foo'
    instance.task_state = task_states.RESIZE_PREP
    instance.save()

    self.compute.prep_resize(self.context, instance=instance,
            instance_type=new_type, image={}, reservations=[],
            request_spec={}, filter_properties={}, node=None)

    self._stub_out_resize_network_methods()

    migration = objects.Migration.get_by_instance_and_status(
            self.context.elevated(),
            instance.uuid, 'pre-migrating')
    self.compute.resize_instance(self.context, instance=instance,
            migration=migration, image={}, instance_type=new_type,
            reservations=[])
    timeutils.set_time_override(cur_time)
    # Discard earlier notifications; only finish_resize's are checked.
    fake_notifier.NOTIFICATIONS = []

    self.compute.finish_resize(self.context,
            migration=migration, reservations=[],
            disk_info={}, image={}, instance=instance)

    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type,
                     'compute.instance.finish_resize.start')
    msg = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg.event_type,
                     'compute.instance.finish_resize.end')
    self.assertEqual(msg.priority, 'INFO')
    payload = msg.payload
    self.assertEqual(payload['tenant_id'], self.project_id)
    self.assertEqual(payload['user_id'], self.user_id)
    self.assertEqual(payload['instance_id'], instance.uuid)
    self.assertEqual(payload['instance_type'], 'm1.small')
    self.assertEqual(str(payload['instance_type_id']), str(new_type_id))
    self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
    self.assertIn('display_name', payload)
    self.assertIn('created_at', payload)
    self.assertIn('launched_at', payload)
    self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
    image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
    self.assertEqual(payload['image_ref_url'], image_ref_url)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_resize_instance_notification(self):
    """prep_resize emits exists, resize.prep.start and resize.prep.end
    notifications with the expected payload fields.
    """
    old_time = datetime.datetime(2012, 4, 1)
    cur_time = datetime.datetime(2012, 12, 21, 12, 21)
    timeutils.set_time_override(old_time)
    instance = self._create_fake_instance_obj()

    self.compute.run_instance(self.context, instance, {}, {}, None, None,
                              None, True, None, False)
    timeutils.set_time_override(cur_time)
    # Discard run_instance notifications; only prep_resize's are checked.
    fake_notifier.NOTIFICATIONS = []

    # Move the instance "off" this host so the resize is not rejected
    # as a same-host resize.
    instance.host = 'foo'
    instance.task_state = task_states.RESIZE_PREP
    instance.save()

    instance_type = flavors.get_default_flavor()
    self.compute.prep_resize(self.context, instance=instance,
            instance_type=instance_type, image={}, reservations=[],
            request_spec={}, filter_properties={}, node=None)
    # A 'pre-migrating' migration record must exist after prep_resize;
    # the return value is unused (lookup raises if it is missing).
    db.migration_get_by_instance_and_status(self.context.elevated(),
                                            instance.uuid,
                                            'pre-migrating')

    # Expected order: exists, resize.prep.start, resize.prep.end.
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type,
                     'compute.instance.exists')
    msg = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg.event_type,
                     'compute.instance.resize.prep.start')
    msg = fake_notifier.NOTIFICATIONS[2]
    self.assertEqual(msg.event_type,
                     'compute.instance.resize.prep.end')
    self.assertEqual(msg.priority, 'INFO')
    payload = msg.payload
    self.assertEqual(payload['tenant_id'], self.project_id)
    self.assertEqual(payload['user_id'], self.user_id)
    self.assertEqual(payload['instance_id'], instance.uuid)
    self.assertEqual(payload['instance_type'], 'm1.tiny')
    type_id = flavors.get_flavor_by_name('m1.tiny')['id']
    self.assertEqual(str(payload['instance_type_id']), str(type_id))
    flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
    self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
    self.assertIn('display_name', payload)
    self.assertIn('created_at', payload)
    self.assertIn('launched_at', payload)
    image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
    self.assertEqual(payload['image_ref_url'], image_ref_url)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_prep_resize_instance_migration_error_on_same_host(self):
    """Ensure prep_resize raises MigrationError when the destination is
    the source host and allow_resize_to_same_host is False.
    """
    self.flags(host="foo", allow_resize_to_same_host=False)

    instance = self._create_fake_instance_obj()
    resvs = self._ensure_quota_reservations_rolledback(instance)
    self.compute.run_instance(self.context,
                              obj_base.obj_to_primitive(instance),
                              {}, {}, None, None, None, True, None, False)
    # Place the instance on this very compute so the resize is rejected.
    instance.host = self.compute.host
    instance.save()
    flavor = flavors.get_default_flavor()

    self.assertRaises(exception.MigrationError, self.compute.prep_resize,
                      self.context, instance=instance,
                      instance_type=flavor, image={},
                      reservations=resvs, request_spec={},
                      filter_properties={}, node=None)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_prep_resize_instance_migration_error_on_none_host(self):
    """Ensure prep_resize raises MigrationError if the destination host
    is not defined.
    """
    instance = self._create_fake_instance_obj()
    resvs = self._ensure_quota_reservations_rolledback(instance)
    self.compute.run_instance(self.context,
                              obj_base.obj_to_primitive(instance),
                              {}, {}, None, None, None, True, None, False)
    # Clear the host so prep_resize has no destination to work with.
    instance.host = None
    instance.save()
    flavor = flavors.get_default_flavor()

    self.assertRaises(exception.MigrationError, self.compute.prep_resize,
                      self.context, instance=instance,
                      instance_type=flavor, image={},
                      reservations=resvs, request_spec={},
                      filter_properties={}, node=None)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_resize_instance_driver_error(self):
    # Ensure instance status set to Error on resize error.

    def throw_up(*args, **kwargs):
        raise test.TestingException()

    # Make the driver's disk migration fail unconditionally.
    self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
                   throw_up)

    instance = self._create_fake_instance_obj()
    instance_type = flavors.get_default_flavor()
    # Quota reservations must be rolled back on the failure path.
    reservations = self._ensure_quota_reservations_rolledback(instance)

    instance_p = obj_base.obj_to_primitive(instance)
    self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                              None, True, None, False)
    instance.host = 'foo'
    instance.save()
    self.compute.prep_resize(self.context, instance=instance,
                             instance_type=instance_type, image={},
                             reservations=reservations, request_spec={},
                             filter_properties={}, node=None)
    instance.task_state = task_states.RESIZE_PREP
    instance.save()
    migration = objects.Migration.get_by_instance_and_status(
            self.context.elevated(),
            instance.uuid, 'pre-migrating')

    # verify
    self.assertRaises(test.TestingException, self.compute.resize_instance,
                      self.context, instance=instance,
                      migration=migration, image={},
                      reservations=reservations,
                      instance_type=jsonutils.to_primitive(instance_type))
    # NOTE(comstud): error path doesn't use objects, so our object
    # is not updated. Refresh and compare against the DB.
    instance.refresh()
    self.assertEqual(instance.vm_state, vm_states.ERROR)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_resize_instance_driver_rollback(self):
    # Ensure instance status set to Running after rollback.

    def throw_up(*args, **kwargs):
        # InstanceFaultRollback signals the manager to roll back rather
        # than leave the instance in ERROR.
        raise exception.InstanceFaultRollback(test.TestingException())

    self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
                   throw_up)

    instance = self._create_fake_instance_obj()
    instance_type = flavors.get_default_flavor()
    # Quota reservations must be rolled back on the failure path.
    reservations = self._ensure_quota_reservations_rolledback(instance)

    instance_p = obj_base.obj_to_primitive(instance)
    self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                              None, True, None, False)
    instance.host = 'foo'
    instance.save()
    self.compute.prep_resize(self.context, instance=instance,
                             instance_type=instance_type, image={},
                             reservations=reservations, request_spec={},
                             filter_properties={}, node=None)
    instance.task_state = task_states.RESIZE_PREP
    instance.save()
    migration = objects.Migration.get_by_instance_and_status(
            self.context.elevated(),
            instance.uuid, 'pre-migrating')

    # The wrapped TestingException is re-raised to the caller, but the
    # instance itself is rolled back to ACTIVE with no task state.
    self.assertRaises(test.TestingException, self.compute.resize_instance,
                      self.context, instance=instance,
                      migration=migration, image={},
                      reservations=reservations,
                      instance_type=jsonutils.to_primitive(instance_type))
    # NOTE(comstud): error path doesn't use objects, so our object
    # is not updated. Refresh and compare against the DB.
    instance.refresh()
    self.assertEqual(instance.vm_state, vm_states.ACTIVE)
    self.assertIsNone(instance.task_state)
    self.compute.terminate_instance(self.context, instance, [], [])
def _test_resize_instance(self, clean_shutdown=True):
    """Ensure an instance can be migrated/resized.

    :param clean_shutdown: whether resize_instance is asked to attempt
        a clean guest shutdown before powering off.
    """
    instance = self._create_fake_instance_obj()
    instance_type = flavors.get_default_flavor()
    instance_p = obj_base.obj_to_primitive(instance)
    self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                              None, True, None, False)
    # Move the instance "off" this host so the resize is allowed.
    instance.host = 'foo'
    instance.save()
    self.compute.prep_resize(self.context, instance=instance,
            instance_type=instance_type, image={}, reservations=[],
            request_spec={}, filter_properties={}, node=None)

    # verify 'old_vm_state' was set on system_metadata
    instance.refresh()
    sys_meta = instance.system_metadata
    self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])

    self._stub_out_resize_network_methods()

    instance.task_state = task_states.RESIZE_PREP
    instance.save()

    migration = objects.Migration.get_by_instance_and_status(
            self.context.elevated(),
            instance.uuid, 'pre-migrating')

    with contextlib.nested(
        mock.patch.object(objects.BlockDeviceMappingList,
            'get_by_instance_uuid', return_value='fake_bdms'),
        mock.patch.object(
            self.compute, '_get_instance_block_device_info',
            return_value='fake_bdinfo'),
        mock.patch.object(self.compute, '_terminate_volume_connections'),
        mock.patch.object(self.compute, '_get_power_off_values',
            return_value=(1, 2))
    ) as (mock_get_by_inst_uuid, mock_get_instance_vol_bdinfo,
          mock_terminate_vol_conn, mock_get_power_off_values):
        self.compute.resize_instance(self.context, instance=instance,
                migration=migration, image={}, reservations=[],
                instance_type=jsonutils.to_primitive(instance_type),
                clean_shutdown=clean_shutdown)
        mock_get_instance_vol_bdinfo.assert_called_once_with(
                self.context, instance, bdms='fake_bdms')
        mock_terminate_vol_conn.assert_called_once_with(self.context,
                instance, 'fake_bdms')
        # BUG FIX: this was misspelled 'assert_caleld_once_with', which
        # Mock silently accepts as an attribute access, so the call was
        # never actually verified.
        mock_get_power_off_values.assert_called_once_with(self.context,
                instance, clean_shutdown)
        self.assertEqual(migration.dest_compute, instance.host)
        self.compute.terminate_instance(self.context, instance, [], [])
def test_resize_instance(self):
    """Resize with the default clean guest shutdown."""
    self._test_resize_instance()
def test_resize_instance_forced_shutdown(self):
    """Resize when the guest is powered off without a clean shutdown."""
    clean = False
    self._test_resize_instance(clean_shutdown=clean)
def _test_confirm_resize(self, power_on):
    """Resize an instance, confirm it, and verify the new flavor is
    applied and vm/power state are restored.

    :param power_on: True to resize from ACTIVE/RUNNING, False to
        resize from STOPPED/SHUTDOWN.
    """
    def fake(*args, **kwargs):
        pass

    def fake_confirm_migration_driver(*args, **kwargs):
        # Confirm the instance uses the new type in finish_resize
        inst = args[1]
        sys_meta = inst['system_metadata']
        self.assertEqual(sys_meta['instance_type_flavorid'], '3')

    old_vm_state = None
    p_state = None
    if power_on:
        old_vm_state = vm_states.ACTIVE
        p_state = power_state.RUNNING
    else:
        old_vm_state = vm_states.STOPPED
        p_state = power_state.SHUTDOWN
    params = {'vm_state': old_vm_state, 'power_state': p_state}
    instance = self._create_fake_instance_obj(params)

    self.flags(allow_resize_to_same_host=True)
    # finish_migration is a no-op; confirm_migration checks the flavor.
    self.stubs.Set(self.compute.driver, 'finish_migration', fake)
    self.stubs.Set(self.compute.driver, 'confirm_migration',
                   fake_confirm_migration_driver)

    self._stub_out_resize_network_methods()

    reservations = self._ensure_quota_reservations_committed(instance)

    instance_p = obj_base.obj_to_primitive(instance)
    self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                              None, True, None, False)

    # Confirm the instance size before the resize starts
    instance.refresh()
    instance_type_ref = db.flavor_get(self.context,
                                      instance.instance_type_id)
    self.assertEqual(instance_type_ref['flavorid'], '1')

    instance.vm_state = old_vm_state
    instance.power_state = p_state
    instance.save()

    # Resize to flavorid 3.
    new_instance_type_ref = db.flavor_get_by_flavor_id(
            self.context, 3)
    new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
    self.compute.prep_resize(self.context,
            instance=instance,
            instance_type=new_instance_type_p,
            image={}, reservations=reservations, request_spec={},
            filter_properties={}, node=None)

    migration = objects.Migration.get_by_instance_and_status(
            self.context.elevated(),
            instance.uuid, 'pre-migrating')

    # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
    sys_meta = instance.system_metadata
    self.assertEqual(old_vm_state, sys_meta['old_vm_state'])

    instance.task_state = task_states.RESIZE_PREP
    instance.save()

    self.compute.resize_instance(self.context, instance=instance,
                                 migration=migration,
                                 image={},
                                 reservations=[],
                                 instance_type=new_instance_type_p)
    self.compute.finish_resize(self.context,
                migration=migration, reservations=[],
                disk_info={}, image={}, instance=instance)

    # Prove that the instance size is now the new size
    instance_type_ref = db.flavor_get(self.context,
                                      instance.instance_type_id)
    self.assertEqual(instance_type_ref['flavorid'], '3')

    # Finally, confirm the resize and verify the new flavor is applied
    instance.task_state = None
    instance.save()
    self.compute.confirm_resize(self.context, instance=instance,
                                reservations=reservations,
                                migration=migration)

    instance.refresh()

    instance_type_ref = db.flavor_get(self.context,
                                      instance.instance_type_id)
    self.assertEqual(instance_type_ref['flavorid'], '3')
    self.assertEqual('fake-mini', migration.source_compute)
    # vm/power state must be restored to their pre-resize values.
    self.assertEqual(old_vm_state, instance.vm_state)
    self.assertIsNone(instance.task_state)
    self.assertEqual(p_state, instance.power_state)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_confirm_resize_from_active(self):
    """Confirm a resize of an instance that started out powered on."""
    power_on = True
    self._test_confirm_resize(power_on=power_on)
def test_confirm_resize_from_stopped(self):
    """Confirm a resize of an instance that started out stopped."""
    power_on = False
    self._test_confirm_resize(power_on=power_on)
def _test_finish_revert_resize(self, power_on,
                               remove_old_vm_state=False):
    """Convenience method that does most of the work for the
    test_finish_revert_resize tests.

    :param power_on -- True if testing resize from ACTIVE state, False if
    testing resize from STOPPED state.
    :param remove_old_vm_state -- True if testing a case where the
    'old_vm_state' system_metadata is not present when the
    finish_revert_resize method is called.
    """
    def fake(*args, **kwargs):
        pass

    def fake_finish_revert_migration_driver(*args, **kwargs):
        # Confirm the instance uses the old type in finish_revert_resize
        inst = args[1]
        sys_meta = inst.system_metadata
        self.assertEqual(sys_meta['instance_type_flavorid'], '1')

    old_vm_state = None
    if power_on:
        old_vm_state = vm_states.ACTIVE
    else:
        old_vm_state = vm_states.STOPPED
    params = {'vm_state': old_vm_state}
    instance = self._create_fake_instance_obj(params)

    # finish_migration is a no-op; finish_revert checks the flavor.
    self.stubs.Set(self.compute.driver, 'finish_migration', fake)
    self.stubs.Set(self.compute.driver, 'finish_revert_migration',
                   fake_finish_revert_migration_driver)

    self._stub_out_resize_network_methods()

    reservations = self._ensure_quota_reservations_committed(instance)

    instance_p = obj_base.obj_to_primitive(instance)
    self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                              None, True, None, False)

    instance.refresh()
    # Starting flavor is flavorid 1.
    instance_type_ref = db.flavor_get(self.context,
                                      instance.instance_type_id)
    self.assertEqual(instance_type_ref['flavorid'], '1')

    old_vm_state = instance['vm_state']

    instance.host = 'foo'
    instance.vm_state = old_vm_state
    instance.save()

    # Resize to flavorid 3, then revert back.
    new_instance_type_ref = db.flavor_get_by_flavor_id(
            self.context, 3)
    new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
    self.compute.prep_resize(self.context,
            instance=instance,
            instance_type=new_instance_type_p,
            image={}, reservations=reservations, request_spec={},
            filter_properties={}, node=None)

    migration = objects.Migration.get_by_instance_and_status(
            self.context.elevated(),
            instance.uuid, 'pre-migrating')

    # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
    sys_meta = instance.system_metadata
    self.assertEqual(old_vm_state, sys_meta['old_vm_state'])

    instance.task_state = task_states.RESIZE_PREP
    instance.save()

    self.compute.resize_instance(self.context, instance=instance,
                                 migration=migration,
                                 image={},
                                 reservations=[],
                                 instance_type=new_instance_type_p)
    self.compute.finish_resize(self.context,
                migration=migration, reservations=[],
                disk_info={}, image={}, instance=instance)

    # Prove that the instance size is now the new size
    instance_type_ref = db.flavor_get(self.context,
                                      instance['instance_type_id'])
    self.assertEqual(instance_type_ref['flavorid'], '3')

    instance.task_state = task_states.RESIZE_REVERTING
    instance.save()

    self.compute.revert_resize(self.context,
            migration=migration, instance=instance,
            reservations=reservations)

    instance.refresh()
    if remove_old_vm_state:
        # need to wipe out the old_vm_state from system_metadata
        # before calling finish_revert_resize
        sys_meta = instance.system_metadata
        sys_meta.pop('old_vm_state')
        # Have to reset for save() to work
        instance.system_metadata = sys_meta
        instance.save()

    self.compute.finish_revert_resize(self.context,
            migration=migration,
            instance=instance, reservations=reservations)

    self.assertIsNone(instance.task_state)

    # After the revert, the instance is back on the original flavor.
    instance_type_ref = db.flavor_get(self.context,
                                      instance['instance_type_id'])
    self.assertEqual(instance_type_ref['flavorid'], '1')
    self.assertEqual(instance.host, migration.source_compute)

    if remove_old_vm_state:
        # the instance ends up ACTIVE since old_vm_state was missing
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
    else:
        self.assertEqual(old_vm_state, instance.vm_state)
def test_finish_revert_resize_from_active(self):
    """Revert a resize of an instance that started out powered on."""
    power_on = True
    self._test_finish_revert_resize(power_on=power_on)
def test_finish_revert_resize_from_stopped(self):
    """Revert a resize of an instance that started out stopped."""
    power_on = False
    self._test_finish_revert_resize(power_on=power_on)
def test_finish_revert_resize_from_stopped_remove_old_vm_state(self):
    """Resize from STOPPED but end up ACTIVE, because the old_vm_state
    value is not present when finish_revert_resize runs.
    """
    self._test_finish_revert_resize(power_on=False,
                                    remove_old_vm_state=True)
def _test_cleanup_stored_instance_types(self, old, new, revert=False):
    """Drive _cleanup_stored_instance_types with mocked flavors helpers
    and check the (sys_meta, kept flavor, dropped flavor) result.

    :param old: the old instance_type_id (extracted via 'old_' prefix)
    :param new: the current instance_type_id
    :param revert: True exercises the revert path, where the old flavor
        is saved back; otherwise the prefixed copies are deleted.
    """
    instance = self._create_fake_instance_obj()
    migration = dict(old_instance_type_id=old,
                     new_instance_type_id=new)
    instance.system_metadata = dict(instance_type_id=old)
    sys_meta = dict(instance.system_metadata)
    # mox record phase: the calls below define the expected call order.
    self.mox.StubOutWithMock(flavors, 'extract_flavor')
    self.mox.StubOutWithMock(flavors, 'delete_flavor_info')
    self.mox.StubOutWithMock(flavors, 'save_flavor_info')
    if revert:
        flavors.extract_flavor(instance, 'old_').AndReturn(
            {'instance_type_id': old})
        flavors.extract_flavor(instance).AndReturn(
            {'instance_type_id': new})
        flavors.save_flavor_info(
            sys_meta, {'instance_type_id': old}).AndReturn(sys_meta)
    else:
        flavors.extract_flavor(instance).AndReturn(
            {'instance_type_id': new})
        flavors.extract_flavor(instance, 'old_').AndReturn(
            {'instance_type_id': old})
        flavors.delete_flavor_info(
            sys_meta, 'old_').AndReturn(sys_meta)
        flavors.delete_flavor_info(
            sys_meta, 'new_').AndReturn(sys_meta)
    self.mox.ReplayAll()
    res = self.compute._cleanup_stored_instance_types(migration, instance,
                                                      revert)
    # On revert the old flavor is kept and new dropped; otherwise the
    # reverse.
    self.assertEqual(res,
                     (sys_meta,
                      {'instance_type_id': revert and old or new},
                      {'instance_type_id': revert and new or old}))
def test_cleanup_stored_instance_types_for_resize(self):
    """Cleanup after a resize: old and new flavor ids differ."""
    self._test_cleanup_stored_instance_types('1', '2')
def test_cleanup_stored_instance_types_for_resize_with_update(self):
    """Cleanup on the revert path when old and new flavor ids differ."""
    self._test_cleanup_stored_instance_types('1', '2', revert=True)
def test_cleanup_stored_instance_types_for_migration(self):
    """Cleanup after a migration: old and new flavor ids are equal."""
    self._test_cleanup_stored_instance_types('1', '1')
def test_cleanup_stored_instance_types_for_migration_with_update(self):
    """Cleanup on the revert path when old and new flavor ids are equal."""
    self._test_cleanup_stored_instance_types('1', '1', revert=True)
def test_get_by_flavor_id(self):
    """Flavor id 1 should resolve to the m1.tiny flavor."""
    flavor = flavors.get_flavor_by_flavor_id(1)
    self.assertEqual('m1.tiny', flavor['name'])
def test_resize_same_source_fails(self):
    """Ensure instance fails to migrate when source and destination are
    the same host.
    """
    instance = self._create_fake_instance_obj()
    # prep_resize failing must roll back the quota reservations.
    reservations = self._ensure_quota_reservations_rolledback(instance)
    instance_p = obj_base.obj_to_primitive(instance)
    self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                              None, True, None, False)
    instance.refresh()
    instance_type = flavors.get_default_flavor()
    # Resizing onto the host the instance already runs on must raise.
    self.assertRaises(exception.MigrationError, self.compute.prep_resize,
                      self.context, instance=instance,
                      instance_type=instance_type, image={},
                      reservations=reservations, request_spec={},
                      filter_properties={}, node=None)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_resize_instance_handles_migration_error(self):
    # Ensure vm_state is ERROR when error occurs.
    def raise_migration_failure(*args):
        raise test.TestingException()
    self.stubs.Set(self.compute.driver,
                   'migrate_disk_and_power_off',
                   raise_migration_failure)
    instance = self._create_fake_instance_obj()
    reservations = self._ensure_quota_reservations_rolledback(instance)
    instance_type = flavors.get_default_flavor()
    instance_p = obj_base.obj_to_primitive(instance)
    self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                              None, True, None, False)
    # Pretend the instance lives elsewhere so prep_resize creates a
    # real migration record instead of short-circuiting.
    instance.host = 'foo'
    instance.save()
    self.compute.prep_resize(self.context, instance=instance,
                             instance_type=instance_type,
                             image={}, reservations=reservations,
                             request_spec={}, filter_properties={},
                             node=None)
    migration = objects.Migration.get_by_instance_and_status(
        self.context.elevated(),
        instance.uuid, 'pre-migrating')
    instance.task_state = task_states.RESIZE_PREP
    instance.save()
    self.assertRaises(test.TestingException, self.compute.resize_instance,
                      self.context, instance=instance,
                      migration=migration, image={},
                      reservations=reservations,
                      instance_type=jsonutils.to_primitive(instance_type))
    # NOTE(comstud): error path doesn't use objects, so our object
    # is not updated. Refresh and compare against the DB.
    instance.refresh()
    self.assertEqual(instance.vm_state, vm_states.ERROR)
    self.compute.terminate_instance(self.context, instance, [], [])
def test_pre_live_migration_instance_has_no_fixed_ip(self):
    # Confirm that no exception is raised if there is no fixed ip on
    # pre_live_migration
    instance = self._create_fake_instance_obj()
    c = context.get_admin_context()
    self.mox.ReplayAll()
    # NOTE(review): the driver method is invoked directly (not via the
    # compute manager); the test passes as long as this call does not
    # raise -- TODO confirm that is the intended coverage.
    self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
                                           {'block_device_mapping': []},
                                           mox.IgnoreArg(),
                                           mox.IgnoreArg(),
                                           mox.IgnoreArg())
def test_pre_live_migration_works_correctly(self):
    # Confirm setup_compute_volume is called when volume is mounted.
    def stupid(*args, **kwargs):
        # Replaces _get_instance_nw_info with canned fake network info.
        return fake_network.fake_get_instance_nw_info(self.stubs)
    self.stubs.Set(nova.compute.manager.ComputeManager,
                   '_get_instance_nw_info', stupid)
    # creating instance testdata
    instance = self._create_fake_instance_obj({'host': 'dummy'})
    c = context.get_admin_context()
    nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
    # creating mocks
    self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
    self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
                                           {'swap': None, 'ephemerals': [],
                                            'block_device_mapping': []},
                                           mox.IgnoreArg(),
                                           mox.IgnoreArg(),
                                           mox.IgnoreArg())
    self.mox.StubOutWithMock(self.compute.driver,
                             'ensure_filtering_rules_for_instance')
    self.compute.driver.ensure_filtering_rules_for_instance(
        mox.IsA(instance), nw_info)
    self.mox.StubOutWithMock(self.compute.network_api,
                             'setup_networks_on_host')
    self.compute.network_api.setup_networks_on_host(c, instance,
                                                    self.compute.host)
    fake_notifier.NOTIFICATIONS = []
    # start test
    self.mox.ReplayAll()
    migrate_data = {'is_shared_instance_path': False}
    ret = self.compute.pre_live_migration(c, instance=instance,
                                          block_migration=False, disk=None,
                                          migrate_data=migrate_data)
    self.assertIsNone(ret)
    # Exactly the .pre.start and .pre.end notifications are emitted.
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type,
                     'compute.instance.live_migration.pre.start')
    msg = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg.event_type,
                     'compute.instance.live_migration.pre.end')
    # cleanup
    db.instance_destroy(c, instance['uuid'])
def test_live_migration_exception_rolls_back(self):
    # Confirm exception when pre_live_migration fails.
    c = context.get_admin_context()
    instance = self._create_fake_instance_obj(
        {'host': 'src_host',
         'task_state': task_states.MIGRATING})
    updated_instance = self._create_fake_instance_obj(
        {'host': 'fake-dest-host'})
    dest_host = updated_instance['host']
    # Two volume BDMs so the rollback path must tear down each
    # destination volume connection.
    fake_bdms = [
        objects.BlockDeviceMapping(
            **fake_block_device.FakeDbBlockDeviceDict(
                {'volume_id': 'vol1-id', 'source_type': 'volume',
                 'destination_type': 'volume'})),
        objects.BlockDeviceMapping(
            **fake_block_device.FakeDbBlockDeviceDict(
                {'volume_id': 'vol2-id', 'source_type': 'volume',
                 'destination_type': 'volume'}))
    ]
    # creating mocks
    self.mox.StubOutWithMock(self.compute.driver,
                             'get_instance_disk_info')
    self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                             'pre_live_migration')
    self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                             'get_by_instance_uuid')
    self.mox.StubOutWithMock(self.compute.network_api,
                             'setup_networks_on_host')
    self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                             'remove_volume_connection')
    self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                             'rollback_live_migration_at_destination')
    self.compute.driver.get_instance_disk_info(
        instance.name).AndReturn('fake_disk')
    # pre_live_migration on the destination raises -> rollback begins.
    self.compute.compute_rpcapi.pre_live_migration(c,
        instance, True, 'fake_disk', dest_host,
        {}).AndRaise(test.TestingException())
    self.compute.network_api.setup_networks_on_host(c,
        instance, self.compute.host)
    objects.BlockDeviceMappingList.get_by_instance_uuid(c,
        instance.uuid).AndReturn(fake_bdms)
    self.compute.compute_rpcapi.remove_volume_connection(
        c, instance, 'vol1-id', dest_host)
    self.compute.compute_rpcapi.remove_volume_connection(
        c, instance, 'vol2-id', dest_host)
    self.compute.compute_rpcapi.rollback_live_migration_at_destination(
        c, instance, dest_host, destroy_disks=True, migrate_data={})
    # start test
    self.mox.ReplayAll()
    self.assertRaises(test.TestingException,
                      self.compute.live_migration,
                      c, dest=dest_host, block_migration=True,
                      instance=instance, migrate_data={})
    # After rollback the instance must be back to ACTIVE on the source.
    instance.refresh()
    self.assertEqual('src_host', instance.host)
    self.assertEqual(vm_states.ACTIVE, instance.vm_state)
    self.assertIsNone(instance.task_state)
def test_live_migration_works_correctly(self):
    # Confirm live_migration() works as expected correctly.
    # creating instance testdata
    c = context.get_admin_context()
    instance = self._create_fake_instance_obj()
    instance.host = self.compute.host
    dest = 'desthost'
    migrate_data = {'is_shared_instance_path': False}
    self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                             'pre_live_migration')
    self.compute.compute_rpcapi.pre_live_migration(
        c, instance, False, None, dest, migrate_data)
    self.mox.StubOutWithMock(self.compute.network_api,
                             'migrate_instance_start')
    migration = {'source_compute': instance['host'], 'dest_compute': dest}
    self.compute.network_api.migrate_instance_start(c, instance,
                                                    migration)
    self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                             'post_live_migration_at_destination')
    self.compute.compute_rpcapi.post_live_migration_at_destination(
        c, instance, False, dest)
    self.mox.StubOutWithMock(self.compute.network_api,
                             'setup_networks_on_host')
    # Source-side networking is torn down after the migration.
    self.compute.network_api.setup_networks_on_host(c, instance,
                                                    instance['host'],
                                                    teardown=True)
    self.mox.StubOutWithMock(self.compute.instance_events,
                             'clear_events_for_instance')
    self.compute.instance_events.clear_events_for_instance(
        mox.IgnoreArg())
    # start test
    self.mox.ReplayAll()
    ret = self.compute.live_migration(c, dest=dest,
                                      instance=instance,
                                      block_migration=False,
                                      migrate_data=migrate_data)
    self.assertIsNone(ret)
    # cleanup
    instance.destroy(c)
def test_post_live_migration_no_shared_storage_working_correctly(self):
    """Confirm post_live_migration() works correctly as expected
    for non shared storage migration.
    """
    # Create stubs
    result = {}
    # No share storage live migration don't need to destroy at source
    # server because instance has been migrated to destination, but a
    # cleanup for block device and network are needed.
    def fakecleanup(*args, **kwargs):
        # Records that driver.cleanup was reached.
        result['cleanup'] = True
    self.stubs.Set(self.compute.driver, 'cleanup', fakecleanup)
    dest = 'desthost'
    srchost = self.compute.host
    # creating testdata
    c = context.get_admin_context()
    instance = self._create_fake_instance_obj({
        'host': srchost,
        'state_description': 'migrating',
        'state': power_state.PAUSED,
        'task_state': task_states.MIGRATING,
        'power_state': power_state.PAUSED})
    # creating mocks
    self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
    self.compute.driver.unfilter_instance(instance, [])
    self.mox.StubOutWithMock(self.compute.network_api,
                             'migrate_instance_start')
    migration = {'source_compute': srchost, 'dest_compute': dest, }
    self.compute.network_api.migrate_instance_start(c, instance,
                                                    migration)
    self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                             'post_live_migration_at_destination')
    self.compute.compute_rpcapi.post_live_migration_at_destination(
        c, instance, False, dest)
    self.mox.StubOutWithMock(self.compute.network_api,
                             'setup_networks_on_host')
    self.compute.network_api.setup_networks_on_host(c, instance,
                                                    self.compute.host,
                                                    teardown=True)
    self.mox.StubOutWithMock(self.compute.instance_events,
                             'clear_events_for_instance')
    self.compute.instance_events.clear_events_for_instance(
        mox.IgnoreArg())
    # start test
    self.mox.ReplayAll()
    migrate_data = {'is_shared_instance_path': False}
    self.compute._post_live_migration(c, instance, dest,
                                      migrate_data=migrate_data)
    # Non-shared storage => driver.cleanup must have run on the source.
    self.assertIn('cleanup', result)
    self.assertEqual(result['cleanup'], True)
def test_post_live_migration_working_correctly(self):
    # Confirm post_live_migration() works as expected correctly.
    dest = 'desthost'
    srchost = self.compute.host
    # creating testdata
    c = context.get_admin_context()
    instance = self._create_fake_instance_obj({
        'host': srchost,
        'state_description': 'migrating',
        'state': power_state.PAUSED})
    instance.update({'task_state': task_states.MIGRATING,
                     'power_state': power_state.PAUSED})
    instance.save(c)
    # creating mocks
    with contextlib.nested(
        mock.patch.object(self.compute.driver, 'post_live_migration'),
        mock.patch.object(self.compute.driver, 'unfilter_instance'),
        mock.patch.object(self.compute.network_api,
                          'migrate_instance_start'),
        mock.patch.object(self.compute.compute_rpcapi,
                          'post_live_migration_at_destination'),
        mock.patch.object(self.compute.driver,
                          'post_live_migration_at_source'),
        mock.patch.object(self.compute.network_api,
                          'setup_networks_on_host'),
        mock.patch.object(self.compute.instance_events,
                          'clear_events_for_instance'),
        mock.patch.object(self.compute, 'update_available_resource')
    ) as (
        post_live_migration, unfilter_instance,
        migrate_instance_start, post_live_migration_at_destination,
        post_live_migration_at_source, setup_networks_on_host,
        clear_events, update_available_resource
    ):
        self.compute._post_live_migration(c, instance, dest)
        # Verify each collaborator was called with the expected args.
        post_live_migration.assert_has_calls([
            mock.call(c, instance, {'swap': None, 'ephemerals': [],
                                    'block_device_mapping': []}, None)])
        unfilter_instance.assert_has_calls([mock.call(instance, [])])
        migration = {'source_compute': srchost,
                     'dest_compute': dest, }
        migrate_instance_start.assert_has_calls([
            mock.call(c, instance, migration)])
        post_live_migration_at_destination.assert_has_calls([
            mock.call(c, instance, False, dest)])
        post_live_migration_at_source.assert_has_calls(
            [mock.call(c, instance, [])])
        setup_networks_on_host.assert_has_calls([
            mock.call(c, instance, self.compute.host, teardown=True)])
        clear_events.assert_called_once_with(instance)
        update_available_resource.assert_has_calls([mock.call(c)])
def test_post_live_migration_terminate_volume_connections(self):
    # After a live migration, source-side volume connections must be
    # terminated for volume-backed BDMs only (local/blank BDMs ignored).
    c = context.get_admin_context()
    instance = self._create_fake_instance_obj({
        'host': self.compute.host,
        'state_description': 'migrating',
        'state': power_state.PAUSED})
    instance.update({'task_state': task_states.MIGRATING,
                     'power_state': power_state.PAUSED})
    instance.save(c)
    # One local (blank) BDM and one cinder volume BDM.
    bdms = block_device_obj.block_device_make_list(c,
        [fake_block_device.FakeDbBlockDeviceDict({
            'source_type': 'blank', 'guest_format': None,
            'destination_type': 'local'}),
         fake_block_device.FakeDbBlockDeviceDict({
            'source_type': 'volume', 'destination_type': 'volume',
            'volume_id': 'fake-volume-id'}),
         ])
    with contextlib.nested(
        mock.patch.object(self.compute.network_api,
                          'migrate_instance_start'),
        mock.patch.object(self.compute.compute_rpcapi,
                          'post_live_migration_at_destination'),
        mock.patch.object(self.compute.network_api,
                          'setup_networks_on_host'),
        mock.patch.object(self.compute.instance_events,
                          'clear_events_for_instance'),
        mock.patch.object(self.compute,
                          '_get_instance_block_device_info'),
        mock.patch.object(objects.BlockDeviceMappingList,
                          'get_by_instance_uuid'),
        mock.patch.object(self.compute.driver, 'get_volume_connector'),
        mock.patch.object(cinder.API, 'terminate_connection')
    ) as (
        migrate_instance_start, post_live_migration_at_destination,
        setup_networks_on_host, clear_events_for_instance,
        get_instance_volume_block_device_info, get_by_instance_uuid,
        get_volume_connector, terminate_connection
    ):
        get_by_instance_uuid.return_value = bdms
        get_volume_connector.return_value = 'fake-connector'
        self.compute._post_live_migration(c, instance, 'dest_host')
        # Only the volume BDM triggers a terminate_connection call.
        terminate_connection.assert_called_once_with(
            c, 'fake-volume-id', 'fake-connector')
def _begin_post_live_migration_at_destination(self):
    """Set up the common mock expectations for the destination-side
    post_live_migration tests.

    Leaves mox in record mode; callers add their _get_compute_info
    expectation and then call _finish_post_live_migration_at_destination.
    Stores the instance and admin context on ``self`` for the finisher.
    """
    self.mox.StubOutWithMock(self.compute.network_api,
                             'setup_networks_on_host')
    self.mox.StubOutWithMock(self.compute.network_api,
                             'migrate_instance_finish')
    self.mox.StubOutWithMock(self.compute, '_get_power_state')
    self.mox.StubOutWithMock(self.compute, '_get_compute_info')
    params = {'task_state': task_states.MIGRATING,
              'power_state': power_state.PAUSED, }
    self.instance = self._create_fake_instance_obj(params)
    self.admin_ctxt = context.get_admin_context()
    # Re-load the instance as an object from the DB record.
    self.instance = objects.Instance._from_db_object(self.context,
        objects.Instance(),
        db.instance_get_by_uuid(self.admin_ctxt, self.instance['uuid']))
    self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
                                                    self.instance,
                                                    self.compute.host)
    migration = {'source_compute': self.instance['host'],
                 'dest_compute': self.compute.host, }
    self.compute.network_api.migrate_instance_finish(
        self.admin_ctxt, self.instance, migration)
    fake_net_info = []
    fake_block_dev_info = {'foo': 'bar'}
    self.compute.driver.post_live_migration_at_destination(self.admin_ctxt,
                                                           self.instance,
                                                           fake_net_info,
                                                           False,
                                                           fake_block_dev_info)
    self.compute._get_power_state(self.admin_ctxt,
                                  self.instance).AndReturn(10001)
def _finish_post_live_migration_at_destination(self):
    """Replay the mocks, run post_live_migration_at_destination, and
    verify the .post.dest.start/.post.dest.end notifications.

    Returns the instance freshly loaded from the DB so callers can
    assert on fields updated by the migration (e.g. ``node``).
    """
    self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
        mox.IgnoreArg(), self.compute.host)
    fake_notifier.NOTIFICATIONS = []
    self.mox.ReplayAll()
    self.compute.post_live_migration_at_destination(self.admin_ctxt,
                                                    self.instance, False)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type,
                     'compute.instance.live_migration.post.dest.start')
    msg = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg.event_type,
                     'compute.instance.live_migration.post.dest.end')
    return objects.Instance.get_by_uuid(self.admin_ctxt,
                                        self.instance['uuid'])
def test_post_live_migration_at_destination_with_compute_info(self):
    """The instance's node property should be updated correctly."""
    self._begin_post_live_migration_at_destination()
    hypervisor_hostname = 'fake_hypervisor_hostname'
    fake_compute_info = objects.ComputeNode(
        hypervisor_hostname=hypervisor_hostname)
    self.compute._get_compute_info(mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndReturn(
                                       fake_compute_info)
    updated = self._finish_post_live_migration_at_destination()
    self.assertEqual(updated['node'], hypervisor_hostname)
def test_post_live_migration_at_destination_without_compute_info(self):
    """The instance's node property should be set to None if we fail to
    get compute_info.
    """
    self._begin_post_live_migration_at_destination()
    self.compute._get_compute_info(mox.IgnoreArg(),
                                   mox.IgnoreArg()).AndRaise(
                                       exception.NotFound())
    updated = self._finish_post_live_migration_at_destination()
    self.assertIsNone(updated['node'])
def test_rollback_live_migration_at_destination_correctly(self):
    # Rolling back on the destination must tear down networking, ask the
    # driver to destroy the copied disks, and emit start/end
    # rollback notifications.
    # creating instance testdata
    c = context.get_admin_context()
    instance = self._create_fake_instance_obj({'host': 'dummy'})
    fake_notifier.NOTIFICATIONS = []
    self.mox.StubOutWithMock(self.compute.network_api,
                             'setup_networks_on_host')
    self.compute.network_api.setup_networks_on_host(c, instance,
                                                    self.compute.host,
                                                    teardown=True)
    self.mox.StubOutWithMock(self.compute.driver,
                             'rollback_live_migration_at_destination')
    self.compute.driver.rollback_live_migration_at_destination(c,
        instance, [], {'swap': None, 'ephemerals': [],
                       'block_device_mapping': []},
        destroy_disks=True, migrate_data=None)
    # start test
    self.mox.ReplayAll()
    ret = self.compute.rollback_live_migration_at_destination(c,
                                                              instance=instance)
    self.assertIsNone(ret)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.event_type,
                     'compute.instance.live_migration.rollback.dest.start')
    msg = fake_notifier.NOTIFICATIONS[1]
    self.assertEqual(msg.event_type,
                     'compute.instance.live_migration.rollback.dest.end')
def test_run_kill_vm(self):
    # Detect when a vm is terminated behind the scenes.
    instance = self._create_fake_instance_obj()
    self.compute.run_instance(self.context, instance, {}, {}, None, None,
                              None, True, None, False)
    instances = db.instance_get_all(self.context)
    LOG.info("Running instances: %s", instances)
    self.assertEqual(len(instances), 1)
    # Kill the VM directly in the (fake) driver, bypassing nova.
    instance_name = instances[0]['name']
    self.compute.driver.test_remove_vm(instance_name)
    # Force the compute manager to do its periodic poll
    ctxt = context.get_admin_context()
    self.compute._sync_power_states(ctxt)
    # The DB record survives; the sync only clears the task state.
    instances = db.instance_get_all(self.context)
    LOG.info("After force-killing instances: %s", instances)
    self.assertEqual(len(instances), 1)
    self.assertIsNone(instances[0]['task_state'])
def _fill_fault(self, values):
extra = dict([(x, None) for x in ['created_at',
'deleted_at',
'updated_at',
'deleted']])
extra['id'] = 1
extra['details'] = ''
extra.update(values)
return extra
def test_add_instance_fault(self):
    # An unexpected exception with exc_info is recorded as a 500 fault
    # whose details contain the traceback.
    instance = self._create_fake_instance()
    exc_info = None
    def fake_db_fault_create(ctxt, values):
        # The traceback of the raised error must appear in details.
        self.assertIn('raise NotImplementedError', values['details'])
        del values['details']
        expected = {
            'code': 500,
            'message': 'test',
            'instance_uuid': instance['uuid'],
            'host': self.compute.host
        }
        self.assertEqual(expected, values)
        return self._fill_fault(expected)
    try:
        raise NotImplementedError('test')
    except NotImplementedError:
        exc_info = sys.exc_info()
    self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
    ctxt = context.get_admin_context()
    compute_utils.add_instance_fault_from_exc(ctxt,
                                              instance,
                                              NotImplementedError('test'),
                                              exc_info)
def test_add_instance_fault_with_remote_error(self):
    # A messaging.RemoteError is recorded with its remote type and
    # message folded into the fault message.
    instance = self._create_fake_instance()
    exc_info = None
    def fake_db_fault_create(ctxt, values):
        self.assertIn('raise messaging.RemoteError', values['details'])
        del values['details']
        expected = {
            'code': 500,
            'instance_uuid': instance['uuid'],
            'message': 'Remote error: test My Test Message\nNone.',
            'host': self.compute.host
        }
        self.assertEqual(expected, values)
        return self._fill_fault(expected)
    try:
        raise messaging.RemoteError('test', 'My Test Message')
    except messaging.RemoteError as exc:
        exc_info = sys.exc_info()
    self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
    ctxt = context.get_admin_context()
    compute_utils.add_instance_fault_from_exc(ctxt,
                                              instance, exc, exc_info)
def test_add_instance_fault_user_error(self):
    # A user-facing (4xx) exception keeps its code and gets empty
    # details -- no traceback is stored for expected errors.
    instance = self._create_fake_instance()
    exc_info = None
    def fake_db_fault_create(ctxt, values):
        expected = {
            'code': 400,
            'message': 'fake details',
            'details': '',
            'instance_uuid': instance['uuid'],
            'host': self.compute.host
        }
        self.assertEqual(expected, values)
        return self._fill_fault(expected)
    user_exc = exception.Invalid('fake details', code=400)
    try:
        raise user_exc
    except exception.Invalid:
        exc_info = sys.exc_info()
    self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
    ctxt = context.get_admin_context()
    compute_utils.add_instance_fault_from_exc(ctxt,
                                              instance, user_exc, exc_info)
def test_add_instance_fault_no_exc_info(self):
    # Without exc_info, a fault is still recorded but with empty
    # details (no traceback available).
    instance = self._create_fake_instance()
    def fake_db_fault_create(ctxt, values):
        expected = {
            'code': 500,
            'message': 'test',
            'details': '',
            'instance_uuid': instance['uuid'],
            'host': self.compute.host
        }
        self.assertEqual(expected, values)
        return self._fill_fault(expected)
    self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
    ctxt = context.get_admin_context()
    compute_utils.add_instance_fault_from_exc(ctxt,
                                              instance,
                                              NotImplementedError('test'))
def test_add_instance_fault_long_message(self):
    # Fault messages are truncated to the 255-char DB column size.
    instance = self._create_fake_instance()
    message = 300 * 'a'
    def fake_db_fault_create(ctxt, values):
        expected = {
            'code': 500,
            'message': message[:255],
            'details': '',
            'instance_uuid': instance['uuid'],
            'host': self.compute.host
        }
        self.assertEqual(expected, values)
        return self._fill_fault(expected)
    self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
    ctxt = context.get_admin_context()
    compute_utils.add_instance_fault_from_exc(ctxt,
                                              instance,
                                              NotImplementedError(message))
def _test_cleanup_running(self, action):
    """Common setup for the _cleanup_running_deleted_instances tests.

    Creates two instances deleted over an hour ago, stubs
    _get_instances_on_driver to return them, and configures the given
    running_deleted_instance_action.  Returns (admin_context,
    instance1, instance2) with mox still in record mode.
    """
    admin_context = context.get_admin_context()
    deleted_at = (timeutils.utcnow() -
                  datetime.timedelta(hours=1, minutes=5))
    instance1 = self._create_fake_instance_obj({"deleted_at": deleted_at,
                                                "deleted": True})
    instance2 = self._create_fake_instance_obj({"deleted_at": deleted_at,
                                                "deleted": True})
    self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
    self.compute._get_instances_on_driver(
        admin_context, {'deleted': True,
                        'soft_deleted': False,
                        'host': self.compute.host}).AndReturn([instance1,
                                                               instance2])
    self.flags(running_deleted_instance_timeout=3600,
               running_deleted_instance_action=action)
    return admin_context, instance1, instance2
def test_cleanup_running_deleted_instances_unrecognized_value(self):
    # An unknown running_deleted_instance_action must raise.
    admin_context = context.get_admin_context()
    deleted_at = (timeutils.utcnow() -
                  datetime.timedelta(hours=1, minutes=5))
    instance = self._create_fake_instance_obj({"deleted_at": deleted_at,
                                               "deleted": True})
    self.flags(running_deleted_instance_action='foo-action')
    with mock.patch.object(
            self.compute, '_get_instances_on_driver',
            return_value=[instance]):
        try:
            # We cannot simply use an assertRaises here because the
            # exception raised is too generally "Exception". To be sure
            # that the exception raised is the expected one, we check
            # the message.
            self.compute._cleanup_running_deleted_instances(admin_context)
            self.fail("Be sure this will never be executed.")
        except Exception as e:
            self.assertIn("Unrecognized value", six.text_type(e))
def test_cleanup_running_deleted_instances_reap(self):
    # 'reap' shuts down and cleans up each leaked instance; a failure
    # on one instance must not stop processing of the next.
    ctxt, inst1, inst2 = self._test_cleanup_running('reap')
    bdms = block_device_obj.block_device_make_list(ctxt, [])
    self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
    self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                             "get_by_instance_uuid")
    # Simulate an error and make sure cleanup proceeds with next instance.
    self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\
                                    AndRaise(test.TestingException)
    objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
            inst1.uuid, use_slave=True).AndReturn(bdms)
    objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
            inst2.uuid, use_slave=True).AndReturn(bdms)
    self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\
                                    AndReturn(None)
    self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
    self.compute._cleanup_volumes(ctxt, inst1['uuid'], bdms).\
                                  AndReturn(None)
    self.mox.ReplayAll()
    self.compute._cleanup_running_deleted_instances(ctxt)
def test_cleanup_running_deleted_instances_shutdown(self):
    # 'shutdown' marks each instance non-bootable and powers it off.
    ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
    self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
    self.mox.StubOutWithMock(self.compute.driver, 'power_off')
    self.compute.driver.set_bootable(inst1, False)
    self.compute.driver.power_off(inst1)
    self.compute.driver.set_bootable(inst2, False)
    self.compute.driver.power_off(inst2)
    self.mox.ReplayAll()
    self.compute._cleanup_running_deleted_instances(ctxt)
def test_cleanup_running_deleted_instances_shutdown_notimpl(self):
    # Drivers without set_bootable (NotImplementedError) only log a
    # warning; power_off must still be attempted.
    ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
    self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
    self.mox.StubOutWithMock(self.compute.driver, 'power_off')
    self.compute.driver.set_bootable(inst1, False).AndRaise(
            NotImplementedError)
    compute_manager.LOG.warn(mox.IgnoreArg())
    self.compute.driver.power_off(inst1)
    self.compute.driver.set_bootable(inst2, False).AndRaise(
            NotImplementedError)
    compute_manager.LOG.warn(mox.IgnoreArg())
    self.compute.driver.power_off(inst2)
    self.mox.ReplayAll()
    self.compute._cleanup_running_deleted_instances(ctxt)
def test_cleanup_running_deleted_instances_shutdown_error(self):
    # A power_off failure on one instance is logged and cleanup
    # continues with the next instance.
    ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
    self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
    self.mox.StubOutWithMock(self.compute.driver, 'power_off')
    self.mox.StubOutWithMock(compute_manager.LOG, 'exception')
    e = test.TestingException('bad')
    self.compute.driver.set_bootable(inst1, False)
    self.compute.driver.power_off(inst1).AndRaise(e)
    compute_manager.LOG.warn(mox.IgnoreArg())
    self.compute.driver.set_bootable(inst2, False)
    self.compute.driver.power_off(inst2).AndRaise(e)
    compute_manager.LOG.warn(mox.IgnoreArg())
    self.mox.ReplayAll()
    self.compute._cleanup_running_deleted_instances(ctxt)
def test_running_deleted_instances(self):
    # _running_deleted_instances returns instances still on the driver
    # whose deletion is older than the configured timeout.
    admin_context = context.get_admin_context()
    self.compute.host = 'host'
    instance1 = {}
    instance1['deleted'] = True
    instance1['deleted_at'] = "sometimeago"
    self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
    self.compute._get_instances_on_driver(
        admin_context, {'deleted': True,
                        'soft_deleted': False,
                        'host': self.compute.host}).AndReturn([instance1])
    self.mox.StubOutWithMock(timeutils, 'is_older_than')
    timeutils.is_older_than('sometimeago',
                CONF.running_deleted_instance_timeout).AndReturn(True)
    self.mox.ReplayAll()
    val = self.compute._running_deleted_instances(admin_context)
    self.assertEqual(val, [instance1])
def test_get_instance_nw_info(self):
    # _get_instance_nw_info re-fetches the instance with its joins and
    # delegates to the network API.
    fake_network.unset_stub_network_methods(self.stubs)
    fake_inst = fake_instance.fake_db_instance(uuid='fake-instance')
    fake_nw_info = network_model.NetworkInfo()
    self.mox.StubOutWithMock(self.compute.network_api,
                             'get_instance_nw_info')
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, fake_inst['uuid']
                            ).AndReturn(fake_inst)
    # NOTE(danms): compute manager will re-query since we're not giving
    # it an instance with system_metadata. We're stubbing out the
    # subsequent call so we don't need it, but keep this to make sure it
    # does the right thing.
    db.instance_get_by_uuid(self.context, fake_inst['uuid'],
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    self.compute.network_api.get_instance_nw_info(self.context,
            mox.IsA(objects.Instance)).AndReturn(fake_nw_info)
    self.mox.ReplayAll()
    fake_inst_obj = objects.Instance._from_db_object(
        self.context, objects.Instance(), fake_inst, [])
    result = self.compute._get_instance_nw_info(self.context,
                                                fake_inst_obj)
    self.assertEqual(fake_nw_info, result)
def test_heal_instance_info_cache(self):
    """Step the info-cache healing periodic task through its queue.

    Eight fake instances are created; DB and network-info lookups are
    stubbed with counters so each _heal_instance_info_cache call can be
    checked for exactly which lookups it performed and which instance
    it healed, including skip behaviour for building/deleting/moved/
    vanished instances.
    """
    # Update on every call for the test
    self.flags(heal_instance_info_cache_interval=-1)
    ctxt = context.get_admin_context()
    instance_map = {}
    instances = []
    for x in xrange(8):
        inst_uuid = 'fake-uuid-%s' % x
        instance_map[inst_uuid] = fake_instance.fake_db_instance(
            uuid=inst_uuid, host=CONF.host, created_at=None)
        # These won't be in our instance since they're not requested
        instances.append(instance_map[inst_uuid])
    # Counters plus the instance the next heal is expected to touch.
    call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
                 'get_nw_info': 0, 'expected_instance': None}
    def fake_instance_get_all_by_host(context, host,
                                      columns_to_join, use_slave=False):
        call_info['get_all_by_host'] += 1
        self.assertEqual([], columns_to_join)
        return instances[:]
    def fake_instance_get_by_uuid(context, instance_uuid,
                                  columns_to_join, use_slave=False):
        if instance_uuid not in instance_map:
            raise exception.InstanceNotFound(instance_id=instance_uuid)
        call_info['get_by_uuid'] += 1
        self.assertEqual(['system_metadata', 'info_cache'],
                         columns_to_join)
        return instance_map[instance_uuid]
    # NOTE(comstud): Override the stub in setUp()
    def fake_get_instance_nw_info(context, instance, use_slave=False):
        # Note that this exception gets caught in compute/manager
        # and is ignored. However, the below increment of
        # 'get_nw_info' won't happen, and you'll get an assert
        # failure checking it below.
        self.assertEqual(call_info['expected_instance']['uuid'],
                         instance['uuid'])
        call_info['get_nw_info'] += 1
    self.stubs.Set(db, 'instance_get_all_by_host',
                   fake_instance_get_all_by_host)
    self.stubs.Set(db, 'instance_get_by_uuid',
                   fake_instance_get_by_uuid)
    self.stubs.Set(self.compute, '_get_instance_nw_info',
                   fake_get_instance_nw_info)
    # Make an instance appear to be still Building
    instances[0]['vm_state'] = vm_states.BUILDING
    # Make an instance appear to be Deleting
    instances[1]['task_state'] = task_states.DELETING
    # '0', '1' should be skipped..
    call_info['expected_instance'] = instances[2]
    self.compute._heal_instance_info_cache(ctxt)
    self.assertEqual(1, call_info['get_all_by_host'])
    self.assertEqual(0, call_info['get_by_uuid'])
    self.assertEqual(1, call_info['get_nw_info'])
    call_info['expected_instance'] = instances[3]
    self.compute._heal_instance_info_cache(ctxt)
    self.assertEqual(1, call_info['get_all_by_host'])
    self.assertEqual(1, call_info['get_by_uuid'])
    self.assertEqual(2, call_info['get_nw_info'])
    # Make an instance switch hosts
    instances[4]['host'] = 'not-me'
    # Make an instance disappear
    instance_map.pop(instances[5]['uuid'])
    # Make an instance switch to be Deleting
    instances[6]['task_state'] = task_states.DELETING
    # '4', '5', and '6' should be skipped..
    call_info['expected_instance'] = instances[7]
    self.compute._heal_instance_info_cache(ctxt)
    self.assertEqual(1, call_info['get_all_by_host'])
    self.assertEqual(4, call_info['get_by_uuid'])
    self.assertEqual(3, call_info['get_nw_info'])
    # Should be no more left.
    self.assertEqual(0, len(self.compute._instance_uuids_to_heal))
    # This should cause a DB query now, so get a list of instances
    # where none can be processed to make sure we handle that case
    # cleanly. Use just '0' (Building) and '1' (Deleting)
    instances = instances[0:2]
    self.compute._heal_instance_info_cache(ctxt)
    # Should have called the list once more
    self.assertEqual(2, call_info['get_all_by_host'])
    # Stays the same because we remove invalid entries from the list
    self.assertEqual(4, call_info['get_by_uuid'])
    # Stays the same because we didn't find anything to process
    self.assertEqual(3, call_info['get_nw_info'])
@mock.patch('nova.objects.InstanceList.get_by_filters')
@mock.patch('nova.compute.api.API.unrescue')
def test_poll_rescued_instances(self, unrescue, get):
    # Rescued instances past rescue_timeout are unrescued by the
    # periodic task; more recent ones are left alone.
    timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5)
    not_timed_out_time = timeutils.utcnow()
    instances = [objects.Instance(uuid='fake_uuid1',
                                  vm_state=vm_states.RESCUED,
                                  launched_at=timed_out_time),
                 objects.Instance(uuid='fake_uuid2',
                                  vm_state=vm_states.RESCUED,
                                  launched_at=timed_out_time),
                 objects.Instance(uuid='fake_uuid3',
                                  vm_state=vm_states.RESCUED,
                                  launched_at=not_timed_out_time)]
    unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False}
    def fake_instance_get_all_by_filters(context, filters,
                                         expected_attrs=None,
                                         use_slave=False):
        self.assertEqual(["system_metadata"], expected_attrs)
        return instances
    get.side_effect = fake_instance_get_all_by_filters
    def fake_unrescue(context, instance):
        unrescued_instances[instance['uuid']] = True
    unrescue.side_effect = fake_unrescue
    self.flags(rescue_timeout=60)
    ctxt = context.get_admin_context()
    self.compute._poll_rescued_instances(ctxt)
    # Both timed-out instances must have been unrescued.
    for instance in unrescued_instances.values():
        self.assertTrue(instance)
def test_poll_unconfirmed_resizes(self):
    """_poll_unconfirmed_resizes auto-confirms or errors migrations.

    Expected per-instance outcomes (see expected_migration_status):
    a clean RESIZED instance is confirmed; a vanished instance
    ('noexist') or a wrong vm/task state errors the migration; a
    confirm_resize failure ('fake_uuid4') leaves the status
    untouched; instances being (soft-)deleted are skipped
    (bug 1301696).
    """
    instances = [
        fake_instance.fake_db_instance(uuid='fake_uuid1',
                                       vm_state=vm_states.RESIZED,
                                       task_state=None),
        fake_instance.fake_db_instance(uuid='noexist'),
        fake_instance.fake_db_instance(uuid='fake_uuid2',
                                       vm_state=vm_states.ERROR,
                                       task_state=None),
        fake_instance.fake_db_instance(uuid='fake_uuid3',
                                       vm_state=vm_states.ACTIVE,
                                       task_state=
                                       task_states.REBOOTING),
        fake_instance.fake_db_instance(uuid='fake_uuid4',
                                       vm_state=vm_states.RESIZED,
                                       task_state=None),
        fake_instance.fake_db_instance(uuid='fake_uuid5',
                                       vm_state=vm_states.ACTIVE,
                                       task_state=None),
        # The expected migration result will be None instead of error
        # since _poll_unconfirmed_resizes will not change it
        # when the instance vm state is RESIZED and task state
        # is deleting, see bug 1301696 for more detail
        fake_instance.fake_db_instance(uuid='fake_uuid6',
                                       vm_state=vm_states.RESIZED,
                                       task_state='deleting'),
        fake_instance.fake_db_instance(uuid='fake_uuid7',
                                       vm_state=vm_states.RESIZED,
                                       task_state='soft-deleting')]
    expected_migration_status = {'fake_uuid1': 'confirmed',
                                 'noexist': 'error',
                                 'fake_uuid2': 'error',
                                 'fake_uuid3': 'error',
                                 'fake_uuid4': None,
                                 'fake_uuid5': 'error',
                                 'fake_uuid6': None,
                                 'fake_uuid7': None}
    # One unconfirmed migration per instance, ids starting at 1.
    migrations = []
    for i, instance in enumerate(instances, start=1):
        fake_mig = test_migration.fake_db_migration()
        fake_mig.update({'id': i,
                         'instance_uuid': instance['uuid'],
                         'status': None})
        migrations.append(fake_mig)

    def fake_instance_get_by_uuid(context, instance_uuid,
                                  columns_to_join=None, use_slave=False):
        self.assertIn('metadata', columns_to_join)
        self.assertIn('system_metadata', columns_to_join)
        # raise InstanceNotFound exception for uuid 'noexist'
        if instance_uuid == 'noexist':
            raise exception.InstanceNotFound(instance_id=instance_uuid)
        for instance in instances:
            if instance['uuid'] == instance_uuid:
                return instance

    def fake_migration_get_unconfirmed_by_dest_compute(context,
            resize_confirm_window, dest_compute, use_slave=False):
        self.assertEqual(dest_compute, CONF.host)
        return migrations

    def fake_migration_update(context, mid, updates):
        for migration in migrations:
            if migration['id'] == mid:
                migration.update(updates)
                return migration

    def fake_confirm_resize(context, instance, migration=None):
        # raise exception for 'fake_uuid4' to check migration status
        # does not get set to 'error' on confirm_resize failure.
        if instance['uuid'] == 'fake_uuid4':
            raise test.TestingException('bomb')
        self.assertIsNotNone(migration)
        for migration2 in migrations:
            if (migration2['instance_uuid'] ==
                    migration['instance_uuid']):
                migration2['status'] = 'confirmed'

    self.stubs.Set(db, 'instance_get_by_uuid',
                   fake_instance_get_by_uuid)
    self.stubs.Set(db, 'migration_get_unconfirmed_by_dest_compute',
                   fake_migration_get_unconfirmed_by_dest_compute)
    self.stubs.Set(db, 'migration_update', fake_migration_update)
    self.stubs.Set(self.compute.compute_api, 'confirm_resize',
                   fake_confirm_resize)

    def fetch_instance_migration_status(instance_uuid):
        for migration in migrations:
            if migration['instance_uuid'] == instance_uuid:
                return migration['status']

    self.flags(resize_confirm_window=60)
    ctxt = context.get_admin_context()

    self.compute._poll_unconfirmed_resizes(ctxt)

    # dict.items() rather than the Python-2-only iteritems(); the
    # dict is tiny so the eager py2 list is harmless, and this keeps
    # the test Python-3 compatible.
    for instance_uuid, status in expected_migration_status.items():
        self.assertEqual(status,
                         fetch_instance_migration_status(instance_uuid))
def test_instance_build_timeout_mixed_instances(self):
    """Instances stuck in BUILDING past instance_build_timeout error out.

    Four instances created 60s ago (timeout is 30s) must each get an
    instance_update to ERROR; the freshly created fifth instance must
    be left alone.
    """
    self.flags(instance_build_timeout=30)
    ctxt = context.get_admin_context()
    created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)

    filters = {'vm_state': vm_states.BUILDING, 'host': CONF.host}
    # these are the ones that are expired
    old_instances = []
    # range() instead of the Python-2-only xrange(); for a 4-element
    # loop the eager list is irrelevant and this stays py3-compatible.
    for x in range(4):
        instance = {'uuid': str(uuid.uuid4()), 'created_at': created_at}
        instance.update(filters)
        old_instances.append(fake_instance.fake_db_instance(**instance))

    # not expired
    instances = list(old_instances)  # copy the contents of old_instances
    new_instance = {
        'uuid': str(uuid.uuid4()),
        'created_at': timeutils.utcnow(),
    }
    sort_key = 'created_at'
    sort_dir = 'desc'
    new_instance.update(filters)
    instances.append(fake_instance.fake_db_instance(**new_instance))

    # need something to return from conductor_api.instance_update
    # that is defined outside the for loop and can be used in the mock
    # context
    fake_instance_ref = {'host': CONF.host, 'node': 'fake'}

    # creating mocks
    with contextlib.nested(
            mock.patch.object(self.compute.db.sqlalchemy.api,
                              'instance_get_all_by_filters',
                              return_value=instances),
            mock.patch.object(self.compute.conductor_api, 'instance_update',
                              return_value=fake_instance_ref),
            mock.patch.object(self.compute.driver, 'node_is_available',
                              return_value=False)
    ) as (
        instance_get_all_by_filters,
        conductor_instance_update,
        node_is_available
    ):
        # run the code
        self.compute._check_instance_build_time(ctxt)
        # check our assertions
        instance_get_all_by_filters.assert_called_once_with(
            ctxt, filters,
            sort_key,
            sort_dir,
            marker=None,
            columns_to_join=[],
            use_slave=True,
            limit=None)
        # Exactly one update / availability check per expired instance.
        self.assertThat(conductor_instance_update.mock_calls,
                        testtools_matchers.HasLength(len(old_instances)))
        self.assertThat(node_is_available.mock_calls,
                        testtools_matchers.HasLength(len(old_instances)))
        for inst in old_instances:
            conductor_instance_update.assert_has_calls([
                mock.call(ctxt, inst['uuid'],
                          vm_state=vm_states.ERROR)])
            node_is_available.assert_has_calls([
                mock.call(fake_instance_ref['node'])])
def test_get_resource_tracker_fail(self):
    """_get_resource_tracker raises NovaException for an unknown node."""
    with testtools.ExpectedException(exception.NovaException):
        self.compute._get_resource_tracker('invalidnodename')
def test_instance_update_host_check(self):
    """_instance_update must not touch the resource tracker when the
    instance belongs to another host and/or node.

    Any attempt to fetch the tracker raises, so a wrong code path
    fails the test immediately.
    """
    def fail_get(nodename):
        raise test.TestingException(_("wrong host/node"))
    self.stubs.Set(self.compute, '_get_resource_tracker', fail_get)

    mismatched_params = (
        {'host': 'someotherhost'},
        {'node': 'someothernode'},
        {'host': 'someotherhost', 'node': 'someothernode'},
    )
    for params in mismatched_params:
        instance = self._create_fake_instance(params)
        self.compute._instance_update(self.context, instance['uuid'])
def test_destroy_evacuated_instance_on_shared_storage(self):
    """An evacuated guest on shared storage is destroyed locally.

    Of four instances returned by the driver, only the one whose DB
    record points at 'otherhost' is cleaned up.  The mox expectations
    below are order-sensitive — do not reorder them.
    """
    fake_context = context.get_admin_context()

    # instances in central db
    instances = [
        # those are still related to this host
        self._create_fake_instance_obj(
            {'host': self.compute.host}),
        self._create_fake_instance_obj(
            {'host': self.compute.host}),
        self._create_fake_instance_obj(
            {'host': self.compute.host})
        ]

    # this one has already been evacuated to another host
    evacuated_instance = self._create_fake_instance_obj(
        {'host': 'otherhost'})
    instances.append(evacuated_instance)

    self.mox.StubOutWithMock(self.compute,
                             '_get_instances_on_driver')
    self.mox.StubOutWithMock(self.compute,
                             '_get_instance_nw_info')
    self.mox.StubOutWithMock(self.compute,
                             '_get_instance_block_device_info')
    self.mox.StubOutWithMock(self.compute,
                             '_is_instance_storage_shared')
    self.mox.StubOutWithMock(self.compute.driver, 'destroy')

    self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn(instances)
    self.compute._get_instance_nw_info(fake_context,
                                       evacuated_instance).AndReturn(
                                               'fake_network_info')
    self.compute._get_instance_block_device_info(
            fake_context, evacuated_instance).AndReturn('fake_bdi')
    self.compute._is_instance_storage_shared(fake_context,
                    evacuated_instance).AndReturn(True)
    # Final False presumably means "do not destroy disks" since the
    # storage is shared — NOTE(review): confirm against the
    # driver.destroy signature.
    self.compute.driver.destroy(fake_context, evacuated_instance,
                                'fake_network_info',
                                'fake_bdi',
                                False)

    self.mox.ReplayAll()
    self.compute._destroy_evacuated_instances(fake_context)
def test_destroy_evacuated_instance_with_disks(self):
    """Evacuated guest on non-shared storage: disks are destroyed too.

    The shared-storage check file round-trip (local check -> RPC check
    on the remote host -> cleanup) returns False, so driver.destroy is
    expected with the final argument True.  mox expectations are
    order-sensitive.
    """
    fake_context = context.get_admin_context()

    # instances in central db
    instances = [
        # those are still related to this host
        self._create_fake_instance_obj(
            {'host': self.compute.host}),
        self._create_fake_instance_obj(
            {'host': self.compute.host}),
        self._create_fake_instance_obj(
            {'host': self.compute.host})
        ]

    # this one has already been evacuated to another host
    evacuated_instance = self._create_fake_instance_obj(
        {'host': 'otherhost'})
    instances.append(evacuated_instance)

    self.mox.StubOutWithMock(self.compute,
                             '_get_instances_on_driver')
    self.mox.StubOutWithMock(self.compute,
                             '_get_instance_nw_info')
    self.mox.StubOutWithMock(self.compute,
                             '_get_instance_block_device_info')
    self.mox.StubOutWithMock(self.compute.driver,
                             'check_instance_shared_storage_local')
    self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                             'check_instance_shared_storage')
    self.mox.StubOutWithMock(self.compute.driver,
                             'check_instance_shared_storage_cleanup')
    self.mox.StubOutWithMock(self.compute.driver, 'destroy')

    self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn(instances)
    self.compute._get_instance_nw_info(fake_context,
                                       evacuated_instance).AndReturn(
                                               'fake_network_info')
    self.compute._get_instance_block_device_info(
            fake_context, evacuated_instance).AndReturn('fake_bdi')
    self.compute.driver.check_instance_shared_storage_local(fake_context,
            evacuated_instance).AndReturn({'filename': 'tmpfilename'})
    # Remote host cannot see the check file => storage is NOT shared.
    self.compute.compute_rpcapi.check_instance_shared_storage(fake_context,
            evacuated_instance,
            {'filename': 'tmpfilename'}).AndReturn(False)
    self.compute.driver.check_instance_shared_storage_cleanup(fake_context,
            {'filename': 'tmpfilename'})
    self.compute.driver.destroy(fake_context, evacuated_instance,
                                'fake_network_info',
                                'fake_bdi',
                                True)

    self.mox.ReplayAll()
    self.compute._destroy_evacuated_instances(fake_context)
def test_destroy_evacuated_instance_not_implemented(self):
    """If the driver cannot do the shared-storage check, disks are
    destroyed anyway.

    check_instance_shared_storage_local raising NotImplementedError
    must lead straight to driver.destroy with the final argument True.
    mox expectations are order-sensitive.
    """
    fake_context = context.get_admin_context()

    # instances in central db
    instances = [
        # those are still related to this host
        self._create_fake_instance_obj(
            {'host': self.compute.host}),
        self._create_fake_instance_obj(
            {'host': self.compute.host}),
        self._create_fake_instance_obj(
            {'host': self.compute.host})
        ]

    # this one has already been evacuated to another host
    evacuated_instance = self._create_fake_instance_obj(
        {'host': 'otherhost'})
    instances.append(evacuated_instance)

    self.mox.StubOutWithMock(self.compute,
                             '_get_instances_on_driver')
    self.mox.StubOutWithMock(self.compute,
                             '_get_instance_nw_info')
    self.mox.StubOutWithMock(self.compute,
                             '_get_instance_block_device_info')
    self.mox.StubOutWithMock(self.compute.driver,
                             'check_instance_shared_storage_local')
    self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                             'check_instance_shared_storage')
    self.mox.StubOutWithMock(self.compute.driver,
                             'check_instance_shared_storage_cleanup')
    self.mox.StubOutWithMock(self.compute.driver, 'destroy')

    self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn(instances)
    self.compute._get_instance_nw_info(fake_context,
                                       evacuated_instance).AndReturn(
                                               'fake_network_info')
    self.compute._get_instance_block_device_info(
            fake_context, evacuated_instance).AndReturn('fake_bdi')
    self.compute.driver.check_instance_shared_storage_local(fake_context,
            evacuated_instance).AndRaise(NotImplementedError())
    self.compute.driver.destroy(fake_context, evacuated_instance,
                                'fake_network_info',
                                'fake_bdi',
                                True)

    self.mox.ReplayAll()
    self.compute._destroy_evacuated_instances(fake_context)
def test_complete_partial_deletion(self):
    """_complete_partial_deletion destroys a DELETED-state instance.

    All collaborators (bdm lookup, _complete_deletion, quota reserve)
    are stubbed to no-ops; the fake destroy flips ``deleted`` and the
    test asserts it was called.
    """
    admin_context = context.get_admin_context()
    instance = objects.Instance()
    instance.id = 1
    instance.uuid = 'fake-uuid'
    instance.vm_state = vm_states.DELETED
    instance.task_state = None
    instance.system_metadata = {'fake_key': 'fake_value'}
    instance.vcpus = 1
    instance.memory_mb = 1
    instance.project_id = 'fake-prj'
    instance.user_id = 'fake-user'
    instance.deleted = False

    def fake_destroy():
        instance.deleted = True

    self.stubs.Set(instance, 'destroy', fake_destroy)

    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   lambda *a, **k: None)

    self.stubs.Set(self.compute,
                   '_complete_deletion',
                   lambda *a, **k: None)

    self.stubs.Set(objects.Quotas, 'reserve', lambda *a, **k: None)

    self.compute._complete_partial_deletion(admin_context, instance)

    self.assertNotEqual(0, instance.deleted)
def test_init_instance_for_partial_deletion(self):
    """_init_instance finishes a deletion that was interrupted.

    For an instance whose vm_state is DELETED but whose DB row is not
    yet marked deleted, _init_instance must invoke
    _complete_partial_deletion (stubbed here to set ``deleted``).
    """
    admin_context = context.get_admin_context()
    instance = objects.Instance(admin_context)
    instance.id = 1
    instance.vm_state = vm_states.DELETED
    instance.deleted = False

    def fake_partial_deletion(context, instance):
        instance['deleted'] = instance['id']

    self.stubs.Set(self.compute,
                   '_complete_partial_deletion',
                   fake_partial_deletion)
    self.compute._init_instance(admin_context, instance)

    self.assertNotEqual(0, instance['deleted'])
def test_partial_deletion_raise_exception(self):
    """_init_instance survives a _complete_partial_deletion failure.

    The mocked call raises ValueError; the test passes if
    _init_instance does not propagate it.
    """
    admin_context = context.get_admin_context()
    instance = objects.Instance(admin_context)
    instance.uuid = str(uuid.uuid4())
    instance.vm_state = vm_states.DELETED
    instance.deleted = False

    self.mox.StubOutWithMock(self.compute, '_complete_partial_deletion')
    self.compute._complete_partial_deletion(
                             admin_context, instance).AndRaise(ValueError)
    self.mox.ReplayAll()

    self.compute._init_instance(admin_context, instance)
def test_add_remove_fixed_ip_updates_instance_updated_at(self):
    """Adding/removing a fixed IP bumps the instance's updated_at.

    The network API calls are no-ops; the test samples updated_at
    before and after each operation and asserts all three timestamps
    are distinct.
    """
    def _noop(*args, **kwargs):
        pass

    self.stubs.Set(self.compute.network_api,
                   'add_fixed_ip_to_instance', _noop)
    self.stubs.Set(self.compute.network_api,
                   'remove_fixed_ip_from_instance', _noop)

    instance = self._create_fake_instance_obj()
    updated_at_1 = instance['updated_at']

    self.compute.add_fixed_ip_to_instance(self.context, 'fake', instance)
    updated_at_2 = db.instance_get_by_uuid(self.context,
                                           instance['uuid'])['updated_at']

    self.compute.remove_fixed_ip_from_instance(self.context, 'fake',
                                               instance)
    updated_at_3 = db.instance_get_by_uuid(self.context,
                                           instance['uuid'])['updated_at']

    updated_ats = (updated_at_1, updated_at_2, updated_at_3)
    # All distinct <=> every operation produced a fresh timestamp.
    self.assertEqual(len(updated_ats), len(set(updated_ats)))
def test_no_pending_deletes_for_soft_deleted_instances(self):
    """_run_pending_deletes must ignore SOFT_DELETED instances.

    With reclaim_instance_interval=0 the instance is soft-deleted but
    not eligible for cleanup, so its 'cleaned' flag stays falsy.
    """
    self.flags(reclaim_instance_interval=0)
    ctxt = context.get_admin_context()

    instance = self._create_fake_instance(
            params={'host': CONF.host,
                    'vm_state': vm_states.SOFT_DELETED,
                    'deleted_at': timeutils.utcnow()})

    self.compute._run_pending_deletes(ctxt)
    instance = db.instance_get_by_uuid(self.context, instance['uuid'])
    self.assertFalse(instance['cleaned'])
def test_reclaim_queued_deletes(self):
    """Only the sufficiently old SOFT_DELETED instance is reclaimed.

    Four instances are created: active, soft-deleted-but-recent,
    soft-deleted-and-old, and soft-deleted-but-RESTORING.  Exactly one
    _delete_instance call is expected (mox verifies the count).
    """
    self.flags(reclaim_instance_interval=3600)
    ctxt = context.get_admin_context()

    # Active
    self._create_fake_instance(params={'host': CONF.host})

    # Deleted not old enough
    self._create_fake_instance(params={'host': CONF.host,
                                       'vm_state': vm_states.SOFT_DELETED,
                                       'deleted_at': timeutils.utcnow()})

    # Deleted old enough (only this one should be reclaimed)
    deleted_at = (timeutils.utcnow() -
                  datetime.timedelta(hours=1, minutes=5))
    self._create_fake_instance(
            params={'host': CONF.host,
                    'vm_state': vm_states.SOFT_DELETED,
                    'deleted_at': deleted_at})

    # Restoring
    # NOTE(hanlind): This specifically tests for a race condition
    # where restoring a previously soft deleted instance sets
    # deleted_at back to None, causing reclaim to think it can be
    # deleted, see LP #1186243.
    self._create_fake_instance(
            params={'host': CONF.host,
                    'vm_state': vm_states.SOFT_DELETED,
                    'task_state': task_states.RESTORING})

    self.mox.StubOutWithMock(self.compute, '_delete_instance')
    # A single expectation: any extra delete would fail VerifyAll.
    self.compute._delete_instance(
            ctxt, mox.IsA(objects.Instance), [],
            mox.IsA(objects.Quotas))

    self.mox.ReplayAll()

    self.compute._reclaim_queued_deletes(ctxt)
def test_reclaim_queued_deletes_continue_on_error(self):
    # Verify that reclaim continues on error.
    """The reclaim loop proceeds to instance2 even though deleting
    instance1 raises.  mox expectations are order-sensitive."""
    self.flags(reclaim_instance_interval=3600)
    ctxt = context.get_admin_context()

    deleted_at = (timeutils.utcnow() -
                  datetime.timedelta(hours=1, minutes=5))
    instance1 = self._create_fake_instance_obj(
            params={'host': CONF.host,
                    'vm_state': vm_states.SOFT_DELETED,
                    'deleted_at': deleted_at})
    instance2 = self._create_fake_instance_obj(
            params={'host': CONF.host,
                    'vm_state': vm_states.SOFT_DELETED,
                    'deleted_at': deleted_at})
    instances = []
    instances.append(instance1)
    instances.append(instance2)

    self.mox.StubOutWithMock(objects.InstanceList,
                             'get_by_filters')
    self.mox.StubOutWithMock(self.compute, '_deleted_old_enough')
    self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                             'get_by_instance_uuid')
    self.mox.StubOutWithMock(self.compute, '_delete_instance')

    objects.InstanceList.get_by_filters(
        ctxt, mox.IgnoreArg(),
        expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
        use_slave=True
        ).AndReturn(instances)

    # The first instance delete fails.
    self.compute._deleted_old_enough(instance1, 3600).AndReturn(True)
    objects.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, instance1.uuid).AndReturn([])
    self.compute._delete_instance(ctxt, instance1,
                                  [], self.none_quotas).AndRaise(
                                          test.TestingException)

    # The second instance delete that follows.
    self.compute._deleted_old_enough(instance2, 3600).AndReturn(True)
    objects.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, instance2.uuid).AndReturn([])
    self.compute._delete_instance(ctxt, instance2,
                                  [], self.none_quotas)

    self.mox.ReplayAll()

    self.compute._reclaim_queued_deletes(ctxt)
def test_sync_power_states(self):
    """_sync_power_states walks all local instances and keeps going
    after per-instance errors.

    The first instance's driver lookup and sync both raise
    InstanceNotFound; the remaining two must still be synced (RUNNING
    and SHUTDOWN).  mox expectations are order-sensitive.
    """
    ctxt = self.context.elevated()
    self._create_fake_instance({'host': self.compute.host})
    self._create_fake_instance({'host': self.compute.host})
    self._create_fake_instance({'host': self.compute.host})
    self.mox.StubOutWithMock(self.compute.driver, 'get_info')
    self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')

    # Check to make sure task continues on error.
    self.compute.driver.get_info(mox.IgnoreArg()).AndRaise(
        exception.InstanceNotFound(instance_id='fake-uuid'))
    self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
                                            power_state.NOSTATE).AndRaise(
        exception.InstanceNotFound(instance_id='fake-uuid'))

    self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
        {'state': power_state.RUNNING})
    self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
                                            power_state.RUNNING,
                                            use_slave=True)
    self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
        {'state': power_state.SHUTDOWN})
    self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
                                            power_state.SHUTDOWN,
                                            use_slave=True)
    self.mox.ReplayAll()
    self.compute._sync_power_states(ctxt)
def _test_lifecycle_event(self, lifecycle_event, power_state):
    """Drive handle_events with one lifecycle event and verify the
    resulting power-state sync.

    :param lifecycle_event: event code passed to LifecycleEvent.
    :param power_state: expected state for _sync_instance_power_state,
        or None if no sync at all is expected.  NOTE: this parameter
        shadows the module-level ``power_state`` import within this
        method.

    Stubs are explicitly unset at the end so callers can invoke this
    helper repeatedly within a single test.
    """
    instance = self._create_fake_instance()
    uuid = instance['uuid']

    self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
    if power_state is not None:
        self.compute._sync_instance_power_state(
            mox.IgnoreArg(),
            mox.ContainsKeyValue('uuid', uuid),
            power_state)
    self.mox.ReplayAll()
    self.compute.handle_events(event.LifecycleEvent(uuid, lifecycle_event))
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def test_lifecycle_events(self):
    """Each lifecycle event code maps to the expected power state;
    an unknown code (-1) must trigger no power-state sync."""
    cases = [
        (event.EVENT_LIFECYCLE_STOPPED, power_state.SHUTDOWN),
        (event.EVENT_LIFECYCLE_STARTED, power_state.RUNNING),
        (event.EVENT_LIFECYCLE_PAUSED, power_state.PAUSED),
        (event.EVENT_LIFECYCLE_RESUMED, power_state.RUNNING),
        (-1, None),
    ]
    for lifecycle_event, expected_state in cases:
        self._test_lifecycle_event(lifecycle_event, expected_state)
def test_lifecycle_event_non_existent_instance(self):
    """Events for unknown instances are swallowed silently.

    No error is raised for a non-existent instance because of the
    inherent race between database updates and hypervisor events;
    see bug #1180501.
    """
    missing_uuid = 'does-not-exist'
    self.compute.handle_events(
        event.LifecycleEvent(missing_uuid, event.EVENT_LIFECYCLE_STOPPED))
@mock.patch.object(objects.Migration, 'get_by_id')
@mock.patch.object(objects.Quotas, 'rollback')
def test_confirm_resize_roll_back_quota_migration_not_found(self,
        mock_rollback, mock_get_by_id):
    """confirm_resize rolls back quota reservations when the
    migration record cannot be found."""
    instance = self._create_fake_instance_obj()

    migration = objects.Migration()
    migration.instance_uuid = instance.uuid
    migration.status = 'finished'
    migration.id = 0

    mock_get_by_id.side_effect = exception.MigrationNotFound(
            migration_id=0)
    self.compute.confirm_resize(self.context, instance=instance,
                                migration=migration, reservations=[])
    self.assertTrue(mock_rollback.called)
@mock.patch.object(instance_obj.Instance, 'get_by_uuid')
@mock.patch.object(objects.Quotas, 'rollback')
def test_confirm_resize_roll_back_quota_instance_not_found(self,
        mock_rollback, mock_get_by_id):
    """confirm_resize rolls back quota reservations when the instance
    cannot be found.

    NOTE(review): this patches ``instance_obj.Instance`` while sibling
    tests patch via ``objects`` — presumably the same class; confirm
    before unifying.
    """
    instance = self._create_fake_instance_obj()

    migration = objects.Migration()
    migration.instance_uuid = instance.uuid
    migration.status = 'finished'
    migration.id = 0

    mock_get_by_id.side_effect = exception.InstanceNotFound(
            instance_id=instance.uuid)
    self.compute.confirm_resize(self.context, instance=instance,
                                migration=migration, reservations=[])
    self.assertTrue(mock_rollback.called)
@mock.patch.object(objects.Migration, 'get_by_id')
@mock.patch.object(objects.Quotas, 'rollback')
def test_confirm_resize_roll_back_quota_status_confirmed(self,
        mock_rollback, mock_get_by_id):
    """confirm_resize rolls back quotas when the migration is already
    in 'confirmed' status."""
    instance = self._create_fake_instance_obj()

    migration = objects.Migration()
    migration.instance_uuid = instance.uuid
    migration.status = 'confirmed'
    migration.id = 0

    mock_get_by_id.return_value = migration
    self.compute.confirm_resize(self.context, instance=instance,
                                migration=migration, reservations=[])
    self.assertTrue(mock_rollback.called)
@mock.patch.object(objects.Migration, 'get_by_id')
@mock.patch.object(objects.Quotas, 'rollback')
def test_confirm_resize_roll_back_quota_status_dummy(self,
        mock_rollback, mock_get_by_id):
    """confirm_resize rolls back quotas when the migration is in an
    unexpected ('dummy') status."""
    instance = self._create_fake_instance_obj()

    migration = objects.Migration()
    migration.instance_uuid = instance.uuid
    migration.status = 'dummy'
    migration.id = 0

    mock_get_by_id.return_value = migration
    self.compute.confirm_resize(self.context, instance=instance,
                                migration=migration, reservations=[])
    self.assertTrue(mock_rollback.called)
def test_allow_confirm_resize_on_instance_in_deleting_task_state(self):
    """confirm_resize succeeds even while the instance task_state is
    DELETING; the instance ends up ACTIVE.

    Old/new/current flavor info is stored in system_metadata as the
    resize flow expects; resource tracker and network calls are
    stubbed to no-ops.
    """
    instance = self._create_fake_instance_obj()
    old_type = flavors.extract_flavor(instance)
    new_type = flavors.get_flavor_by_flavor_id('4')
    sys_meta = instance.system_metadata
    sys_meta = flavors.save_flavor_info(sys_meta,
                                        old_type, 'old_')
    sys_meta = flavors.save_flavor_info(sys_meta,
                                        new_type, 'new_')
    sys_meta = flavors.save_flavor_info(sys_meta,
                                        new_type)

    fake_rt = self.mox.CreateMockAnything()

    def fake_drop_resize_claim(*args, **kwargs):
        pass

    def fake_get_resource_tracker(self):
        return fake_rt

    def fake_setup_networks_on_host(self, *args, **kwargs):
        pass

    self.stubs.Set(fake_rt, 'drop_resize_claim', fake_drop_resize_claim)
    self.stubs.Set(self.compute, '_get_resource_tracker',
                   fake_get_resource_tracker)
    self.stubs.Set(self.compute.network_api, 'setup_networks_on_host',
                   fake_setup_networks_on_host)

    migration = objects.Migration()
    migration.instance_uuid = instance.uuid
    migration.status = 'finished'
    migration.create(self.context.elevated())

    instance.task_state = task_states.DELETING
    instance.vm_state = vm_states.RESIZED
    instance.system_metadata = sys_meta
    instance.save()

    self.compute.confirm_resize(self.context, instance=instance,
                                migration=migration, reservations=[])
    instance.refresh()
    self.assertEqual(vm_states.ACTIVE, instance['vm_state'])
def _get_instance_and_bdm_for_dev_defaults_tests(self):
    """Build the shared fixture for the device-name default tests.

    :returns: a 2-tuple of (instance with root_device_name /dev/vda,
        a one-element BlockDeviceMappingList for a boot volume on
        /dev/vda).
    """
    instance = self._create_fake_instance_obj(
        params={'root_device_name': '/dev/vda'})
    block_device_mapping = block_device_obj.block_device_make_list(
            self.context, [fake_block_device.FakeDbBlockDeviceDict(
                {'id': 3, 'instance_uuid': 'fake-instance',
                 'device_name': '/dev/vda',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'image_id': 'fake-image-id-1',
                 'boot_index': 0})])

    return instance, block_device_mapping
def test_default_block_device_names_empty_instance_root_dev(self):
    """When only the instance's root_device_name is missing, it is
    filled in from the bdm ('/dev/vda') and the instance is saved."""
    instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
    instance.root_device_name = None
    self.mox.StubOutWithMock(objects.Instance, 'save')
    self.mox.StubOutWithMock(self.compute,
                             '_default_device_names_for_instance')
    instance.save().AndReturn(None)
    self.compute._default_device_names_for_instance(instance,
                                                    '/dev/vda', [], [],
                                                    [bdm for bdm in bdms])
    self.mox.ReplayAll()
    self.compute._default_block_device_names(self.context,
                                             instance,
                                             {}, bdms)
    self.assertEqual('/dev/vda', instance.root_device_name)
def test_default_block_device_names_empty_root_device(self):
    """When only the bdm's device_name is missing, the bdm is saved
    and device-name defaulting is delegated."""
    instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
    bdms[0]['device_name'] = None
    self.mox.StubOutWithMock(self.compute,
                             '_default_device_names_for_instance')
    self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
    bdms[0].save().AndReturn(None)
    self.compute._default_device_names_for_instance(instance,
                                                    '/dev/vda', [], [],
                                                    [bdm for bdm in bdms])
    self.mox.ReplayAll()
    self.compute._default_block_device_names(self.context,
                                             instance,
                                             {}, bdms)
def test_default_block_device_names_no_root_device(self):
    """With neither instance root_device_name nor bdm device_name set,
    _default_root_device_name supplies '/dev/vda' and both objects
    are saved.  mox expectations are order-sensitive."""
    instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
    instance.root_device_name = None
    bdms[0]['device_name'] = None
    self.mox.StubOutWithMock(objects.Instance, 'save')
    self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
    self.mox.StubOutWithMock(self.compute,
                             '_default_root_device_name')
    self.mox.StubOutWithMock(self.compute,
                             '_default_device_names_for_instance')

    self.compute._default_root_device_name(instance, mox.IgnoreArg(),
                                           bdms[0]).AndReturn('/dev/vda')
    instance.save().AndReturn(None)
    bdms[0].save().AndReturn(None)
    self.compute._default_device_names_for_instance(instance,
                                                    '/dev/vda', [], [],
                                                    [bdm for bdm in bdms])
    self.mox.ReplayAll()
    self.compute._default_block_device_names(self.context,
                                             instance,
                                             {}, bdms)
    self.assertEqual('/dev/vda', instance.root_device_name)
def test_default_block_device_names_with_blank_volumes(self):
    """Blank-source bdms are classified correctly when defaulting
    device names.

    The ephemeral (blank/local) and swap (blank/local/swap) mappings
    are passed to _default_device_names_for_instance in their own
    positional slots; the root volume and the two blank volumes go in
    the general bdm list.
    """
    instance = self._create_fake_instance_obj()
    image_meta = {}
    root_volume = objects.BlockDeviceMapping(
        **fake_block_device.FakeDbBlockDeviceDict({
            'id': 1, 'instance_uuid': 'fake-instance',
            'source_type': 'volume',
            'destination_type': 'volume',
            'image_id': 'fake-image-id-1',
            'boot_index': 0}))
    blank_volume1 = objects.BlockDeviceMapping(
        **fake_block_device.FakeDbBlockDeviceDict({
            'id': 2, 'instance_uuid': 'fake-instance',
            'source_type': 'blank',
            'destination_type': 'volume',
            'boot_index': -1}))
    blank_volume2 = objects.BlockDeviceMapping(
        **fake_block_device.FakeDbBlockDeviceDict({
            'id': 3, 'instance_uuid': 'fake-instance',
            'source_type': 'blank',
            'destination_type': 'volume',
            'boot_index': -1}))
    ephemeral = objects.BlockDeviceMapping(
        **fake_block_device.FakeDbBlockDeviceDict({
            'id': 4, 'instance_uuid': 'fake-instance',
            'source_type': 'blank',
            'destination_type': 'local'}))
    swap = objects.BlockDeviceMapping(
        **fake_block_device.FakeDbBlockDeviceDict({
            'id': 5, 'instance_uuid': 'fake-instance',
            'source_type': 'blank',
            'destination_type': 'local',
            'guest_format': 'swap'
            }))
    bdms = block_device_obj.block_device_make_list(
        self.context, [root_volume, blank_volume1, blank_volume2,
                       ephemeral, swap])

    with contextlib.nested(
        mock.patch.object(self.compute, '_default_root_device_name',
                          return_value='/dev/vda'),
        mock.patch.object(objects.Instance, 'save'),
        mock.patch.object(objects.BlockDeviceMapping, 'save'),
        mock.patch.object(self.compute,
                          '_default_device_names_for_instance')
    ) as (default_root_device, instance_update, object_save,
          default_device_names):
        self.compute._default_block_device_names(self.context, instance,
                                                 image_meta, bdms)
        default_root_device.assert_called_once_with(instance, image_meta,
                                                    bdms[0])
        instance_update.assert_called_once_with()
        self.assertEqual('/dev/vda', instance.root_device_name)
        self.assertTrue(object_save.called)
        # bdms[-2] is the ephemeral, bdms[-1] the swap mapping.
        default_device_names.assert_called_once_with(instance,
                                                     '/dev/vda', [bdms[-2]],
                                                     [bdms[-1]],
                                                     [bdm for bdm
                                                      in bdms[:-2]])
def test_reserve_block_device_name(self):
    """reserve_block_device_name creates a volume-type bdm on /dev/vdb
    alongside the pre-existing image bdm on /dev/vda."""
    instance = self._create_fake_instance_obj(
            params={'root_device_name': '/dev/vda'})
    bdm = objects.BlockDeviceMapping(
            **{'source_type': 'image', 'destination_type': 'local',
               'image_id': 'fake-image-id', 'device_name': '/dev/vda',
               'instance_uuid': instance.uuid})
    bdm.create(self.context)

    self.compute.reserve_block_device_name(self.context, instance,
                                           '/dev/vdb', 'fake-volume-id',
                                           'virtio', 'disk')

    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            self.context, instance.uuid)
    bdms = list(bdms)
    self.assertEqual(len(bdms), 2)
    # Sort by device name so the /dev/vdb entry is deterministically
    # at index 1.
    bdms.sort(key=operator.attrgetter('device_name'))
    vol_bdm = bdms[1]
    self.assertEqual(vol_bdm.source_type, 'volume')
    self.assertEqual(vol_bdm.destination_type, 'volume')
    self.assertEqual(vol_bdm.device_name, '/dev/vdb')
    self.assertEqual(vol_bdm.volume_id, 'fake-volume-id')
    self.assertEqual(vol_bdm.disk_bus, 'virtio')
    self.assertEqual(vol_bdm.device_type, 'disk')
class ComputeAPITestCase(BaseTestCase):
def setUp(self):
    """Set up a compute API with stubbed network info and a fake image.

    ``self.fake_show`` mimics the image service's show(): it returns
    ``self.fake_image`` for any truthy image id and raises
    ImageNotFound otherwise.
    """
    def fake_get_nw_info(cls, ctxt, instance):
        # The compute API is expected to query network info with an
        # elevated (admin) context.
        self.assertTrue(ctxt.is_admin)
        return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

    super(ComputeAPITestCase, self).setUp()
    self.stubs.Set(network_api.API, 'get_instance_nw_info',
                   fake_get_nw_info)
    self.security_group_api = (
        openstack_driver.get_openstack_security_group_driver())

    self.compute_api = compute.API(
                               security_group_api=self.security_group_api)
    self.fake_image = {
        'id': 1,
        'name': 'fake_name',
        'status': 'active',
        'properties': {'kernel_id': 'fake_kernel_id',
                       'ramdisk_id': 'fake_ramdisk_id'},
    }

    def fake_show(obj, context, image_id, **kwargs):
        if image_id:
            return self.fake_image
        else:
            raise exception.ImageNotFound(image_id=image_id)

    self.fake_show = fake_show
def _run_instance(self, params=None):
    """Create and run a fake instance, asserting it settles.

    :param params: optional overrides for the fake instance.
    :returns: (instance, instance_uuid) after run_instance completed
        and the task_state has returned to None.
    """
    instance = self._create_fake_instance_obj(params, services=True)
    instance_uuid = instance['uuid']
    self.compute.run_instance(self.context, instance, {}, {}, None, None,
            None, True, None, False)

    instance.refresh()
    self.assertIsNone(instance['task_state'])
    return instance, instance_uuid
def test_create_with_too_little_ram(self):
    # Test an instance type with too little memory.
    """Flavor memory below the image min_ram raises
    FlavorMemoryTooSmall; matching min_ram succeeds."""
    inst_type = flavors.get_default_flavor()
    inst_type['memory_mb'] = 1

    self.fake_image['min_ram'] = 2
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

    self.assertRaises(exception.FlavorMemoryTooSmall,
        self.compute_api.create, self.context,
        inst_type, self.fake_image['id'])

    # Now increase the inst_type memory and make sure all is fine.
    inst_type['memory_mb'] = 2
    (refs, resv_id) = self.compute_api.create(self.context,
            inst_type, self.fake_image['id'])
    db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_little_disk(self):
    # Test an instance type with too little disk space.
    """Flavor root disk below the image min_disk raises
    FlavorDiskTooSmall; matching min_disk succeeds."""
    inst_type = flavors.get_default_flavor()
    inst_type['root_gb'] = 1

    self.fake_image['min_disk'] = 2
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

    self.assertRaises(exception.FlavorDiskTooSmall,
        self.compute_api.create, self.context,
        inst_type, self.fake_image['id'])

    # Now increase the inst_type disk space and make sure all is fine.
    inst_type['root_gb'] = 2
    (refs, resv_id) = self.compute_api.create(self.context,
            inst_type, self.fake_image['id'])
    db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_large_image(self):
    """An image one byte over the flavor's 1 GB root disk raises
    FlavorDiskTooSmall; exactly 1 GB (2**30 bytes) succeeds."""
    inst_type = flavors.get_default_flavor()
    inst_type['root_gb'] = 1

    self.fake_image['size'] = '1073741825'
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

    self.assertRaises(exception.FlavorDiskTooSmall,
        self.compute_api.create, self.context,
        inst_type, self.fake_image['id'])

    # Reduce image to 1 GB limit and ensure it works
    self.fake_image['size'] = '1073741824'
    (refs, resv_id) = self.compute_api.create(self.context,
            inst_type, self.fake_image['id'])
    db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_just_enough_ram_and_disk(self):
    # Test an instance type with just enough ram and disk space.
    """Flavor values exactly equal to image min_ram/min_disk must be
    accepted."""
    inst_type = flavors.get_default_flavor()
    inst_type['root_gb'] = 2
    inst_type['memory_mb'] = 2

    self.fake_image['min_ram'] = 2
    self.fake_image['min_disk'] = 2
    self.fake_image['name'] = 'fake_name'
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)

    (refs, resv_id) = self.compute_api.create(self.context,
            inst_type, self.fake_image['id'])
    db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_no_ram_and_disk_reqs(self):
    """An image that declares no min_ram/min_disk is accepted even by
    the smallest possible flavor.
    """
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    flavor = flavors.get_default_flavor()
    flavor['root_gb'] = 1
    flavor['memory_mb'] = 1
    refs, resv_id = self.compute_api.create(self.context, flavor,
                                            self.fake_image['id'])
    db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_deleted_image(self):
    # If we're given a deleted image by glance, we should not be able to
    # build from it
    inst_type = flavors.get_default_flavor()
    self.fake_image['name'] = 'fake_name'
    self.fake_image['status'] = 'DELETED'
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    # The raised ImageNotActive must carry the offending image id in
    # its message, so build the expected text from the msg_fmt template.
    expected_message = (
        exception.ImageNotActive.msg_fmt % {'image_id':
                                            self.fake_image['id']})
    with testtools.ExpectedException(exception.ImageNotActive,
                                     expected_message):
        self.compute_api.create(self.context, inst_type,
                                self.fake_image['id'])
@mock.patch('nova.virt.hardware.VirtNUMAInstanceTopology.get_constraints')
def test_create_with_numa_topology(self, numa_constraints_mock):
    """The NUMA topology computed by get_constraints must be stored on
    the created instance and round-trip through the object layer.
    """
    inst_type = flavors.get_default_flavor()
    # This is what the stubbed out method will return
    fake_image_props = {'kernel_id': 'fake_kernel_id',
                        'ramdisk_id': 'fake_ramdisk_id',
                        'something_else': 'meow'}
    # Two-cell topology: cell 0 on cpus {1,2}, cell 1 on cpus {3,4},
    # 512 (MB, presumably) of memory each — TODO confirm unit.
    numa_topology = hardware.VirtNUMAInstanceTopology(
        cells=[hardware.VirtNUMATopologyCell(0, set([1, 2]), 512),
               hardware.VirtNUMATopologyCell(1, set([3, 4]), 512)])
    numa_constraints_mock.return_value = numa_topology
    instances, resv_id = self.compute_api.create(self.context, inst_type,
                                                 self.fake_image['id'])
    # create() must consult the constraint helper exactly once with the
    # flavor and the image properties.
    numa_constraints_mock.assert_called_once_with(
        inst_type, fake_image_props)
    self.assertThat(numa_topology._to_dict(),
                    matchers.DictMatches(
                        instances[0].numa_topology
                        .topology_from_obj()._to_dict()))
def test_create_instance_defaults_display_name(self):
    # Verify that an instance created without a display_name (missing
    # or explicitly None) still gets a non-None default assigned.
    cases = [dict(), dict(display_name=None)]
    for instance in cases:
        (ref, resv_id) = self.compute_api.create(self.context,
            flavors.get_default_flavor(),
            'fake-image-uuid', **instance)
        try:
            self.assertIsNotNone(ref[0]['display_name'])
        finally:
            # Always clean up so the next loop iteration starts fresh.
            db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_sets_system_metadata(self):
    # Make sure image properties are copied into system metadata.
    (ref, resv_id) = self.compute_api.create(
        self.context,
        instance_type=flavors.get_default_flavor(),
        image_href='fake-image-uuid')
    try:
        sys_metadata = db.instance_system_metadata_get(self.context,
                                                       ref[0]['uuid'])
        # Image properties are expected to be stored with an 'image_'
        # key prefix.
        image_props = {'image_kernel_id': 'fake_kernel_id',
                       'image_ramdisk_id': 'fake_ramdisk_id',
                       'image_something_else': 'meow', }
        for key, value in image_props.iteritems():
            self.assertIn(key, sys_metadata)
            self.assertEqual(value, sys_metadata[key])
    finally:
        db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_saves_type_in_system_metadata(self):
    """All relevant flavor properties must be snapshotted into the
    instance's system metadata under 'instance_type_<prop>' keys.
    """
    instance_type = flavors.get_default_flavor()
    (ref, resv_id) = self.compute_api.create(
        self.context,
        instance_type=instance_type,
        image_href='some-fake-image')
    try:
        sys_metadata = db.instance_system_metadata_get(self.context,
                                                       ref[0]['uuid'])
        instance_type_props = ['name', 'memory_mb', 'vcpus', 'root_gb',
                               'ephemeral_gb', 'flavorid', 'swap',
                               'rxtx_factor', 'vcpu_weight']
        for key in instance_type_props:
            sys_meta_key = "instance_type_%s" % key
            self.assertIn(sys_meta_key, sys_metadata)
            # Values are stored stringified; compare as strings.
            self.assertEqual(str(instance_type[key]),
                             str(sys_metadata[sys_meta_key]))
    finally:
        db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_associates_security_groups(self):
    # Make sure create associates security groups.
    group = self._create_group()
    (ref, resv_id) = self.compute_api.create(
        self.context,
        instance_type=flavors.get_default_flavor(),
        image_href='some-fake-image',
        security_group=['testgroup'])
    try:
        # Exactly one group attached to the instance...
        self.assertEqual(len(db.security_group_get_by_instance(
            self.context, ref[0]['uuid'])), 1)
        # ...and, from the group's side, exactly one member instance.
        group = db.security_group_get(self.context, group['id'])
        self.assertEqual(1, len(group['instances']))
    finally:
        db.security_group_destroy(self.context, group['id'])
        db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_with_invalid_security_group_raises(self):
    """Requesting an unknown security group must fail and must not
    leave a partially-built instance in the database.
    """
    count_before = len(db.instance_get_all(self.context))
    self.assertRaises(exception.SecurityGroupNotFoundForProject,
                      self.compute_api.create,
                      self.context,
                      instance_type=flavors.get_default_flavor(),
                      image_href=None,
                      security_group=['this_is_a_fake_sec_group'])
    count_after = len(db.instance_get_all(self.context))
    self.assertEqual(count_before, count_after)
def test_create_with_large_user_data(self):
    """User data of 64 KiB (65536 bytes) must be rejected with
    InstanceUserDataTooLarge.
    """
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    self.fake_image['min_ram'] = 2
    flavor = flavors.get_default_flavor()
    oversized = '1' * 65536
    self.assertRaises(exception.InstanceUserDataTooLarge,
                      self.compute_api.create, self.context, flavor,
                      self.fake_image['id'], user_data=oversized)
def test_create_with_malformed_user_data(self):
    """User data that is not valid base64 must be rejected with
    InstanceUserDataMalformed.
    """
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    self.fake_image['min_ram'] = 2
    flavor = flavors.get_default_flavor()
    self.assertRaises(exception.InstanceUserDataMalformed,
                      self.compute_api.create, self.context, flavor,
                      self.fake_image['id'], user_data='banana')
def test_create_with_base64_user_data(self):
    """Properly base64-encoded user data just under the size limit
    must be accepted.
    """
    inst_type = flavors.get_default_flavor()
    self.fake_image['min_ram'] = 2
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    # NOTE(mikal): a string of length 48510 encodes to 65532 characters of
    # base64
    (refs, resv_id) = self.compute_api.create(
        self.context, inst_type, self.fake_image['id'],
        user_data=base64.encodestring('1' * 48510))
    db.instance_destroy(self.context, refs[0]['uuid'])
def test_populate_instance_for_create(self):
    """_populate_instance_for_create must fill in scheduling state,
    launch index, uuid, image ref metadata and empty security groups.
    """
    base_options = {'image_ref': self.fake_image['id'],
                    'system_metadata': {'fake': 'value'}}
    instance = objects.Instance()
    instance.update(base_options)
    inst_type = flavors.get_flavor_by_name("m1.tiny")
    # launch index 1, no security groups requested.
    instance = self.compute_api._populate_instance_for_create(
        self.context,
        instance,
        self.fake_image,
        1,
        security_groups=None,
        instance_type=inst_type)
    # Image ref is recorded (stringified) in system metadata.
    self.assertEqual(str(base_options['image_ref']),
                     instance['system_metadata']['image_base_image_ref'])
    self.assertEqual(vm_states.BUILDING, instance['vm_state'])
    self.assertEqual(task_states.SCHEDULING, instance['task_state'])
    self.assertEqual(1, instance['launch_index'])
    # A uuid must have been generated for the new instance.
    self.assertIsNotNone(instance.get('uuid'))
    self.assertEqual([], instance.security_groups.objects)
def test_default_hostname_generator(self):
    """Hostnames are derived from display_name: sanitized (spaces and
    underscores to dashes, control/special chars stripped), or
    'server-<uuid>' when no display_name is given.
    """
    fake_uuids = [str(uuid.uuid4()) for x in xrange(4)]
    orig_populate = self.compute_api._populate_instance_for_create

    # Wrap the real populate helper so each created instance gets a
    # predetermined uuid we can assert against.
    def _fake_populate(context, base_options, *args, **kwargs):
        base_options['uuid'] = fake_uuids.pop(0)
        return orig_populate(context, base_options, *args, **kwargs)
    self.stubs.Set(self.compute_api,
                   '_populate_instance_for_create',
                   _fake_populate)
    # (display_name given to create, hostname expected on the result)
    cases = [(None, 'server-%s' % fake_uuids[0]),
             ('Hello, Server!', 'hello-server'),
             ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
             ('hello_server', 'hello-server')]
    for display_name, hostname in cases:
        (ref, resv_id) = self.compute_api.create(self.context,
            flavors.get_default_flavor(), image_href='some-fake-image',
            display_name=display_name)
        try:
            self.assertEqual(ref[0]['hostname'], hostname)
        finally:
            db.instance_destroy(self.context, ref[0]['uuid'])
def test_instance_create_adds_to_instance_group(self):
    """Passing an existing group uuid via scheduler_hints must add the
    new instance to that group's members.
    """
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    group = objects.InstanceGroup(self.context)
    group.uuid = str(uuid.uuid4())
    group.create()
    inst_type = flavors.get_default_flavor()
    (refs, resv_id) = self.compute_api.create(
        self.context, inst_type, self.fake_image['id'],
        scheduler_hints={'group': group.uuid})
    # Re-fetch the group to observe the membership change.
    group = objects.InstanceGroup.get_by_uuid(self.context, group.uuid)
    self.assertIn(refs[0]['uuid'], group.members)
    db.instance_destroy(self.context, refs[0]['uuid'])
def test_instance_create_auto_creates_group(self):
    """Passing a non-uuid group name via scheduler_hints must
    auto-create a 'legacy'-policy group on first use and reuse the
    same group for subsequent instances with the same name.
    """
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    inst_type = flavors.get_default_flavor()
    (refs, resv_id) = self.compute_api.create(
        self.context, inst_type, self.fake_image['id'],
        scheduler_hints={'group': 'groupname'})
    group = objects.InstanceGroup.get_by_name(self.context, 'groupname')
    self.assertEqual('groupname', group.name)
    self.assertIn('legacy', group.policies)
    self.assertEqual(1, len(group.members))
    self.assertIn(refs[0]['uuid'], group.members)
    # On a second instance, make sure it gets added to the group that was
    # auto-created above
    (refs2, resv_id) = self.compute_api.create(
        self.context, inst_type, self.fake_image['id'],
        scheduler_hints={'group': 'groupname'})
    group = objects.InstanceGroup.get_by_name(self.context, 'groupname')
    self.assertEqual('groupname', group.name)
    self.assertIn('legacy', group.policies)
    self.assertEqual(2, len(group.members))
    self.assertIn(refs[0]['uuid'], group.members)
    self.assertIn(refs2[0]['uuid'], group.members)
    db.instance_destroy(self.context, refs[0]['uuid'])
    # Also destroy the second instance; the original test leaked it.
    db.instance_destroy(self.context, refs2[0]['uuid'])
def test_destroy_instance_disassociates_security_groups(self):
    # Make sure destroying disassociates security groups.
    group = self._create_group()
    (ref, resv_id) = self.compute_api.create(
        self.context,
        instance_type=flavors.get_default_flavor(),
        image_href='some-fake-image',
        security_group=['testgroup'])
    try:
        db.instance_destroy(self.context, ref[0]['uuid'])
        # After the instance is gone the group must have no members.
        group = db.security_group_get(self.context, group['id'])
        self.assertEqual(0, len(group['instances']))
    finally:
        db.security_group_destroy(self.context, group['id'])
def test_destroy_security_group_disassociates_instances(self):
    # Make sure destroying security groups disassociates instances.
    group = self._create_group()
    (ref, resv_id) = self.compute_api.create(
        self.context,
        instance_type=flavors.get_default_flavor(),
        image_href='some-fake-image',
        security_group=['testgroup'])
    try:
        db.security_group_destroy(self.context, group['id'])
        # The group row is soft-deleted; re-read it through an admin
        # context that can see deleted rows.
        admin_deleted_context = context.get_admin_context(
            read_deleted="only")
        group = db.security_group_get(admin_deleted_context, group['id'])
        self.assertEqual(0, len(group['instances']))
    finally:
        db.instance_destroy(self.context, ref[0]['uuid'])
def _test_rebuild(self, vm_state):
    """Common rebuild scenario: build an instance, put it in the given
    vm_state, rebuild it with a new image and verify the task state,
    the rebuilt image ref and the reset of image system metadata.

    :param vm_state: vm_states.* value the instance is placed in
                     before the rebuild is requested.
    """
    instance = self._create_fake_instance_obj()
    instance_uuid = instance['uuid']
    self.compute.run_instance(self.context, instance, {}, {}, None, None,
                              None, True, None, False)
    instance = objects.Instance.get_by_uuid(self.context,
                                            instance_uuid)
    self.assertIsNone(instance.task_state)
    # Set some image metadata that should get wiped out and reset
    # as well as some other metadata that should be preserved.
    instance.system_metadata.update({
        'image_kernel_id': 'old-data',
        'image_ramdisk_id': 'old_data',
        'image_something_else': 'old-data',
        'image_should_remove': 'bye-bye',
        'preserved': 'preserve this!'})
    instance.save()
    # Make sure Compute API updates the image_ref before casting to
    # compute manager.
    info = {'image_ref': None, 'clean': False}

    # Intercept the RPC cast and record the instance's image_ref and
    # whether all object changes had been saved ('clean') at that point.
    def fake_rpc_rebuild(context, **kwargs):
        info['image_ref'] = kwargs['instance'].image_ref
        info['clean'] = kwargs['instance'].obj_what_changed() == set()
    self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
                   fake_rpc_rebuild)
    image_ref = instance["image_ref"] + '-new_image_ref'
    password = "new_password"
    instance.vm_state = vm_state
    instance.save()
    self.compute_api.rebuild(self.context, instance, image_ref, password)
    self.assertEqual(info['image_ref'], image_ref)
    self.assertTrue(info['clean'])
    instance.refresh()
    self.assertEqual(instance.task_state, task_states.REBUILDING)
    # Compare only the image/preserved keys; flavor snapshot keys
    # ('instance_type_*') are out of scope for this assertion.
    sys_meta = dict([(k, v) for k, v in instance.system_metadata.items()
                     if not k.startswith('instance_type')])
    self.assertEqual(sys_meta,
                     {'image_kernel_id': 'fake_kernel_id',
                      'image_min_disk': '1',
                      'image_ramdisk_id': 'fake_ramdisk_id',
                      'image_something_else': 'meow',
                      'preserved': 'preserve this!'})
    instance.destroy()
def test_rebuild(self):
    """Rebuild is allowed for an ACTIVE instance."""
    self._test_rebuild(vm_state=vm_states.ACTIVE)
def test_rebuild_in_error_state(self):
    """Rebuild is allowed for an instance stuck in ERROR state."""
    self._test_rebuild(vm_state=vm_states.ERROR)
def test_rebuild_in_error_not_launched(self):
    """An ERROR instance that never launched (launched_at is None)
    must refuse to rebuild with InstanceInvalidState.
    """
    instance = self._create_fake_instance_obj(params={'image_ref': ''})
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    self.compute.run_instance(self.context, instance, {}, {}, None, None,
                              None, True, None, False)
    # Force the never-launched ERROR condition directly in the DB.
    db.instance_update(self.context, instance['uuid'],
                       {"vm_state": vm_states.ERROR,
                        "launched_at": None})
    instance = db.instance_get_by_uuid(self.context, instance['uuid'])
    self.assertRaises(exception.InstanceInvalidState,
                      self.compute_api.rebuild,
                      self.context,
                      instance,
                      instance['image_ref'],
                      "new password")
def test_rebuild_no_image(self):
    """An instance booted without an image (empty image_ref) can still
    be rebuilt and ends up in the REBUILDING task state.
    """
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    instance = self._create_fake_instance_obj(params={'image_ref': ''})
    inst_uuid = instance.uuid
    self.compute.run_instance(self.context, instance, {}, {}, None, None,
                              None, True, None, False)
    self.compute_api.rebuild(self.context, instance, '', 'new_password')
    rebuilt = db.instance_get_by_uuid(self.context, inst_uuid)
    self.assertEqual(rebuilt['task_state'], task_states.REBUILDING)
def test_rebuild_with_deleted_image(self):
    # If we're given a deleted image by glance, we should not be able to
    # rebuild from it
    instance = self._create_fake_instance_obj(params={'image_ref': '1'})
    self.fake_image['name'] = 'fake_name'
    self.fake_image['status'] = 'DELETED'
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    # The raised ImageNotActive must name the offending image id.
    expected_message = (
        exception.ImageNotActive.msg_fmt % {'image_id':
                                            self.fake_image['id']})
    with testtools.ExpectedException(exception.ImageNotActive,
                                     expected_message):
        self.compute_api.rebuild(self.context, instance,
                                 self.fake_image['id'], 'new_password')
def test_rebuild_with_too_little_ram(self):
    """Rebuild against an image whose min_ram exceeds the instance's
    flavor memory must raise FlavorMemoryTooSmall.
    """
    instance = self._create_fake_instance_obj(params={'image_ref': '1'})

    # Pin the instance's flavor to 64 MB RAM / 1 GB root disk.
    def fake_extract_flavor(_inst, prefix):
        self.assertEqual('', prefix)
        return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
    self.stubs.Set(flavors, 'extract_flavor',
                   fake_extract_flavor)
    self.fake_image['min_ram'] = 128
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    self.assertRaises(exception.FlavorMemoryTooSmall,
                      self.compute_api.rebuild, self.context,
                      instance, self.fake_image['id'], 'new_password')
    # Reduce image memory requirements and make sure it works
    self.fake_image['min_ram'] = 64
    self.compute_api.rebuild(self.context,
                             instance, self.fake_image['id'], 'new_password')
    db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_too_little_disk(self):
    """Rebuild against an image whose min_disk exceeds the instance's
    flavor root disk must raise FlavorDiskTooSmall.
    """
    instance = self._create_fake_instance_obj(params={'image_ref': '1'})

    # Pin the instance's flavor to 64 MB RAM / 1 GB root disk.
    def fake_extract_flavor(_inst, prefix):
        self.assertEqual('', prefix)
        return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
    self.stubs.Set(flavors, 'extract_flavor',
                   fake_extract_flavor)
    self.fake_image['min_disk'] = 2
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    self.assertRaises(exception.FlavorDiskTooSmall,
                      self.compute_api.rebuild, self.context,
                      instance, self.fake_image['id'], 'new_password')
    # Reduce image disk requirements and make sure it works
    self.fake_image['min_disk'] = 1
    self.compute_api.rebuild(self.context,
                             instance, self.fake_image['id'], 'new_password')
    db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_just_enough_ram_and_disk(self):
    """Rebuild succeeds when the image requirements exactly match the
    flavor's memory and root disk (boundary case).
    """
    instance = self._create_fake_instance_obj(params={'image_ref': '1'})

    # Pin the instance's flavor to 64 MB RAM / 1 GB root disk.
    def fake_extract_flavor(_inst, prefix):
        self.assertEqual('', prefix)
        return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
    self.stubs.Set(flavors, 'extract_flavor',
                   fake_extract_flavor)
    self.fake_image['min_ram'] = 64
    self.fake_image['min_disk'] = 1
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    self.compute_api.rebuild(self.context,
                             instance, self.fake_image['id'], 'new_password')
    db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_no_ram_and_disk_reqs(self):
    """Rebuild succeeds when the image declares no min_ram/min_disk."""
    instance = self._create_fake_instance_obj(params={'image_ref': '1'})

    # Pin the instance's flavor to 64 MB RAM / 1 GB root disk.
    def fake_extract_flavor(_inst, prefix):
        self.assertEqual('', prefix)
        return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
    self.stubs.Set(flavors, 'extract_flavor',
                   fake_extract_flavor)
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    self.compute_api.rebuild(self.context,
                             instance, self.fake_image['id'], 'new_password')
    db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_too_large_image(self):
    """Rebuild with an image larger than the flavor's root disk must
    raise FlavorDiskTooSmall; an image at exactly 1 GB is accepted.
    """
    instance = self._create_fake_instance_obj(params={'image_ref': '1'})

    # Pin the instance's flavor to 64 MB RAM / 1 GB root disk.
    def fake_extract_flavor(_inst, prefix):
        self.assertEqual('', prefix)
        return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
    self.stubs.Set(flavors, 'extract_flavor',
                   fake_extract_flavor)
    # 1 GB + 1 byte: just over the flavor's root disk capacity.
    self.fake_image['size'] = '1073741825'
    self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
    self.assertRaises(exception.FlavorDiskTooSmall,
                      self.compute_api.rebuild, self.context,
                      instance, self.fake_image['id'], 'new_password')
    # Reduce image to 1 GB limit and ensure it works
    self.fake_image['size'] = '1073741824'
    self.compute_api.rebuild(self.context,
                             instance, self.fake_image['id'], 'new_password')
    db.instance_destroy(self.context, instance['uuid'])
def test_hostname_create(self):
    """The display_name is sanitized (space -> dash, lowercased) into
    the instance hostname at creation time.
    """
    flavor = flavors.get_flavor_by_name('m1.tiny')
    instances, _resv_id = self.compute_api.create(
        self.context,
        flavor,
        image_href='some-fake-image',
        display_name='test host')
    self.assertEqual('test-host', instances[0]['hostname'])
def _fake_rescue_block_devices(self, instance, status="in-use"):
    """Build a fake boot-from-volume BDM list plus a matching volume
    dict for the given instance, for use in the rescue tests.

    :param instance: instance dict/object the volume is attached to.
    :param status: unused here; callers set the volume status on the
                   mocked cinder get themselves — TODO confirm intent.
    :returns: (bdms, volume) tuple.
    """
    fake_bdms = block_device_obj.block_device_make_list(self.context,
        [fake_block_device.FakeDbBlockDeviceDict(
            {'device_name': '/dev/vda',
             'source_type': 'volume',
             'boot_index': 0,
             'destination_type': 'volume',
             'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'})])
    volume = {'id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66',
              'state': 'active', 'instance_uuid': instance['uuid']}
    return fake_bdms, volume
@mock.patch.object(objects.BlockDeviceMappingList,
                   'get_by_instance_uuid')
@mock.patch.object(cinder.API, 'get')
def test_rescue_volume_backed_no_image(self, mock_get_vol, mock_get_bdms):
    """Rescuing a volume-backed instance that booted without an image
    must be refused with InstanceNotRescuable.
    """
    # Instance started without an image
    params = {'image_ref': ''}
    volume_backed_inst_1 = self._create_fake_instance_obj(params=params)
    bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_1)
    mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
    mock_get_bdms.return_value = bdms
    # Skip real block-device preparation while booting the instance.
    with mock.patch.object(self.compute, '_prep_block_device'):
        self.compute.run_instance(self.context,
                                  volume_backed_inst_1, {}, {}, None, None,
                                  None, True, None, False)
    self.assertRaises(exception.InstanceNotRescuable,
                      self.compute_api.rescue, self.context,
                      volume_backed_inst_1)
    self.compute.terminate_instance(self.context, volume_backed_inst_1,
                                    [], [])
@mock.patch.object(objects.BlockDeviceMappingList,
                   'get_by_instance_uuid')
@mock.patch.object(cinder.API, 'get')
def test_rescue_volume_backed_placeholder_image(self,
                                                mock_get_vol,
                                                mock_get_bdms):
    """A volume-backed instance is not rescuable even when it carries a
    placeholder image_ref (used only for metadata).
    """
    # Instance started with a placeholder image (for metadata)
    volume_backed_inst_2 = self._create_fake_instance_obj(
        {'image_ref': 'my_placeholder_img',
         'root_device_name': '/dev/vda'})
    bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_2)
    mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
    mock_get_bdms.return_value = bdms
    # Skip real block-device preparation while booting the instance.
    with mock.patch.object(self.compute, '_prep_block_device'):
        self.compute.run_instance(self.context,
                                  volume_backed_inst_2, {}, {}, None, None,
                                  None, True, None, False)
    self.assertRaises(exception.InstanceNotRescuable,
                      self.compute_api.rescue, self.context,
                      volume_backed_inst_2)
    self.compute.terminate_instance(self.context, volume_backed_inst_2,
                                    [], [])
def test_get(self):
    # Test get instance.
    exp_instance = self._create_fake_instance()
    # NOTE(danms): Transform the db object in a similar way as
    # the API method will do.
    expected = obj_base.obj_to_primitive(
        objects.Instance._from_db_object(
            self.context, objects.Instance(), exp_instance,
            instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))

    # Short-circuit the DB layer so get() returns our fixture row.
    def fake_db_get(_context, _instance_uuid,
                    columns_to_join=None, use_slave=False):
        return exp_instance
    self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
    instance = self.compute_api.get(self.context, exp_instance['uuid'])
    self.assertEqual(unify_instance(expected),
                     unify_instance(instance))
def test_get_with_admin_context(self):
    # Test get instance under an admin context.
    c = context.get_admin_context()
    exp_instance = self._create_fake_instance()
    # NOTE(danms): Transform the db object in a similar way as
    # the API method will do.
    expected = obj_base.obj_to_primitive(
        objects.Instance._from_db_object(
            c, objects.Instance(), exp_instance,
            instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))

    # Short-circuit the DB layer so get() returns our fixture row.
    def fake_db_get(context, instance_uuid,
                    columns_to_join=None, use_slave=False):
        return exp_instance
    self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
    instance = self.compute_api.get(c, exp_instance['uuid'])
    self.assertEqual(unify_instance(expected),
                     unify_instance(instance))
def test_get_with_integer_id(self):
    # Test get instance with an integer id (falls back to
    # db.instance_get rather than the uuid lookup).
    exp_instance = self._create_fake_instance()
    # NOTE(danms): Transform the db object in a similar way as
    # the API method will do.
    expected = obj_base.obj_to_primitive(
        objects.Instance._from_db_object(
            self.context, objects.Instance(), exp_instance,
            instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))

    # Short-circuit the DB layer so get() returns our fixture row.
    def fake_db_get(_context, _instance_id, columns_to_join=None):
        return exp_instance
    self.stubs.Set(db, 'instance_get', fake_db_get)
    instance = self.compute_api.get(self.context, exp_instance['id'])
    self.assertEqual(unify_instance(expected),
                     unify_instance(instance))
def test_get_all_by_name_regexp(self):
    # Test searching instances by name (display_name) with regexes.
    c = context.get_admin_context()
    instance1 = self._create_fake_instance({'display_name': 'woot'})
    instance2 = self._create_fake_instance({
            'display_name': 'woo'})
    instance3 = self._create_fake_instance({
            'display_name': 'not-woot'})
    # '^woo.*' matches 'woot' and 'woo' but not 'not-woot' (anchored).
    instances = self.compute_api.get_all(c,
            search_opts={'name': '^woo.*'})
    self.assertEqual(len(instances), 2)
    instance_uuids = [instance['uuid'] for instance in instances]
    self.assertIn(instance1['uuid'], instance_uuids)
    self.assertIn(instance2['uuid'], instance_uuids)
    # '^woot.*' matches only 'woot'.
    instances = self.compute_api.get_all(c,
            search_opts={'name': '^woot.*'})
    instance_uuids = [instance['uuid'] for instance in instances]
    self.assertEqual(len(instances), 1)
    self.assertIn(instance1['uuid'], instance_uuids)
    # Unanchored '.*oot.*' matches 'woot' and 'not-woot'.
    instances = self.compute_api.get_all(c,
            search_opts={'name': '.*oot.*'})
    self.assertEqual(len(instances), 2)
    instance_uuids = [instance['uuid'] for instance in instances]
    self.assertIn(instance1['uuid'], instance_uuids)
    self.assertIn(instance3['uuid'], instance_uuids)
    # '^n.*' matches only 'not-woot'.
    instances = self.compute_api.get_all(c,
            search_opts={'name': '^n.*'})
    self.assertEqual(len(instances), 1)
    instance_uuids = [instance['uuid'] for instance in instances]
    self.assertIn(instance3['uuid'], instance_uuids)
    # No display_name contains 'noth'.
    instances = self.compute_api.get_all(c,
            search_opts={'name': 'noth.*'})
    self.assertEqual(len(instances), 0)
    db.instance_destroy(c, instance1['uuid'])
    db.instance_destroy(c, instance2['uuid'])
    db.instance_destroy(c, instance3['uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_all_by_multiple_options_at_once(self, fixed_get, network_get):
    # Test searching by multiple options at once; all filters must
    # match (intersection semantics).
    c = context.get_admin_context()
    network_manager = fake_network.FakeNetworkManager(self.stubs)
    # Route the mocked DB network calls through the fake network
    # manager's in-memory DB.
    fixed_get.side_effect = (
        network_manager.db.fixed_ips_by_virtual_interface)
    network_get.return_value = (
        dict(test_network.fake_network,
             **network_manager.db.network_get(None, 1)))
    self.stubs.Set(self.compute_api.network_api,
                   'get_instance_uuids_by_ip_filter',
                   network_manager.get_instance_uuids_by_ip_filter)
    instance1 = self._create_fake_instance({
            'display_name': 'woot',
            'id': 1,
            'uuid': '00000000-0000-0000-0000-000000000010'})
    instance2 = self._create_fake_instance({
            'display_name': 'woo',
            'id': 20,
            'uuid': '00000000-0000-0000-0000-000000000020'})
    instance3 = self._create_fake_instance({
            'display_name': 'not-woot',
            'id': 30,
            'uuid': '00000000-0000-0000-0000-000000000030'})
    # ip ends up matching 2nd octet here.. so all 3 match ip
    # but 'name' only matches one
    instances = self.compute_api.get_all(c,
            search_opts={'ip': '.*\.1', 'name': 'not.*'})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['uuid'], instance3['uuid'])
    # ip ends up matching any ip with a '1' in the last octet..
    # so instance 1 and 3.. but name should only match #1
    # but 'name' only matches one
    instances = self.compute_api.get_all(c,
            search_opts={'ip': '.*\.1$', 'name': '^woo.*'})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['uuid'], instance1['uuid'])
    # same as above but no match on name (name matches instance1
    # but the ip query doesn't
    instances = self.compute_api.get_all(c,
            search_opts={'ip': '.*\.2$', 'name': '^woot.*'})
    self.assertEqual(len(instances), 0)
    # ip matches all 3... ipv6 matches #2+#3...name matches #3
    instances = self.compute_api.get_all(c,
            search_opts={'ip': '.*\.1',
                         'name': 'not.*',
                         'ip6': '^.*12.*34.*'})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['uuid'], instance3['uuid'])
    db.instance_destroy(c, instance1['uuid'])
    db.instance_destroy(c, instance2['uuid'])
    db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_image(self):
    # Test searching instances by image (exact image_ref match, no
    # substring matching).
    c = context.get_admin_context()
    instance1 = self._create_fake_instance({'image_ref': '1234'})
    instance2 = self._create_fake_instance({'image_ref': '4567'})
    instance3 = self._create_fake_instance({'image_ref': '4567'})
    # '123' is only a prefix of '1234', so nothing matches.
    instances = self.compute_api.get_all(c, search_opts={'image': '123'})
    self.assertEqual(len(instances), 0)
    instances = self.compute_api.get_all(c, search_opts={'image': '1234'})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['uuid'], instance1['uuid'])
    instances = self.compute_api.get_all(c, search_opts={'image': '4567'})
    self.assertEqual(len(instances), 2)
    instance_uuids = [instance['uuid'] for instance in instances]
    self.assertIn(instance2['uuid'], instance_uuids)
    self.assertIn(instance3['uuid'], instance_uuids)
    # Test passing a list as search arg
    instances = self.compute_api.get_all(c,
                                search_opts={'image': ['1234', '4567']})
    self.assertEqual(len(instances), 3)
    db.instance_destroy(c, instance1['uuid'])
    db.instance_destroy(c, instance2['uuid'])
    db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_flavor(self):
    # Test searching instances by flavor.
    c = context.get_admin_context()
    instance1 = self._create_fake_instance({'instance_type_id': 1})
    instance2 = self._create_fake_instance({'instance_type_id': 2})
    instance3 = self._create_fake_instance({'instance_type_id': 2})
    # NOTE(comstud): Migrations set up the instance_types table
    # for us. Therefore, we assume the following is true for
    # these tests:
    # instance_type_id 1 == flavor 3
    # instance_type_id 2 == flavor 1
    # instance_type_id 3 == flavor 4
    # instance_type_id 4 == flavor 5
    # instance_type_id 5 == flavor 2
    instances = self.compute_api.get_all(c,
            search_opts={'flavor': 5})
    self.assertEqual(len(instances), 0)
    # ensure unknown flavor maps to an exception
    self.assertRaises(exception.FlavorNotFound,
                      self.compute_api.get_all, c,
                      search_opts={'flavor': 99})
    instances = self.compute_api.get_all(c, search_opts={'flavor': 3})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['id'], instance1['id'])
    instances = self.compute_api.get_all(c, search_opts={'flavor': 1})
    self.assertEqual(len(instances), 2)
    instance_uuids = [instance['uuid'] for instance in instances]
    self.assertIn(instance2['uuid'], instance_uuids)
    self.assertIn(instance3['uuid'], instance_uuids)
    db.instance_destroy(c, instance1['uuid'])
    db.instance_destroy(c, instance2['uuid'])
    db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_state(self):
    # Test searching instances by power state.
    c = context.get_admin_context()
    instance1 = self._create_fake_instance({
        'power_state': power_state.SHUTDOWN,
    })
    instance2 = self._create_fake_instance({
        'power_state': power_state.RUNNING,
    })
    instance3 = self._create_fake_instance({
        'power_state': power_state.RUNNING,
    })
    # No instance is SUSPENDED.
    instances = self.compute_api.get_all(c,
            search_opts={'power_state': power_state.SUSPENDED})
    self.assertEqual(len(instances), 0)
    instances = self.compute_api.get_all(c,
            search_opts={'power_state': power_state.SHUTDOWN})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['uuid'], instance1['uuid'])
    instances = self.compute_api.get_all(c,
            search_opts={'power_state': power_state.RUNNING})
    self.assertEqual(len(instances), 2)
    instance_uuids = [instance['uuid'] for instance in instances]
    self.assertIn(instance2['uuid'], instance_uuids)
    self.assertIn(instance3['uuid'], instance_uuids)
    # Test passing a list as search arg
    instances = self.compute_api.get_all(c,
            search_opts={'power_state': [power_state.SHUTDOWN,
                    power_state.RUNNING]})
    self.assertEqual(len(instances), 3)
    db.instance_destroy(c, instance1['uuid'])
    db.instance_destroy(c, instance2['uuid'])
    db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_metadata(self):
    # Test searching instances by metadata. The filter value is a
    # JSON-encoded dict (or list of dicts) of required key/values.
    c = context.get_admin_context()
    instance0 = self._create_fake_instance()
    instance1 = self._create_fake_instance({
            'metadata': {'key1': 'value1'}})
    instance2 = self._create_fake_instance({
            'metadata': {'key2': 'value2'}})
    instance3 = self._create_fake_instance({
            'metadata': {'key3': 'value3'}})
    instance4 = self._create_fake_instance({
            'metadata': {'key3': 'value3',
                         'key4': 'value4'}})
    # get all instances: an empty filter dict matches everything.
    instances = self.compute_api.get_all(c,
            search_opts={'metadata': u"{}"})
    self.assertEqual(len(instances), 5)
    # wrong key/value combination
    instances = self.compute_api.get_all(c,
            search_opts={'metadata': u'{"key1": "value3"}'})
    self.assertEqual(len(instances), 0)
    # non-existing keys
    instances = self.compute_api.get_all(c,
            search_opts={'metadata': u'{"key5": "value1"}'})
    self.assertEqual(len(instances), 0)
    # find existing instance
    instances = self.compute_api.get_all(c,
            search_opts={'metadata': u'{"key2": "value2"}'})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['uuid'], instance2['uuid'])
    instances = self.compute_api.get_all(c,
            search_opts={'metadata': u'{"key3": "value3"}'})
    self.assertEqual(len(instances), 2)
    instance_uuids = [instance['uuid'] for instance in instances]
    self.assertIn(instance3['uuid'], instance_uuids)
    self.assertIn(instance4['uuid'], instance_uuids)
    # multiple criteria as a dict: all pairs must match (AND).
    instances = self.compute_api.get_all(c,
            search_opts={'metadata': u'{"key3": "value3","key4": "value4"}'})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['uuid'], instance4['uuid'])
    # multiple criteria as a list: same AND semantics.
    instances = self.compute_api.get_all(c,
            search_opts=
                {'metadata': u'[{"key4": "value4"},{"key3": "value3"}]'})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['uuid'], instance4['uuid'])
    db.instance_destroy(c, instance0['uuid'])
    db.instance_destroy(c, instance1['uuid'])
    db.instance_destroy(c, instance2['uuid'])
    db.instance_destroy(c, instance3['uuid'])
    db.instance_destroy(c, instance4['uuid'])
def test_get_all_by_system_metadata(self):
    """Test searching instances by system metadata (JSON-encoded
    key/value filter, same format as the 'metadata' filter).
    """
    c = context.get_admin_context()
    instance1 = self._create_fake_instance({
            'system_metadata': {'key1': 'value1'}})
    # find existing instance
    instances = self.compute_api.get_all(c,
            search_opts={'system_metadata': u'{"key1": "value1"}'})
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['uuid'], instance1['uuid'])
    # Clean up like the sibling get_all tests; the original version
    # leaked this instance into subsequent queries.
    db.instance_destroy(c, instance1['uuid'])
def test_all_instance_metadata(self):
    """get_all_instance_metadata scopes results to the caller's
    user/project, unless the context is an admin one.
    """
    self._create_fake_instance({'metadata': {'key1': 'value1'},
                                'user_id': 'user1',
                                'project_id': 'project1'})
    self._create_fake_instance({'metadata': {'key2': 'value2'},
                                'user_id': 'user2',
                                'project_id': 'project2'})

    # As user1/project1 only the first instance's metadata is visible.
    _context = self.context
    _context.user_id = 'user1'
    _context.project_id = 'project1'
    metadata = self.compute_api.get_all_instance_metadata(_context,
                                                          search_filts=[])
    self.assertEqual(1, len(metadata))
    self.assertEqual(metadata[0]['key'], 'key1')

    # Switching to user2/project2 flips which instance is visible.
    _context.user_id = 'user2'
    _context.project_id = 'project2'
    metadata = self.compute_api.get_all_instance_metadata(_context,
                                                          search_filts=[])
    self.assertEqual(1, len(metadata))
    self.assertEqual(metadata[0]['key'], 'key2')

    # An admin context sees metadata of both instances.
    _context = context.get_admin_context()
    metadata = self.compute_api.get_all_instance_metadata(_context,
                                                          search_filts=[])
    self.assertEqual(2, len(metadata))
def test_instance_metadata(self):
    """Metadata update/delete pushes a diff over the compute RPC API
    and emits an instance.update notification for each change.
    """
    meta_changes = [None]
    self.flags(notify_on_state_change='vm_state')

    def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                      instance_uuid=None):
        # Capture the metadata diff that would be cast over RPC.
        meta_changes[0] = diff
    self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                   fake_change_instance_metadata)

    _context = context.get_admin_context()
    instance = self._create_fake_instance_obj({'metadata':
                                               {'key1': 'value1'}})

    metadata = self.compute_api.get_instance_metadata(_context, instance)
    self.assertEqual(metadata, {'key1': 'value1'})

    self.compute_api.update_instance_metadata(_context, instance,
                                              {'key2': 'value2'})
    metadata = self.compute_api.get_instance_metadata(_context, instance)
    self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
    # '+' marks an added/updated key in the diff.
    self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}])

    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
    msg = fake_notifier.NOTIFICATIONS[0]
    payload = msg.payload
    self.assertIn('metadata', payload)
    self.assertEqual(payload['metadata'], metadata)

    # delete=True replaces the metadata dict wholesale.
    new_metadata = {'key2': 'bah', 'key3': 'value3'}
    self.compute_api.update_instance_metadata(_context, instance,
                                              new_metadata, delete=True)
    metadata = self.compute_api.get_instance_metadata(_context, instance)
    self.assertEqual(metadata, new_metadata)
    # '-' marks a removed key.
    self.assertEqual(meta_changes, [{
        'key1': ['-'],
        'key2': ['+', 'bah'],
        'key3': ['+', 'value3'],
    }])

    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
    msg = fake_notifier.NOTIFICATIONS[1]
    payload = msg.payload
    self.assertIn('metadata', payload)
    self.assertEqual(payload['metadata'], metadata)

    self.compute_api.delete_instance_metadata(_context, instance, 'key2')
    metadata = self.compute_api.get_instance_metadata(_context, instance)
    self.assertEqual(metadata, {'key3': 'value3'})
    self.assertEqual(meta_changes, [{'key2': ['-']}])

    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
    msg = fake_notifier.NOTIFICATIONS[2]
    payload = msg.payload
    self.assertIn('metadata', payload)
    self.assertEqual(payload['metadata'], {'key3': 'value3'})

    db.instance_destroy(_context, instance['uuid'])
def test_disallow_metadata_changes_during_building(self):
    """Metadata updates/deletes on a BUILDING instance must raise
    InstanceInvalidState before any RPC call is made.
    """
    def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                      instance_uuid=None):
        pass
    self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                   fake_change_instance_metadata)

    instance = self._create_fake_instance({'vm_state': vm_states.BUILDING})
    instance = dict(instance)

    self.assertRaises(exception.InstanceInvalidState,
                      self.compute_api.delete_instance_metadata,
                      self.context, instance, "key")

    # Pass a real metadata dict here: the original test passed the bare
    # string "key", which only worked because the state check raises
    # before the payload is ever inspected.
    self.assertRaises(exception.InstanceInvalidState,
                      self.compute_api.update_instance_metadata,
                      self.context, instance, {"key": "value"})
def test_get_instance_faults(self):
    """get_instance_faults returns faults keyed by instance uuid."""
    instance = self._create_fake_instance()

    fault_fixture = {
        'code': 404,
        'instance_uuid': instance['uuid'],
        'message': "HTTPNotFound",
        'details': "Stock details for test",
        'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
    }

    def return_fault(_ctxt, instance_uuids):
        # Every requested uuid maps to the same canned fault list.
        return dict.fromkeys(instance_uuids, [fault_fixture])

    self.stubs.Set(nova.db,
                   'instance_fault_get_by_instance_uuids',
                   return_fault)

    _context = context.get_admin_context()
    output = self.compute_api.get_instance_faults(_context, [instance])
    expected = {instance['uuid']: [fault_fixture]}
    self.assertEqual(output, expected)

    db.instance_destroy(_context, instance['uuid'])
@staticmethod
def _parse_db_block_device_mapping(bdm_ref):
    """Project a DB BDM row down to the attributes holding truthy values."""
    attr_list = ('delete_on_termination', 'device_name', 'no_device',
                 'virtual_name', 'volume_id', 'volume_size', 'snapshot_id')
    return {attr: bdm_ref[attr]
            for attr in attr_list if bdm_ref.get(attr, None)}
def test_update_block_device_mapping(self):
    """Image-supplied mappings are converted to BDMs first; explicit
    block_device_mapping entries then overwrite or extend them.
    """
    swap_size = ephemeral_size = 1
    instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size}
    instance = self._create_fake_instance_obj()
    mappings = [
        {'virtual': 'ami', 'device': 'sda1'},
        {'virtual': 'root', 'device': '/dev/sda1'},
        {'virtual': 'swap', 'device': 'sdb4'},
        {'virtual': 'swap', 'device': 'sdb3'},
        {'virtual': 'swap', 'device': 'sdb2'},
        {'virtual': 'swap', 'device': 'sdb1'},
        {'virtual': 'ephemeral0', 'device': 'sdc1'},
        {'virtual': 'ephemeral1', 'device': 'sdc2'},
        {'virtual': 'ephemeral2', 'device': 'sdc3'}]
    block_device_mapping = [
        # root
        {'device_name': '/dev/sda1',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
         'delete_on_termination': False},
        # overwrite swap
        {'device_name': '/dev/sdb2',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
         'delete_on_termination': False},
        {'device_name': '/dev/sdb3',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
        {'device_name': '/dev/sdb4',
         'no_device': True},
        # overwrite ephemeral
        {'device_name': '/dev/sdc1',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
         'delete_on_termination': False},
        {'device_name': '/dev/sdc2',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
         'delete_on_termination': False},
        {'device_name': '/dev/sdc3',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
        {'device_name': '/dev/sdc4',
         'no_device': True},
        # volume
        {'device_name': '/dev/sdd1',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
         'delete_on_termination': False},
        {'device_name': '/dev/sdd2',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
        {'device_name': '/dev/sdd3',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
        {'device_name': '/dev/sdd4',
         'no_device': True}]

    # First pass: only the image-supplied mappings are stored.
    image_mapping = self.compute_api._prepare_image_mapping(
        instance_type, mappings)
    self.compute_api._update_block_device_mapping(
        self.context, instance_type, instance['uuid'], image_mapping)

    bdms = [block_device.BlockDeviceDict(bdm) for bdm in
            db.block_device_mapping_get_all_by_instance(
                self.context, instance['uuid'])]
    expected_result = [
        {'source_type': 'blank', 'destination_type': 'local',
         'guest_format': 'swap', 'device_name': '/dev/sdb1',
         'volume_size': swap_size, 'delete_on_termination': True},
        {'source_type': 'blank', 'destination_type': 'local',
         'guest_format': CONF.default_ephemeral_format,
         'device_name': '/dev/sdc3', 'delete_on_termination': True},
        {'source_type': 'blank', 'destination_type': 'local',
         'guest_format': CONF.default_ephemeral_format,
         'device_name': '/dev/sdc1', 'delete_on_termination': True},
        {'source_type': 'blank', 'destination_type': 'local',
         'guest_format': CONF.default_ephemeral_format,
         'device_name': '/dev/sdc2', 'delete_on_termination': True},
    ]
    # Sort both sides by device name so the zip below compares pairs.
    bdms.sort(key=operator.itemgetter('device_name'))
    expected_result.sort(key=operator.itemgetter('device_name'))
    self.assertEqual(len(bdms), len(expected_result))
    for expected, got in zip(expected_result, bdms):
        self.assertThat(expected, matchers.IsSubDictOf(got))

    # Second pass: explicit BDMs overwrite/extend the image-derived ones.
    self.compute_api._update_block_device_mapping(
        self.context, flavors.get_default_flavor(),
        instance['uuid'], block_device_mapping)
    bdms = [block_device.BlockDeviceDict(bdm) for bdm in
            db.block_device_mapping_get_all_by_instance(
                self.context, instance['uuid'])]
    expected_result = [
        {'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
         'device_name': '/dev/sda1'},
        {'source_type': 'blank', 'destination_type': 'local',
         'guest_format': 'swap', 'device_name': '/dev/sdb1',
         'volume_size': swap_size, 'delete_on_termination': True},
        {'device_name': '/dev/sdb2',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
         'delete_on_termination': False},
        {'device_name': '/dev/sdb3',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
        {'device_name': '/dev/sdb4', 'no_device': True},
        {'device_name': '/dev/sdc1',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
         'delete_on_termination': False},
        {'device_name': '/dev/sdc2',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
         'delete_on_termination': False},
        {'device_name': '/dev/sdc3',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
        {'no_device': True, 'device_name': '/dev/sdc4'},
        {'device_name': '/dev/sdd1',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
         'delete_on_termination': False},
        {'device_name': '/dev/sdd2',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
        {'device_name': '/dev/sdd3',
         'source_type': 'snapshot', 'destination_type': 'volume',
         'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
        {'no_device': True, 'device_name': '/dev/sdd4'}]
    bdms.sort(key=operator.itemgetter('device_name'))
    expected_result.sort(key=operator.itemgetter('device_name'))
    self.assertEqual(len(bdms), len(expected_result))
    for expected, got in zip(expected_result, bdms):
        self.assertThat(expected, matchers.IsSubDictOf(got))

    # Clean up the created BDMs and the instance itself.
    for bdm in db.block_device_mapping_get_all_by_instance(
            self.context, instance['uuid']):
        db.block_device_mapping_destroy(self.context, bdm['id'])
    instance.refresh()
    self.compute.terminate_instance(self.context, instance, [], [])
def _test_check_and_transform_bdm(self, bdms, expected_bdms,
                                  image_bdms=None, base_options=None,
                                  legacy_bdms=False,
                                  legacy_image_bdms=False):
    """Run _check_and_transform_bdm and compare against expected_bdms.

    image_bdms, when given, are placed into the image metadata
    properties; unless legacy_image_bdms is set they are flagged as
    v2 BDMs via 'bdm_v2'.
    """
    image_bdms = image_bdms or []
    image_meta = {}
    if image_bdms:
        image_meta = {'properties': {'block_device_mapping': image_bdms}}
        if not legacy_image_bdms:
            image_meta['properties']['bdm_v2'] = True
    base_options = base_options or {'root_device_name': 'vda',
                                    'image_ref': FAKE_IMAGE_REF}
    transformed_bdm = self.compute_api._check_and_transform_bdm(
        base_options, {}, image_meta, 1, 1, bdms, legacy_bdms)
    self.assertThat(expected_bdms,
                    matchers.DictListMatches(transformed_bdm))
def test_check_and_transform_legacy_bdm_no_image_bdms(self):
    """A legacy root BDM is converted to v2 and gets boot_index 0."""
    legacy_bdms = [
        {'device_name': '/dev/vda',
         'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
         'delete_on_termination': False}]
    expected_bdms = [block_device.BlockDeviceDict.from_legacy(
        legacy_bdms[0])]
    expected_bdms[0]['boot_index'] = 0
    self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
                                       legacy_bdms=True)
def test_check_and_transform_legacy_bdm_legacy_image_bdms(self):
    """With a legacy image BDM present, the image device becomes the
    boot device (boot_index 0) and the API-supplied device is demoted
    to a non-boot device (boot_index -1).
    """
    image_bdms = [
        {'device_name': '/dev/vda',
         'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
         'delete_on_termination': False}]
    legacy_bdms = [
        {'device_name': '/dev/vdb',
         'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
         'delete_on_termination': False}]
    expected_bdms = [
        block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
        block_device.BlockDeviceDict.from_legacy(image_bdms[0])]
    expected_bdms[0]['boot_index'] = -1
    expected_bdms[1]['boot_index'] = 0
    self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
                                       image_bdms=image_bdms,
                                       legacy_bdms=True,
                                       legacy_image_bdms=True)
def test_check_and_transform_legacy_bdm_image_bdms(self):
    """A v2 image BDM (boot_index 0) is kept as-is; the legacy
    API-supplied BDM is converted and demoted to boot_index -1.
    """
    legacy_bdms = [
        {'device_name': '/dev/vdb',
         'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
         'delete_on_termination': False}]
    image_bdms = [block_device.BlockDeviceDict(
        {'source_type': 'volume', 'destination_type': 'volume',
         'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
         'boot_index': 0})]
    expected_bdms = [
        block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
        image_bdms[0]]
    expected_bdms[0]['boot_index'] = -1
    self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
                                       image_bdms=image_bdms,
                                       legacy_bdms=True)
def test_check_and_transform_bdm_no_image_bdms(self):
    """A v2 image BDM passes through untouched when the image metadata
    carries no BDMs of its own.
    """
    root_bdm = block_device.BlockDeviceDict({
        'source_type': 'image',
        'destination_type': 'local',
        'image_id': FAKE_IMAGE_REF,
        'boot_index': 0,
    })
    self._test_check_and_transform_bdm([root_bdm], [root_bdm])
def test_check_and_transform_bdm_image_bdms(self):
    """v2 image BDMs are appended after the API-supplied v2 BDMs."""
    bdms = [block_device.BlockDeviceDict({'source_type': 'image',
                                          'destination_type': 'local',
                                          'image_id': FAKE_IMAGE_REF,
                                          'boot_index': 0})]
    image_bdms = [block_device.BlockDeviceDict(
        {'source_type': 'volume', 'destination_type': 'volume',
         'volume_id': '33333333-aaaa-bbbb-cccc-444444444444'})]
    expected_bdms = bdms + image_bdms
    self._test_check_and_transform_bdm(bdms, expected_bdms,
                                       image_bdms=image_bdms)
def test_check_and_transform_bdm_legacy_image_bdms(self):
    """The converted legacy image BDM becomes the boot device; only it
    appears in the expected result.
    """
    bdms = [block_device.BlockDeviceDict({'source_type': 'image',
                                          'destination_type': 'local',
                                          'image_id': FAKE_IMAGE_REF,
                                          'boot_index': 0})]
    image_bdms = [{'device_name': '/dev/vda',
                   'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
                   'delete_on_termination': False}]
    expected_bdms = [block_device.BlockDeviceDict.from_legacy(
        image_bdms[0])]
    expected_bdms[0]['boot_index'] = 0
    self._test_check_and_transform_bdm(bdms, expected_bdms,
                                       image_bdms=image_bdms,
                                       legacy_image_bdms=True)
def test_check_and_transform_image(self):
    """Exercise root-BDM synthesis paths in _check_and_transform_bdm."""
    base_options = {'root_device_name': 'vdb',
                    'image_ref': FAKE_IMAGE_REF}
    fake_legacy_bdms = [
        {'device_name': '/dev/vda',
         'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
         'delete_on_termination': False}]

    image_meta = {'properties': {'block_device_mapping': [
        {'device_name': '/dev/vda',
         'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333'}]}}

    # We get an image BDM
    transformed_bdm = self.compute_api._check_and_transform_bdm(
        base_options, {}, {}, 1, 1, fake_legacy_bdms, True)
    self.assertEqual(len(transformed_bdm), 2)

    # No image BDM created if image already defines a root BDM
    base_options['root_device_name'] = 'vda'
    transformed_bdm = self.compute_api._check_and_transform_bdm(
        base_options, {}, image_meta, 1, 1, [], True)
    self.assertEqual(len(transformed_bdm), 1)

    # No image BDM created
    transformed_bdm = self.compute_api._check_and_transform_bdm(
        base_options, {}, {}, 1, 1, fake_legacy_bdms, True)
    self.assertEqual(len(transformed_bdm), 1)

    # Volumes with multiple instances fails
    self.assertRaises(exception.InvalidRequest,
                      self.compute_api._check_and_transform_bdm,
                      base_options, {}, {}, 1, 2, fake_legacy_bdms, True)

    # Re-checking already-transformed BDMs returns them unchanged.
    checked_bdm = self.compute_api._check_and_transform_bdm(
        base_options, {}, {}, 1, 1, transformed_bdm, True)
    self.assertEqual(checked_bdm, transformed_bdm)
def test_volume_size(self):
    """_volume_size falls back to flavor-provided sizes for blank
    devices and honours an explicit volume_size wherever present.
    """
    ephemeral_size = 2
    swap_size = 3
    volume_size = 5

    swap_bdm = {'source_type': 'blank', 'guest_format': 'swap'}
    ephemeral_bdm = {'source_type': 'blank', 'guest_format': None}
    volume_bdm = {'source_type': 'volume', 'volume_size': volume_size}
    inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size}

    size_of = self.compute_api._volume_size

    # Blank devices default to the flavor-provided sizes...
    self.assertEqual(ephemeral_size, size_of(inst_type, ephemeral_bdm))
    self.assertEqual(swap_size, size_of(inst_type, swap_bdm))

    # ...but an explicit volume_size always wins.
    ephemeral_bdm['volume_size'] = 42
    self.assertEqual(42, size_of(inst_type, ephemeral_bdm))
    swap_bdm['volume_size'] = 42
    self.assertEqual(42, size_of(inst_type, swap_bdm))
    self.assertEqual(volume_size, size_of(inst_type, volume_bdm))
def test_is_volume_backed_instance(self):
    """is_volume_backed_instance keys off the boot (boot_index 0) BDM,
    or off an empty image_ref when no BDM list is provided.
    """
    ctxt = self.context

    # No image_ref and no BDM list given: treated as volume-backed.
    instance = self._create_fake_instance({'image_ref': ''})
    self.assertTrue(
        self.compute_api.is_volume_backed_instance(ctxt, instance, None))

    # Image-backed instance with an empty BDM list: not volume-backed.
    instance = self._create_fake_instance({'root_device_name': 'vda'})
    self.assertFalse(
        self.compute_api.is_volume_backed_instance(
            ctxt, instance,
            block_device_obj.block_device_make_list(ctxt, [])))

    # Boot device is a volume: volume-backed.
    bdms = block_device_obj.block_device_make_list(ctxt,
        [fake_block_device.FakeDbBlockDeviceDict(
            {'source_type': 'volume',
             'device_name': '/dev/vda',
             'volume_id': 'fake_volume_id',
             'boot_index': 0,
             'destination_type': 'volume'})])
    self.assertTrue(
        self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))

    # Boot device has a local destination: not volume-backed, even
    # though a secondary device targets a volume.
    bdms = block_device_obj.block_device_make_list(ctxt,
        [fake_block_device.FakeDbBlockDeviceDict(
            {'source_type': 'volume',
             'device_name': '/dev/vda',
             'volume_id': 'fake_volume_id',
             'destination_type': 'local',
             'boot_index': 0,
             'snapshot_id': None}),
         fake_block_device.FakeDbBlockDeviceDict(
            {'source_type': 'volume',
             'device_name': '/dev/vdb',
             'boot_index': 1,
             'destination_type': 'volume',
             'volume_id': 'c2ec2156-d75e-11e2-985b-5254009297d6',
             'snapshot_id': None})])
    self.assertFalse(
        self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))

    # A snapshot-sourced boot volume still counts as volume-backed.
    bdms = block_device_obj.block_device_make_list(ctxt,
        [fake_block_device.FakeDbBlockDeviceDict(
            {'source_type': 'volume',
             'device_name': '/dev/vda',
             'snapshot_id': 'de8836ac-d75e-11e2-8271-5254009297d6',
             'destination_type': 'volume',
             'boot_index': 0,
             'volume_id': None})])
    self.assertTrue(
        self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
def test_is_volume_backed_instance_no_bdms(self):
    """When no BDMs are passed in, the API must look them up itself."""
    ctxt = self.context
    instance = self._create_fake_instance()

    # Expect exactly one BDM lookup for this instance's uuid.
    self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                             'get_by_instance_uuid')
    objects.BlockDeviceMappingList.get_by_instance_uuid(
        ctxt, instance['uuid']).AndReturn(
            block_device_obj.block_device_make_list(ctxt, []))
    self.mox.ReplayAll()

    self.compute_api.is_volume_backed_instance(ctxt, instance, None)
def test_reservation_id_one_instance(self):
    """Verify building an instance has a reservation_id that
    matches return value from create.
    """
    (refs, resv_id) = self.compute_api.create(self.context,
        flavors.get_default_flavor(), image_href='some-fake-image')
    try:
        self.assertEqual(len(refs), 1)
        self.assertEqual(refs[0]['reservation_id'], resv_id)
    finally:
        # Always remove the created instance from the DB, pass or fail.
        db.instance_destroy(self.context, refs[0]['uuid'])
def test_reservation_ids_two_instances(self):
    """Verify building 2 instances at once results in a
    reservation_id being returned equal to reservation id set
    in both instances.
    """
    (refs, resv_id) = self.compute_api.create(self.context,
        flavors.get_default_flavor(), image_href='some-fake-image',
        min_count=2, max_count=2)
    try:
        self.assertEqual(len(refs), 2)
        self.assertIsNotNone(resv_id)
        # Assertions belong in the try block: a failing assert inside
        # a finally clause would mask the original exception.
        for instance in refs:
            self.assertEqual(instance['reservation_id'], resv_id)
    finally:
        # Clean up every created instance, not just the first one.
        for instance in refs:
            db.instance_destroy(self.context, instance['uuid'])
def test_multi_instance_display_name_template(self):
    """multi_instance_display_name_template controls how display_name
    and hostname are expanded for multi-instance create requests.
    """
    # %(name)s only: both instances share the requested name verbatim.
    self.flags(multi_instance_display_name_template='%(name)s')
    (refs, resv_id) = self.compute_api.create(self.context,
        flavors.get_default_flavor(), image_href='some-fake-image',
        min_count=2, max_count=2, display_name='x')
    self.assertEqual(refs[0]['display_name'], 'x')
    self.assertEqual(refs[0]['hostname'], 'x')
    self.assertEqual(refs[1]['display_name'], 'x')
    self.assertEqual(refs[1]['hostname'], 'x')

    # %(count)s appends a 1-based index per instance.
    self.flags(multi_instance_display_name_template='%(name)s-%(count)s')
    (refs, resv_id) = self.compute_api.create(self.context,
        flavors.get_default_flavor(), image_href='some-fake-image',
        min_count=2, max_count=2, display_name='x')
    self.assertEqual(refs[0]['display_name'], 'x-1')
    self.assertEqual(refs[0]['hostname'], 'x-1')
    self.assertEqual(refs[1]['display_name'], 'x-2')
    self.assertEqual(refs[1]['hostname'], 'x-2')

    # %(uuid)s appends each instance's own uuid.
    self.flags(multi_instance_display_name_template='%(name)s-%(uuid)s')
    (refs, resv_id) = self.compute_api.create(self.context,
        flavors.get_default_flavor(), image_href='some-fake-image',
        min_count=2, max_count=2, display_name='x')
    self.assertEqual(refs[0]['display_name'], 'x-%s' % refs[0]['uuid'])
    self.assertEqual(refs[0]['hostname'], 'x-%s' % refs[0]['uuid'])
    self.assertEqual(refs[1]['display_name'], 'x-%s' % refs[1]['uuid'])
    self.assertEqual(refs[1]['hostname'], 'x-%s' % refs[1]['uuid'])
def test_instance_architecture(self):
    """A freshly created fake instance reports the x86_64 architecture."""
    instance = self._create_fake_instance()
    self.assertEqual(arch.X86_64, instance['architecture'])
    db.instance_destroy(self.context, instance['uuid'])
def test_instance_unknown_architecture(self):
    """An instance created with an empty architecture must not end up
    reported as 'Unknown' after it has run.
    """
    instance = self._create_fake_instance_obj(
        params={'architecture': ''})
    try:
        self.compute.run_instance(self.context, instance, {}, {}, None,
                                  None, None, True, None, False)
        # Re-read from the DB to see what the compute manager stored.
        instance = db.instance_get_by_uuid(self.context,
                                           instance['uuid'])
        self.assertNotEqual(instance['architecture'], 'Unknown')
    finally:
        db.instance_destroy(self.context, instance['uuid'])
def test_instance_name_template(self):
    """instance_name_template drives the derived 'name' property."""
    # %d expands to the instance's integer id.
    self.flags(instance_name_template='instance-%d')
    i_ref = self._create_fake_instance()
    self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
    db.instance_destroy(self.context, i_ref['uuid'])

    # %(uuid)s expands to the instance uuid.
    self.flags(instance_name_template='instance-%(uuid)s')
    i_ref = self._create_fake_instance()
    self.assertEqual(i_ref['name'], 'instance-%s' % i_ref['uuid'])
    db.instance_destroy(self.context, i_ref['uuid'])

    # Both keys may be combined.
    self.flags(instance_name_template='%(id)d-%(uuid)s')
    i_ref = self._create_fake_instance()
    self.assertEqual(i_ref['name'], '%d-%s' %
            (i_ref['id'], i_ref['uuid']))
    db.instance_destroy(self.context, i_ref['uuid'])

    # not allowed.. default is uuid
    self.flags(instance_name_template='%(name)s')
    i_ref = self._create_fake_instance()
    self.assertEqual(i_ref['name'], i_ref['uuid'])
    db.instance_destroy(self.context, i_ref['uuid'])
def test_add_remove_fixed_ip(self):
    """Smoke test: add_fixed_ip/remove_fixed_ip complete without error
    for an instance on this host.
    """
    instance = self._create_fake_instance_obj(params={'host': CONF.host})
    # Stub out network deallocation so delete() below does no real work.
    self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
                   lambda *a, **kw: None)
    self.compute_api.add_fixed_ip(self.context, instance, '1')
    self.compute_api.remove_fixed_ip(self.context,
                                     instance, '192.168.1.1')
    self.compute_api.delete(self.context, instance)
def test_attach_volume_invalid(self):
    """attach_volume rejects an invalid device path ('/invalid')."""
    active_instance = {'locked': False,
                       'vm_state': vm_states.ACTIVE,
                       'task_state': None,
                       'launched_at': timeutils.utcnow()}
    self.assertRaises(exception.InvalidDevicePath,
                      self.compute_api.attach_volume,
                      self.context, active_instance, None, '/invalid')
def test_no_attach_volume_in_rescue_state(self):
    """Attaching a volume to a RESCUED instance is rejected."""
    def fake(*args, **kwargs):
        pass

    def fake_volume_get(self, context, volume_id):
        return {'id': volume_id}
    # Stub out cinder so only the instance-state check can fail.
    self.stubs.Set(cinder.API, 'get', fake_volume_get)
    self.stubs.Set(cinder.API, 'check_attach', fake)
    self.stubs.Set(cinder.API, 'reserve_volume', fake)

    self.assertRaises(exception.InstanceInvalidState,
                      self.compute_api.attach_volume,
                      self.context,
                      {'uuid': 'fake_uuid', 'locked': False,
                       'vm_state': vm_states.RESCUED},
                      None,
                      '/dev/vdb')
def test_no_attach_volume_in_suspended_state(self):
    """Attaching a volume to a SUSPENDED instance is rejected."""
    suspended_instance = {'uuid': 'fake_uuid',
                          'locked': False,
                          'vm_state': vm_states.SUSPENDED}
    self.assertRaises(exception.InstanceInvalidState,
                      self.compute_api.attach_volume,
                      self.context, suspended_instance,
                      {'id': 'fake-volume-id'}, '/dev/vdb')
def test_no_detach_volume_in_rescue_state(self):
    """Detaching a volume from a RESCUED instance is rejected.

    (The old inline comment claimed the opposite; the assertion below
    expects InstanceInvalidState.)
    """
    params = {'vm_state': vm_states.RESCUED}
    instance = self._create_fake_instance(params=params)

    volume = {'id': 1, 'attach_status': 'in-use',
              'instance_uuid': instance['uuid']}

    self.assertRaises(exception.InstanceInvalidState,
                      self.compute_api.detach_volume,
                      self.context, instance, volume)
@mock.patch.object(objects.BlockDeviceMappingList,
                   'get_by_instance_uuid')
@mock.patch.object(cinder.API, 'get')
def test_no_rescue_in_volume_state_attaching(self,
                                             mock_get_vol,
                                             mock_get_bdms):
    """A VM cannot be rescued while one of its volumes is attaching."""
    instance = self._create_fake_instance_obj()
    bdms, volume = self._fake_rescue_block_devices(instance)

    # The stubbed volume reports 'attaching' status, which rescue
    # must treat as an invalid volume.
    mock_get_vol.return_value = {'id': volume['id'],
                                 'status': "attaching"}
    mock_get_bdms.return_value = bdms

    self.assertRaises(exception.InvalidVolume,
                      self.compute_api.rescue, self.context, instance)
def test_vnc_console(self):
    """get_vnc_console fetches connect info over RPC, authorizes the
    console token, and returns only the access URL.
    """
    fake_instance = {'uuid': 'fake_uuid',
                     'host': 'fake_compute_host'}
    fake_console_type = "novnc"
    fake_connect_info = {'token': 'fake_token',
                         'console_type': fake_console_type,
                         'host': 'fake_console_host',
                         'port': 'fake_console_port',
                         'internal_access_path': 'fake_access_path',
                         'instance_uuid': fake_instance['uuid'],
                         'access_url': 'fake_console_url'}

    # Record the expected RPC and consoleauth calls (mox).
    rpcapi = compute_rpcapi.ComputeAPI
    self.mox.StubOutWithMock(rpcapi, 'get_vnc_console')
    rpcapi.get_vnc_console(
        self.context, instance=fake_instance,
        console_type=fake_console_type).AndReturn(fake_connect_info)

    self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
                             'authorize_console')
    self.compute_api.consoleauth_rpcapi.authorize_console(
        self.context, 'fake_token', fake_console_type, 'fake_console_host',
        'fake_console_port', 'fake_access_path', 'fake_uuid')

    self.mox.ReplayAll()

    console = self.compute_api.get_vnc_console(self.context,
                                               fake_instance,
                                               fake_console_type)
    self.assertEqual(console, {'url': 'fake_console_url'})
def test_get_vnc_console_no_host(self):
    """A VNC console request for an unscheduled instance must fail."""
    hostless = self._create_fake_instance(params={'host': ''})
    self.assertRaises(exception.InstanceNotReady,
                      self.compute_api.get_vnc_console,
                      self.context, hostless, 'novnc')
    db.instance_destroy(self.context, hostless['uuid'])
def test_spice_console(self):
    """get_spice_console mirrors the VNC flow for spice-html5."""
    fake_instance = {'uuid': 'fake_uuid',
                     'host': 'fake_compute_host'}
    fake_console_type = "spice-html5"
    fake_connect_info = {'token': 'fake_token',
                         'console_type': fake_console_type,
                         'host': 'fake_console_host',
                         'port': 'fake_console_port',
                         'internal_access_path': 'fake_access_path',
                         'instance_uuid': fake_instance['uuid'],
                         'access_url': 'fake_console_url'}

    # Record the expected RPC and consoleauth calls (mox).
    rpcapi = compute_rpcapi.ComputeAPI
    self.mox.StubOutWithMock(rpcapi, 'get_spice_console')
    rpcapi.get_spice_console(
        self.context, instance=fake_instance,
        console_type=fake_console_type).AndReturn(fake_connect_info)

    self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
                             'authorize_console')
    self.compute_api.consoleauth_rpcapi.authorize_console(
        self.context, 'fake_token', fake_console_type, 'fake_console_host',
        'fake_console_port', 'fake_access_path', 'fake_uuid')

    self.mox.ReplayAll()

    console = self.compute_api.get_spice_console(self.context,
                                                 fake_instance,
                                                 fake_console_type)
    self.assertEqual(console, {'url': 'fake_console_url'})
def test_get_spice_console_no_host(self):
    """A SPICE console request for an unscheduled instance must fail."""
    hostless = self._create_fake_instance(params={'host': ''})
    self.assertRaises(exception.InstanceNotReady,
                      self.compute_api.get_spice_console,
                      self.context, hostless, 'spice')
    db.instance_destroy(self.context, hostless['uuid'])
def test_rdp_console(self):
    """get_rdp_console mirrors the VNC flow for rdp-html5."""
    fake_instance = {'uuid': 'fake_uuid',
                     'host': 'fake_compute_host'}
    fake_console_type = "rdp-html5"
    fake_connect_info = {'token': 'fake_token',
                         'console_type': fake_console_type,
                         'host': 'fake_console_host',
                         'port': 'fake_console_port',
                         'internal_access_path': 'fake_access_path',
                         'instance_uuid': fake_instance['uuid'],
                         'access_url': 'fake_console_url'}

    # Record the expected RPC and consoleauth calls (mox).
    rpcapi = compute_rpcapi.ComputeAPI
    self.mox.StubOutWithMock(rpcapi, 'get_rdp_console')
    rpcapi.get_rdp_console(
        self.context, instance=fake_instance,
        console_type=fake_console_type).AndReturn(fake_connect_info)

    self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
                             'authorize_console')
    self.compute_api.consoleauth_rpcapi.authorize_console(
        self.context, 'fake_token', fake_console_type, 'fake_console_host',
        'fake_console_port', 'fake_access_path', 'fake_uuid')

    self.mox.ReplayAll()

    console = self.compute_api.get_rdp_console(self.context,
                                               fake_instance,
                                               fake_console_type)
    self.assertEqual(console, {'url': 'fake_console_url'})
def test_get_rdp_console_no_host(self):
    """An RDP console request for an unscheduled instance must fail."""
    hostless = self._create_fake_instance(params={'host': ''})
    self.assertRaises(exception.InstanceNotReady,
                      self.compute_api.get_rdp_console,
                      self.context, hostless, 'rdp')
    db.instance_destroy(self.context, hostless['uuid'])
def test_serial_console(self):
    """get_serial_console returns the access URL from the RPC-provided
    connect info.
    """
    fake_instance = {'uuid': 'fake_uuid',
                     'host': 'fake_compute_host'}
    fake_console_type = 'serial'
    fake_connect_info = {'token': 'fake_token',
                         'console_type': fake_console_type,
                         'host': 'fake_serial_host',
                         'port': 'fake_tcp_port',
                         'internal_access_path': 'fake_access_path',
                         'instance_uuid': fake_instance['uuid'],
                         'access_url': 'fake_access_url'}

    rpcapi = compute_rpcapi.ComputeAPI

    # This test uses mock (not mox) via contextlib.nested.
    with contextlib.nested(
        mock.patch.object(rpcapi, 'get_serial_console',
                          return_value=fake_connect_info),
        mock.patch.object(self.compute_api.consoleauth_rpcapi,
                          'authorize_console')
    ) as (mock_get_serial_console, mock_authorize_console):
        self.compute_api.consoleauth_rpcapi.authorize_console(
            self.context, 'fake_token', fake_console_type,
            'fake_serial_host', 'fake_tcp_port',
            'fake_access_path', 'fake_uuid')

        console = self.compute_api.get_serial_console(self.context,
                                                      fake_instance,
                                                      fake_console_type)
    self.assertEqual(console, {'url': 'fake_access_url'})
def test_get_serial_console_no_host(self):
    """A serial console request for an unscheduled instance must fail."""
    hostless = self._create_fake_instance(params={'host': ''})
    self.assertRaises(exception.InstanceNotReady,
                      self.compute_api.get_serial_console,
                      self.context, hostless, 'serial')
    db.instance_destroy(self.context, hostless['uuid'])
def test_console_output(self):
    """get_console_output proxies straight to the compute RPC API."""
    fake_instance = {'uuid': 'fake_uuid',
                     'host': 'fake_compute_host'}
    fake_tail_length = 699
    fake_console_output = 'fake console output'

    rpcapi = compute_rpcapi.ComputeAPI
    self.mox.StubOutWithMock(rpcapi, 'get_console_output')
    rpcapi.get_console_output(
        self.context, instance=fake_instance,
        tail_length=fake_tail_length).AndReturn(fake_console_output)

    self.mox.ReplayAll()

    output = self.compute_api.get_console_output(self.context,
            fake_instance, tail_length=fake_tail_length)
    self.assertEqual(output, fake_console_output)
def test_console_output_no_host(self):
    """Console output for an unscheduled instance must fail."""
    hostless = self._create_fake_instance(params={'host': ''})
    self.assertRaises(exception.InstanceNotReady,
                      self.compute_api.get_console_output,
                      self.context, hostless)
    db.instance_destroy(self.context, hostless['uuid'])
def test_attach_interface(self):
    """attach_interface allocates a port and returns the new VIF."""
    new_type = flavors.get_flavor_by_flavor_id('4')
    sys_meta = flavors.save_flavor_info({}, new_type)

    instance = objects.Instance(image_ref='foo',
                                system_metadata=sys_meta)
    self.mox.StubOutWithMock(self.compute.network_api,
                             'allocate_port_for_instance')
    nwinfo = [fake_network_cache_model.new_vif()]
    network_id = nwinfo[0]['network']['id']
    port_id = nwinfo[0]['id']
    req_ip = '1.2.3.4'
    self.compute.network_api.allocate_port_for_instance(
        self.context, instance, port_id, network_id, req_ip
        ).AndReturn(nwinfo)
    self.mox.ReplayAll()
    vif = self.compute.attach_interface(self.context,
                                        instance,
                                        network_id,
                                        port_id,
                                        req_ip)
    self.assertEqual(vif['id'], network_id)
    # Hand the allocated network info to test_detach_interface, which
    # reuses it as its fixture.
    return nwinfo, port_id
def test_detach_interface(self):
    """detach_interface removes the port attached by the sibling test."""
    # Reuse test_attach_interface's result as this test's fixture.
    nwinfo, port_id = self.test_attach_interface()
    self.stubs.Set(self.compute.network_api,
                   'deallocate_port_for_instance',
                   lambda a, b, c: [])
    instance = objects.Instance()
    instance.info_cache = objects.InstanceInfoCache.new(
        self.context, 'fake-uuid')
    instance.info_cache.network_info = network_model.NetworkInfo.hydrate(
        nwinfo)
    self.compute.detach_interface(self.context, instance, port_id)
    # The driver should hold no attached interfaces afterwards.
    self.assertEqual(self.compute.driver._interfaces, {})
    def test_attach_volume(self):
        """attach_volume reserves the BDM/volume and casts to the compute.

        Verifies the cinder get/check_attach/reserve_volume sequence and
        that the reserved BDM is forwarded to the RPC attach call.
        """
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
                {'source_type': 'volume', 'destination_type': 'volume',
                 'volume_id': 'fake-volume-id', 'device_name': '/dev/vdb'})
        bdm = block_device_obj.BlockDeviceMapping()._from_db_object(
                self.context,
                block_device_obj.BlockDeviceMapping(),
                fake_bdm)
        instance = self._create_fake_instance()
        fake_volume = {'id': 'fake-volume-id'}
        with contextlib.nested(
                mock.patch.object(cinder.API, 'get', return_value=fake_volume),
                mock.patch.object(cinder.API, 'check_attach'),
                mock.patch.object(cinder.API, 'reserve_volume'),
                mock.patch.object(compute_rpcapi.ComputeAPI,
                    'reserve_block_device_name', return_value=bdm),
                mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
        ) as (mock_get, mock_check_attach, mock_reserve_vol, mock_reserve_bdm,
                mock_attach):
            self.compute_api.attach_volume(
                    self.context, instance, 'fake-volume-id',
                    '/dev/vdb', 'ide', 'cdrom')
            # disk_bus/device_type must be passed through to the reservation.
            mock_reserve_bdm.assert_called_once_with(
                    self.context, instance, '/dev/vdb', 'fake-volume-id',
                    disk_bus='ide', device_type='cdrom')
            self.assertEqual(mock_get.call_args,
                             mock.call(self.context, 'fake-volume-id'))
            self.assertEqual(mock_check_attach.call_args,
                             mock.call(
                                 self.context, fake_volume, instance=instance))
            mock_reserve_vol.assert_called_once_with(
                    self.context, 'fake-volume-id')
            a, kw = mock_attach.call_args
            self.assertEqual(kw['volume_id'], 'fake-volume-id')
            self.assertEqual(kw['mountpoint'], '/dev/vdb')
            self.assertEqual(kw['bdm'].device_name, '/dev/vdb')
            self.assertEqual(kw['bdm'].volume_id, 'fake-volume-id')
    def test_attach_volume_no_device(self):
        """attach_volume with device=None still drives the full call chain.

        Each stub records that it ran in ``called``; the test asserts every
        step of the attach path executed.
        """
        called = {}

        def fake_check_attach(*args, **kwargs):
            called['fake_check_attach'] = True

        def fake_reserve_volume(*args, **kwargs):
            called['fake_reserve_volume'] = True

        def fake_volume_get(self, context, volume_id):
            called['fake_volume_get'] = True
            return {'id': volume_id}

        def fake_rpc_attach_volume(self, context, **kwargs):
            called['fake_rpc_attach_volume'] = True

        def fake_rpc_reserve_block_device_name(self, context, instance, device,
                                               volume_id, **kwargs):
            called['fake_rpc_reserve_block_device_name'] = True
            bdm = block_device_obj.BlockDeviceMapping()
            bdm['device_name'] = '/dev/vdb'
            return bdm

        self.stubs.Set(cinder.API, 'get', fake_volume_get)
        self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
        self.stubs.Set(cinder.API, 'reserve_volume',
                       fake_reserve_volume)
        self.stubs.Set(compute_rpcapi.ComputeAPI,
                       'reserve_block_device_name',
                       fake_rpc_reserve_block_device_name)
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
                       fake_rpc_attach_volume)
        instance = self._create_fake_instance()
        self.compute_api.attach_volume(self.context, instance, 1, device=None)
        self.assertTrue(called.get('fake_check_attach'))
        self.assertTrue(called.get('fake_reserve_volume'))
        self.assertTrue(called.get('fake_volume_get'))
        self.assertTrue(called.get('fake_rpc_reserve_block_device_name'))
        self.assertTrue(called.get('fake_rpc_attach_volume'))
    def test_detach_volume(self):
        """Ensure a volume can be detached from an instance.

        Stubs record that check_detach, begin_detaching and the RPC detach
        were each invoked.
        """
        called = {}
        instance = self._create_fake_instance()
        # Volume must report being attached to this instance for the
        # detach to be allowed.
        volume = {'id': 1, 'attach_status': 'in-use',
                  'instance_uuid': instance['uuid']}

        def fake_check_detach(*args, **kwargs):
            called['fake_check_detach'] = True

        def fake_begin_detaching(*args, **kwargs):
            called['fake_begin_detaching'] = True

        def fake_rpc_detach_volume(self, context, **kwargs):
            called['fake_rpc_detach_volume'] = True

        self.stubs.Set(cinder.API, 'check_detach', fake_check_detach)
        self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching)
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume',
                       fake_rpc_detach_volume)
        self.compute_api.detach_volume(self.context,
                instance, volume)
        self.assertTrue(called.get('fake_check_detach'))
        self.assertTrue(called.get('fake_begin_detaching'))
        self.assertTrue(called.get('fake_rpc_detach_volume'))
    def test_detach_invalid_volume(self):
        """Ensure an exception is raised while detaching an un-attached
        volume (attach_status 'detached')."""
        instance = {'uuid': 'uuid1',
                    'locked': False,
                    'launched_at': timeutils.utcnow(),
                    'vm_state': vm_states.ACTIVE,
                    'task_state': None}
        volume = {'id': 1, 'attach_status': 'detached'}
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.detach_volume, self.context,
                          instance, volume)
    def test_detach_unattached_volume(self):
        """Ensure an exception is raised when the volume's idea of the
        attached instance doesn't match the instance being detached from."""
        instance = {'uuid': 'uuid1',
                    'locked': False,
                    'launched_at': timeutils.utcnow(),
                    'vm_state': vm_states.ACTIVE,
                    'task_state': None}
        # Volume claims to be attached to a different instance (uuid2).
        volume = {'id': 1, 'attach_status': 'in-use',
                  'instance_uuid': 'uuid2'}
        self.assertRaises(exception.VolumeUnattached,
                          self.compute_api.detach_volume, self.context,
                          instance, volume)
    def test_detach_suspended_instance_fails(self):
        """Detaching from a SUSPENDED instance raises
        InstanceInvalidState."""
        instance = {'uuid': 'uuid1',
                    'locked': False,
                    'launched_at': timeutils.utcnow(),
                    'vm_state': vm_states.SUSPENDED,
                    'task_state': None}
        volume = {'id': 1, 'attach_status': 'in-use',
                  'instance_uuid': 'uuid2'}
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.detach_volume, self.context,
                          instance, volume)
    def test_detach_volume_libvirt_is_down(self):
        """Ensure rollback (roll_detaching) happens if the driver's
        detach_volume raises while the hypervisor is down."""
        called = {}
        instance = self._create_fake_instance()

        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
                {'device_name': '/dev/vdb', 'volume_id': 1,
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'connection_info': '{"test": "test"}'})

        def fake_libvirt_driver_instance_exists(_instance):
            called['fake_libvirt_driver_instance_exists'] = True
            # Simulate the hypervisor no longer knowing about the instance.
            return False

        def fake_libvirt_driver_detach_volume_fails(*args, **kwargs):
            called['fake_libvirt_driver_detach_volume_fails'] = True
            raise AttributeError()

        def fake_roll_detaching(*args, **kwargs):
            called['fake_roll_detaching'] = True

        self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching)
        self.stubs.Set(self.compute.driver, "instance_exists",
                       fake_libvirt_driver_instance_exists)
        self.stubs.Set(self.compute.driver, "detach_volume",
                       fake_libvirt_driver_detach_volume_fails)
        self.mox.StubOutWithMock(objects.BlockDeviceMapping,
                                 'get_by_volume_id')
        objects.BlockDeviceMapping.get_by_volume_id(
                self.context, 1).AndReturn(objects.BlockDeviceMapping(
                    **fake_bdm))
        self.mox.ReplayAll()

        # The driver error must propagate after cinder is rolled back.
        self.assertRaises(AttributeError, self.compute.detach_volume,
                          self.context, 1, instance)
        self.assertTrue(called.get('fake_libvirt_driver_instance_exists'))
        self.assertTrue(called.get('fake_roll_detaching'))
    def test_terminate_with_volumes(self):
        """Make sure that volumes get detached during instance
        termination."""
        admin = context.get_admin_context()
        instance = self._create_fake_instance_obj()

        volume_id = 'fake'
        values = {'instance_uuid': instance['uuid'],
                  'device_name': '/dev/vdc',
                  'delete_on_termination': False,
                  'volume_id': volume_id,
                  }
        db.block_device_mapping_create(admin, values)

        def fake_volume_get(self, context, volume_id):
            return {'id': volume_id}
        self.stubs.Set(cinder.API, "get", fake_volume_get)

        # Stub out and record whether it gets detached
        result = {"detached": False}

        def fake_detach(self, context, volume_id_param):
            result["detached"] = volume_id_param == volume_id
        self.stubs.Set(cinder.API, "detach", fake_detach)

        def fake_terminate_connection(self, context, volume_id, connector):
            return {}
        self.stubs.Set(cinder.API, "terminate_connection",
                       fake_terminate_connection)

        # Kill the instance and check that it was detached
        bdms = db.block_device_mapping_get_all_by_instance(admin,
                instance['uuid'])
        self.compute.terminate_instance(admin, instance, bdms, [])
        self.assertTrue(result["detached"])
    def test_terminate_deletes_all_bdms(self):
        """Terminating an instance deletes every BDM it had, both the
        image-backed and the volume-backed ones."""
        admin = context.get_admin_context()
        instance = self._create_fake_instance_obj()

        img_bdm = {'instance_uuid': instance['uuid'],
                   'device_name': '/dev/vda',
                   'source_type': 'image',
                   'destination_type': 'local',
                   'delete_on_termination': False,
                   'boot_index': 0,
                   'image_id': 'fake_image'}
        vol_bdm = {'instance_uuid': instance['uuid'],
                   'device_name': '/dev/vdc',
                   'source_type': 'volume',
                   'destination_type': 'volume',
                   'delete_on_termination': False,
                   'volume_id': 'fake_vol'}
        bdms = []
        for bdm in img_bdm, vol_bdm:
            bdm_obj = objects.BlockDeviceMapping(**bdm)
            bdm_obj.create(admin)
            bdms.append(bdm_obj)

        # Volume and block-device handling are irrelevant here; mox
        # MockAnything accepts every call.
        self.stubs.Set(self.compute, 'volume_api', mox.MockAnything())
        self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
        self.compute.run_instance(self.context, instance, {}, {}, None, None,
                None, True, None, False)

        self.compute.terminate_instance(self.context, instance, bdms, [])

        bdms = db.block_device_mapping_get_all_by_instance(admin,
                instance['uuid'])
        self.assertEqual(len(bdms), 0)
    def test_inject_network_info(self):
        """inject_network_info on a running instance completes without
        error; the instance is deleted afterwards for cleanup."""
        instance = self._create_fake_instance_obj(params={'host': CONF.host})
        self.compute.run_instance(self.context,
                instance, {}, {}, None, None,
                None, True, None, False)
        instance = self.compute_api.get(self.context, instance['uuid'],
                                        want_objects=True)
        self.compute_api.inject_network_info(self.context, instance)
        self.stubs.Set(self.compute_api.network_api,
                       'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.delete(self.context, instance)
    def test_reset_network(self):
        """reset_network on a running instance completes without error."""
        instance = self._create_fake_instance_obj()
        self.compute.run_instance(self.context,
                instance, {}, {}, None, None,
                None, True, None, False)
        instance = self.compute_api.get(self.context, instance['uuid'],
                                        want_objects=True)
        self.compute_api.reset_network(self.context, instance)
    def test_lock(self):
        """Locking an instance succeeds (smoke test)."""
        instance = self._create_fake_instance_obj()
        self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.lock(self.context, instance)
    def test_unlock(self):
        """Unlocking an instance succeeds (smoke test)."""
        instance = self._create_fake_instance_obj()
        self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.unlock(self.context, instance)
    def test_get_lock(self):
        """get_lock reflects the instance's 'locked' DB field."""
        instance = self._create_fake_instance()
        self.assertFalse(self.compute_api.get_lock(self.context, instance))
        db.instance_update(self.context, instance['uuid'], {'locked': True})
        self.assertTrue(self.compute_api.get_lock(self.context, instance))
    def test_add_remove_security_group(self):
        """A security group can be added to and removed from a running
        instance without error."""
        instance = self._create_fake_instance_obj()

        self.compute.run_instance(self.context,
                instance, {}, {}, None, None,
                None, True, None, False)
        instance = self.compute_api.get(self.context, instance['uuid'])
        security_group_name = self._create_group()['name']

        self.security_group_api.add_to_instance(self.context,
                                                instance,
                                                security_group_name)
        self.security_group_api.remove_from_instance(self.context,
                                                     instance,
                                                     security_group_name)
    def test_get_diagnostics(self):
        """get_diagnostics is proxied to the compute RPC API."""
        instance = self._create_fake_instance_obj()

        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_diagnostics')
        rpcapi.get_diagnostics(self.context, instance=instance)
        self.mox.ReplayAll()

        self.compute_api.get_diagnostics(self.context, instance)

        self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.delete(self.context, instance)
    def test_get_instance_diagnostics(self):
        """get_instance_diagnostics is proxied to the compute RPC API."""
        instance = self._create_fake_instance_obj()

        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_instance_diagnostics')
        rpcapi.get_instance_diagnostics(self.context, instance=instance)
        self.mox.ReplayAll()

        self.compute_api.get_instance_diagnostics(self.context, instance)

        self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.delete(self.context, instance)
    def test_secgroup_refresh(self):
        """trigger_members_refresh pushes a security-rules refresh RPC to
        the host of each instance in the affected group."""
        instance = self._create_fake_instance()

        def rule_get(*args, **kwargs):
            mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
            return [mock_rule]

        def group_get(*args, **kwargs):
            mock_group = db_fakes.FakeModel({'instances': [instance]})
            return mock_group

        self.stubs.Set(
                self.compute_api.db,
                'security_group_rule_get_by_security_group_grantee',
                rule_get)
        self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)

        rpcapi = self.security_group_api.security_group_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
        rpcapi.refresh_instance_security_rules(self.context,
                                               instance['host'],
                                               instance)
        self.mox.ReplayAll()

        self.security_group_api.trigger_members_refresh(self.context, [1])
    def test_secgroup_refresh_once(self):
        """The refresh RPC is sent only once per instance even when
        multiple group ids resolve to the same instance."""
        instance = self._create_fake_instance()

        def rule_get(*args, **kwargs):
            mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
            return [mock_rule]

        def group_get(*args, **kwargs):
            mock_group = db_fakes.FakeModel({'instances': [instance]})
            return mock_group

        self.stubs.Set(
                self.compute_api.db,
                'security_group_rule_get_by_security_group_grantee',
                rule_get)
        self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)

        rpcapi = self.security_group_api.security_group_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
        # Exactly one expected call, despite two group ids below.
        rpcapi.refresh_instance_security_rules(self.context,
                                               instance['host'],
                                               instance)
        self.mox.ReplayAll()

        self.security_group_api.trigger_members_refresh(self.context, [1, 2])
    def test_secgroup_refresh_none(self):
        """No refresh RPC is sent when the group has no instances (the
        mocked rpcapi expects zero calls)."""
        def rule_get(*args, **kwargs):
            mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
            return [mock_rule]

        def group_get(*args, **kwargs):
            mock_group = db_fakes.FakeModel({'instances': []})
            return mock_group

        self.stubs.Set(
                self.compute_api.db,
                'security_group_rule_get_by_security_group_grantee',
                rule_get)
        self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)

        rpcapi = self.security_group_api.security_group_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
        self.mox.ReplayAll()

        self.security_group_api.trigger_members_refresh(self.context, [1])
    def test_secrule_refresh(self):
        """trigger_rules_refresh pushes a security-rules refresh RPC to
        the host of each instance in the group."""
        instance = self._create_fake_instance()

        def group_get(*args, **kwargs):
            mock_group = db_fakes.FakeModel({'instances': [instance]})
            return mock_group

        self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)

        rpcapi = self.security_group_api.security_group_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
        rpcapi.refresh_instance_security_rules(self.context,
                                               instance['host'],
                                               instance)
        self.mox.ReplayAll()

        self.security_group_api.trigger_rules_refresh(self.context, [1])
    def test_secrule_refresh_once(self):
        """The rules-refresh RPC is sent only once per instance even when
        several group ids resolve to the same instance."""
        instance = self._create_fake_instance()

        def group_get(*args, **kwargs):
            mock_group = db_fakes.FakeModel({'instances': [instance]})
            return mock_group

        self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)

        rpcapi = self.security_group_api.security_group_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
        # Exactly one expected call, despite two group ids below.
        rpcapi.refresh_instance_security_rules(self.context,
                                               instance['host'],
                                               instance)
        self.mox.ReplayAll()

        self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
    def test_secrule_refresh_none(self):
        """No rules-refresh RPC is sent when the groups have no instances
        (the mocked rpcapi expects zero calls)."""
        def group_get(*args, **kwargs):
            mock_group = db_fakes.FakeModel({'instances': []})
            return mock_group

        self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
        rpcapi = self.security_group_api.security_group_rpcapi
        self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
        self.mox.ReplayAll()

        self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
    def test_live_migrate(self):
        """live_migrate records the action, delegates to the conductor
        task API, and sets task_state to MIGRATING."""
        instance, instance_uuid = self._run_instance()

        rpcapi = self.compute_api.compute_task_api
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(rpcapi, 'live_migrate_instance')
        self.compute_api._record_action_start(self.context, instance,
                                              'live-migration')
        rpcapi.live_migrate_instance(self.context, instance, 'fake_dest_host',
                                     block_migration=True,
                                     disk_over_commit=True)

        self.mox.ReplayAll()

        self.compute_api.live_migrate(self.context, instance,
                                      block_migration=True,
                                      disk_over_commit=True,
                                      host_name='fake_dest_host')

        instance.refresh()
        self.assertEqual(instance['task_state'], task_states.MIGRATING)
    def test_evacuate(self):
        """Evacuating from a down host rebuilds the instance on the
        requested destination host."""
        instance = self._create_fake_instance_obj(services=True)
        self.assertIsNone(instance.task_state)

        def fake_service_is_up(*args, **kwargs):
            # Source host must appear down or evacuate is refused.
            return False

        def fake_rebuild_instance(*args, **kwargs):
            # Simulate the rebuild landing on the new host.
            instance.host = kwargs['host']
            instance.save()

        self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
                       fake_service_is_up)
        self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
                       fake_rebuild_instance)
        self.compute_api.evacuate(self.context.elevated(),
                                  instance,
                                  host='fake_dest_host',
                                  on_shared_storage=True,
                                  admin_password=None)

        instance.refresh()
        self.assertEqual(instance.task_state, task_states.REBUILDING)
        self.assertEqual(instance.host, 'fake_dest_host')
        instance.destroy()
    def test_fail_evacuate_from_non_existing_host(self):
        """Evacuating an instance whose host has no service record raises
        ComputeHostNotFound."""
        inst = {}
        inst['vm_state'] = vm_states.ACTIVE
        inst['launched_at'] = timeutils.utcnow()
        inst['image_ref'] = FAKE_IMAGE_REF
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user_id
        inst['project_id'] = self.project_id
        # 'fake_host' has no corresponding service entry.
        inst['host'] = 'fake_host'
        inst['node'] = NODENAME
        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
        inst['instance_type_id'] = type_id
        inst['ami_launch_index'] = 0
        inst['memory_mb'] = 0
        inst['vcpus'] = 0
        inst['root_gb'] = 0
        inst['ephemeral_gb'] = 0
        inst['architecture'] = arch.X86_64
        inst['os_type'] = 'Linux'

        instance = self._create_fake_instance_obj(inst)
        self.assertIsNone(instance.task_state)
        self.assertRaises(exception.ComputeHostNotFound,
                self.compute_api.evacuate, self.context.elevated(), instance,
                host='fake_dest_host', on_shared_storage=True,
                admin_password=None)
        instance.destroy()
    def test_fail_evacuate_from_running_host(self):
        """Evacuating from a host whose service is still up raises
        ComputeServiceInUse."""
        instance = self._create_fake_instance_obj(services=True)
        self.assertIsNone(instance.task_state)

        def fake_service_is_up(*args, **kwargs):
            return True

        self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
                       fake_service_is_up)
        self.assertRaises(exception.ComputeServiceInUse,
                self.compute_api.evacuate, self.context.elevated(), instance,
                host='fake_dest_host', on_shared_storage=True,
                admin_password=None)
        instance.destroy()
    def test_fail_evacuate_instance_in_wrong_state(self):
        """Evacuate is rejected with InstanceInvalidState for every
        vm_state that does not allow it."""
        states = [vm_states.BUILDING, vm_states.PAUSED, vm_states.SUSPENDED,
                  vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED,
                  vm_states.DELETED]
        instances = [self._create_fake_instance_obj({'vm_state': state})
                     for state in states]

        for instance in instances:
            self.assertRaises(exception.InstanceInvalidState,
                self.compute_api.evacuate, self.context, instance,
                host='fake_dest_host', on_shared_storage=True,
                admin_password=None)
            instance.destroy()
    def test_get_migrations(self):
        """get_migrations forwards the filters to the DB layer and wraps
        the resulting rows."""
        migration = test_migration.fake_db_migration(uuid="1234")
        filters = {'host': 'host1'}
        self.mox.StubOutWithMock(db, "migration_get_all_by_filters")
        db.migration_get_all_by_filters(self.context,
                                        filters).AndReturn([migration])
        self.mox.ReplayAll()

        migrations = self.compute_api.get_migrations(self.context,
                                                     filters)
        self.assertEqual(1, len(migrations))
        self.assertEqual(migrations[0].id, migration['id'])
def fake_rpc_method(context, method, **kwargs):
    """No-op stand-in for an RPC client's ``call``/``cast`` methods.

    Installed by the aggregate test cases so that compute RPC traffic is
    silently discarded; accepts any arguments and returns ``None``.
    """
    return None
def _create_service_entries(context, values=None):
    """Create nova-compute service rows for a zone/host layout.

    :param context: request context used for the DB calls.
    :param values: list of ``[availability_zone, [host, ...]]`` pairs.
        Defaults to two zones covering three fake hosts.  ``None`` is
        used as the default instead of a literal list to avoid the
        shared-mutable-default-argument pitfall.
    :returns: the ``values`` list actually used, so callers can look up
        the zone and host names that were created.
    """
    if values is None:
        values = [['avail_zone1', ['fake_host1', 'fake_host2']],
                  ['avail_zone2', ['fake_host3']]]
    for (avail_zone, hosts) in values:
        for host in hosts:
            db.service_create(context,
                              {'host': host,
                               'binary': 'nova-compute',
                               'topic': 'compute',
                               'report_count': 0})
    return values
class ComputeAPIAggrTestCase(BaseTestCase):
    """Unit coverage of the aggregate-related methods defined in
    nova.compute.api (AggregateAPI).
    """
    def setUp(self):
        """Build an AggregateAPI with RPC call/cast stubbed to no-ops."""
        super(ComputeAPIAggrTestCase, self).setUp()
        self.api = compute_api.AggregateAPI()
        self.context = context.get_admin_context()
        # Silence compute RPC traffic; these tests only exercise the API
        # and DB layers.
        self.stubs.Set(self.api.compute_rpcapi.client, 'call', fake_rpc_method)
        self.stubs.Set(self.api.compute_rpcapi.client, 'cast', fake_rpc_method)
    def test_aggregate_no_zone(self):
        """Ensure we can create (and then delete) an aggregate without an
        availability zone."""
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         None)
        self.api.delete_aggregate(self.context, aggr['id'])
        # The deleted row is still visible with read_deleted='yes' ...
        db.aggregate_get(self.context.elevated(read_deleted='yes'),
                         aggr['id'])
        # ... but deleting it again must fail.
        self.assertRaises(exception.AggregateNotFound,
                          self.api.delete_aggregate, self.context, aggr['id'])
    def test_check_az_for_aggregate(self):
        """Changing an aggregate's AZ fails when its hosts already belong
        to a different availability zone."""
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host1 = values[0][1][0]
        fake_host2 = values[0][1][1]
        # Both hosts end up in an aggregate bound to fake_zone ...
        aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
                                               fake_zone, fake_host1)
        aggr1 = self._init_aggregate_with_host(aggr1, None, None, fake_host2)
        # ... and also in a second, zone-less aggregate.
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host2)
        aggr2 = self._init_aggregate_with_host(aggr2, None, None, fake_host1)
        metadata = {'availability_zone': 'another_zone'}
        self.assertRaises(exception.InvalidAggregateAction,
                          self.api.update_aggregate,
                          self.context, aggr2['id'], metadata)
    def test_update_aggregate(self):
        """Ensure aggregate properties can be updated; the AZ cache is
        invalidated and updateprop notifications are emitted."""
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         'fake_zone')
        fake_notifier.NOTIFICATIONS = []
        aggr = self.api.update_aggregate(self.context, aggr['id'],
                                         {'name': 'new_fake_aggregate'})
        self.assertIsNone(availability_zones._get_cache().get('cache'))
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.updateprop.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updateprop.end')
    def test_update_aggregate_no_az(self):
        """Ensure properties without an availability zone can be updated
        even when the aggregate contains hosts that belong to another
        availability zone."""
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        self._init_aggregate_with_host(None, 'fake_aggregate1',
                                       fake_zone, fake_host)
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host)
        metadata = {'name': 'new_fake_aggregate'}
        fake_notifier.NOTIFICATIONS = []
        aggr2 = self.api.update_aggregate(self.context, aggr2['id'],
                                          metadata)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.updateprop.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updateprop.end')
    def test_update_aggregate_az_change(self):
        """Ensure the availability zone can be updated when the aggregate
        is the only one (for its hosts) with an availability zone."""
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
                                               fake_zone, fake_host)
        self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                       fake_host)
        metadata = {'availability_zone': 'new_fake_zone'}
        fake_notifier.NOTIFICATIONS = []
        aggr1 = self.api.update_aggregate(self.context, aggr1['id'],
                                          metadata)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        # AZ changes are reported as metadata updates, not prop updates.
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')
    def test_update_aggregate_az_fails(self):
        """Ensure an aggregate's availability zone can't be updated when
        the aggregate has hosts in another availability zone, but can be
        set to that same zone for a new aggregate."""
        fake_notifier.NOTIFICATIONS = []
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        self._init_aggregate_with_host(None, 'fake_aggregate1',
                                       fake_zone, fake_host)
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host)
        metadata = {'availability_zone': 'another_zone'}
        self.assertRaises(exception.InvalidAggregateAction,
                          self.api.update_aggregate,
                          self.context, aggr2['id'], metadata)
        # Setting the existing zone on a fresh aggregate is allowed.
        fake_host2 = values[0][1][1]
        aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
                                               None, fake_host2)
        metadata = {'availability_zone': fake_zone}
        aggr3 = self.api.update_aggregate(self.context, aggr3['id'],
                                          metadata)
        # 15 notifications accumulate from the setup calls above; the
        # last two are from the successful update.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
        msg = fake_notifier.NOTIFICATIONS[13]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[14]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')
    def test_update_aggregate_az_fails_with_nova_az(self):
        """Ensure the AZ can't be updated when the aggregate's hosts
        already belong to the default (nova) availability zone."""
        fake_notifier.NOTIFICATIONS = []
        values = _create_service_entries(self.context)
        fake_host = values[0][1][0]
        self._init_aggregate_with_host(None, 'fake_aggregate1',
                                       CONF.default_availability_zone,
                                       fake_host)
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host)
        metadata = {'availability_zone': 'another_zone'}
        self.assertRaises(exception.InvalidAggregateAction,
                          self.api.update_aggregate,
                          self.context, aggr2['id'], metadata)
    def test_update_aggregate_metadata(self):
        """Ensure metadata can be updated and that setting a key to None
        removes it; notifications and the AZ cache are checked."""
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         'fake_zone')
        metadata = {'foo_key1': 'foo_value1',
                    'foo_key2': 'foo_value2',
                    'availability_zone': 'fake_zone'}
        fake_notifier.NOTIFICATIONS = []
        availability_zones._get_cache().add('fake_key', 'fake_value')
        aggr = self.api.update_aggregate_metadata(self.context, aggr['id'],
                                                  metadata)
        # Metadata updates must invalidate the availability-zone cache.
        self.assertIsNone(availability_zones._get_cache().get('fake_key'))
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')
        fake_notifier.NOTIFICATIONS = []
        # A None value deletes the key; the notification payload still
        # carries the None marker.
        metadata['foo_key1'] = None
        expected_payload_meta_data = {'foo_key1': None,
                                      'foo_key2': 'foo_value2',
                                      'availability_zone': 'fake_zone'}
        expected = self.api.update_aggregate_metadata(self.context,
                                                      aggr['id'], metadata)
        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
        self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
        self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
        self.assertThat(expected['metadata'],
                        matchers.DictMatches({'availability_zone': 'fake_zone',
                                              'foo_key2': 'foo_value2'}))
    def test_update_aggregate_metadata_no_az(self):
        """Ensure metadata without an availability zone can be updated
        even when the aggregate contains hosts that belong to another
        availability zone."""
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        self._init_aggregate_with_host(None, 'fake_aggregate1',
                                       fake_zone, fake_host)
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host)
        metadata = {'foo_key2': 'foo_value3'}
        fake_notifier.NOTIFICATIONS = []
        aggr2 = self.api.update_aggregate_metadata(self.context, aggr2['id'],
                                                   metadata)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')
        self.assertThat(aggr2['metadata'],
                        matchers.DictMatches({'foo_key2': 'foo_value3'}))
    def test_update_aggregate_metadata_az_change(self):
        """Ensure the availability zone can be updated via the metadata
        API when the aggregate is the only one (for its hosts) with an
        availability zone."""
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
                                               fake_zone, fake_host)
        self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                       fake_host)
        metadata = {'availability_zone': 'new_fake_zone'}
        fake_notifier.NOTIFICATIONS = []
        aggr1 = self.api.update_aggregate_metadata(self.context,
                                                   aggr1['id'], metadata)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')
    def test_update_aggregate_az_do_not_replace_existing_metadata(self):
        """Ensure that updating the aggregate availability zone does not
        replace the aggregate's existing metadata."""
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         'fake_zone')
        metadata = {'foo_key1': 'foo_value1'}
        aggr = self.api.update_aggregate_metadata(self.context,
                                                  aggr['id'],
                                                  metadata)
        metadata = {'availability_zone': 'new_fake_zone'}
        aggr = self.api.update_aggregate(self.context,
                                         aggr['id'],
                                         metadata)
        # Both the new AZ and the pre-existing key must survive.
        self.assertThat(aggr['metadata'], matchers.DictMatches(
            {'availability_zone': 'new_fake_zone', 'foo_key1': 'foo_value1'}))
    def test_update_aggregate_metadata_az_fails(self):
        """Ensure the AZ can't be changed via the metadata API when the
        aggregate has hosts in another availability zone, but setting it
        to that existing zone on a fresh aggregate succeeds."""
        fake_notifier.NOTIFICATIONS = []
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        self._init_aggregate_with_host(None, 'fake_aggregate1',
                                       fake_zone, fake_host)
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host)
        metadata = {'availability_zone': 'another_zone'}
        self.assertRaises(exception.InvalidAggregateAction,
                          self.api.update_aggregate_metadata,
                          self.context, aggr2['id'], metadata)
        aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
                                               None, fake_host)
        metadata = {'availability_zone': fake_zone}
        aggr3 = self.api.update_aggregate_metadata(self.context,
                                                   aggr3['id'],
                                                   metadata)
        # 15 notifications accumulate from the setup calls above; the
        # last two are from the successful update.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
        msg = fake_notifier.NOTIFICATIONS[13]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[14]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')
    def test_delete_aggregate(self):
        """Ensure we can delete an aggregate; create/delete notifications
        are emitted and a second delete raises AggregateNotFound."""
        fake_notifier.NOTIFICATIONS = []
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         'fake_zone')
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.create.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.create.end')
        fake_notifier.NOTIFICATIONS = []
        self.api.delete_aggregate(self.context, aggr['id'])
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.delete.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.delete.end')
        # Soft-deleted row is still readable with read_deleted='yes'.
        db.aggregate_get(self.context.elevated(read_deleted='yes'),
                         aggr['id'])
        self.assertRaises(exception.AggregateNotFound,
                          self.api.delete_aggregate, self.context, aggr['id'])
    def test_delete_non_empty_aggregate(self):
        """Ensure InvalidAggregateAction is raised when deleting a
        non-empty aggregate (one that still has hosts)."""
        _create_service_entries(self.context,
                                [['fake_availability_zone', ['fake_host']]])
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         'fake_availability_zone')
        self.api.add_host_to_aggregate(self.context, aggr['id'], 'fake_host')
        self.assertRaises(exception.InvalidAggregateAction,
                          self.api.delete_aggregate, self.context, aggr['id'])
    def test_add_host_to_aggregate(self):
        """Ensure we can add a host to an aggregate; the compute RPC is
        notified, the AZ cache is refreshed and addhost notifications are
        emitted."""
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        aggr = self.api.create_aggregate(self.context,
                                         'fake_aggregate', fake_zone)

        def fake_add_aggregate_host(*args, **kwargs):
            hosts = kwargs["aggregate"]["hosts"]
            self.assertIn(fake_host, hosts)

        self.stubs.Set(self.api.compute_rpcapi, 'add_aggregate_host',
                       fake_add_aggregate_host)

        self.mox.StubOutWithMock(availability_zones,
                                 'update_host_availability_zone_cache')

        def _stub_update_host_avail_zone_cache(host, az=None):
            if az is not None:
                availability_zones.update_host_availability_zone_cache(
                    self.context, host, az)
            else:
                availability_zones.update_host_availability_zone_cache(
                    self.context, host)

        # Expect one cache update per created service host, plus one for
        # the host actually added to the aggregate.
        for (avail_zone, hosts) in values:
            for host in hosts:
                _stub_update_host_avail_zone_cache(
                    host, CONF.default_availability_zone)
        _stub_update_host_avail_zone_cache(fake_host)
        self.mox.ReplayAll()

        fake_notifier.NOTIFICATIONS = []
        aggr = self.api.add_host_to_aggregate(self.context,
                                              aggr['id'], fake_host)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.addhost.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.addhost.end')
        self.assertEqual(len(aggr['hosts']), 1)
def test_add_host_to_aggr_with_no_az(self):
values = _create_service_entries(self.context)
fake_zone = values[0][0]
fake_host = values[0][1][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
fake_host)
aggr_no_az = self.api.create_aggregate(self.context, 'fake_aggregate2',
None)
aggr_no_az = self.api.add_host_to_aggregate(self.context,
aggr_no_az['id'],
fake_host)
self.assertIn(fake_host, aggr['hosts'])
self.assertIn(fake_host, aggr_no_az['hosts'])
    def test_add_host_no_az_metadata(self):
        # NOTE(mtreinish) based on how create works this is not how the
        # the metadata is supposed to end up in the database but it has
        # been seen. See lp bug #1209007. This test just confirms that
        # the host is still added to the aggregate if there is no
        # availability zone metadata.
        def fake_aggregate_metadata_get_by_metadata_key(*args, **kwargs):
            # Return metadata lacking any 'availability_zone' key.
            return {'meta_key': 'fake_value'}

        self.stubs.Set(self.compute.db,
                       'aggregate_metadata_get_by_metadata_key',
                       fake_aggregate_metadata_get_by_metadata_key)
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         fake_zone)
        aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
                                              fake_host)
        self.assertIn(fake_host, aggr['hosts'])
def test_add_host_to_multi_az(self):
# Ensure we can't add a host to different availability zone
values = _create_service_entries(self.context)
fake_zone = values[0][0]
fake_host = values[0][1][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], fake_host)
self.assertEqual(len(aggr['hosts']), 1)
fake_zone2 = "another_zone"
aggr2 = self.api.create_aggregate(self.context,
'fake_aggregate2', fake_zone2)
self.assertRaises(exception.InvalidAggregateAction,
self.api.add_host_to_aggregate,
self.context, aggr2['id'], fake_host)
def test_add_host_to_multi_az_with_nova_agg(self):
# Ensure we can't add a host if already existing in an agg with AZ set
# to default
values = _create_service_entries(self.context)
fake_host = values[0][1][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate',
CONF.default_availability_zone)
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], fake_host)
self.assertEqual(len(aggr['hosts']), 1)
fake_zone2 = "another_zone"
aggr2 = self.api.create_aggregate(self.context,
'fake_aggregate2', fake_zone2)
self.assertRaises(exception.InvalidAggregateAction,
self.api.add_host_to_aggregate,
self.context, aggr2['id'], fake_host)
def test_add_host_to_aggregate_multiple(self):
# Ensure we can add multiple hosts to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values[0][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
for host in values[0][1]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], host)
self.assertEqual(len(aggr['hosts']), len(values[0][1]))
def test_add_host_to_aggregate_raise_not_found(self):
# Ensure ComputeHostNotFound is raised when adding invalid host.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
fake_notifier.NOTIFICATIONS = []
self.assertRaises(exception.ComputeHostNotFound,
self.api.add_host_to_aggregate,
self.context, aggr['id'], 'invalid_host')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
self.assertEqual(fake_notifier.NOTIFICATIONS[1].publisher_id,
'compute.fake-mini')
    def test_remove_host_from_aggregate_active(self):
        # Ensure we can remove a host from an aggregate.
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        aggr = self.api.create_aggregate(self.context,
                                         'fake_aggregate', fake_zone)
        for host in values[0][1]:
            aggr = self.api.add_host_to_aggregate(self.context,
                                                  aggr['id'], host)
        host_to_remove = values[0][1][0]

        def fake_remove_aggregate_host(*args, **kwargs):
            # The RPC fanout must see the aggregate without the removed host.
            hosts = kwargs["aggregate"]["hosts"]
            self.assertNotIn(host_to_remove, hosts)

        self.stubs.Set(self.api.compute_rpcapi, 'remove_aggregate_host',
                       fake_remove_aggregate_host)
        self.mox.StubOutWithMock(availability_zones,
                                 'update_host_availability_zone_cache')
        # Expect exactly one AZ-cache refresh, for the removed host.
        availability_zones.update_host_availability_zone_cache(self.context,
                                                               host_to_remove)
        self.mox.ReplayAll()
        fake_notifier.NOTIFICATIONS = []
        expected = self.api.remove_host_from_aggregate(self.context,
                                                       aggr['id'],
                                                       host_to_remove)
        # Exactly a start/end notification pair is emitted.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.removehost.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.removehost.end')
        self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
def test_remove_host_from_aggregate_raise_not_found(self):
# Ensure ComputeHostNotFound is raised when removing invalid host.
_create_service_entries(self.context, [['fake_zone', ['fake_host']]])
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertRaises(exception.ComputeHostNotFound,
self.api.remove_host_from_aggregate,
self.context, aggr['id'], 'invalid_host')
def test_aggregate_list(self):
aggregate = self.api.create_aggregate(self.context,
'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
'foo_key2': 'foo_value2'}
meta_aggregate = self.api.create_aggregate(self.context,
'fake_aggregate2',
'fake_zone2')
self.api.update_aggregate_metadata(self.context, meta_aggregate['id'],
metadata)
aggregate_list = self.api.get_aggregate_list(self.context)
self.assertIn(aggregate['id'],
map(lambda x: x['id'], aggregate_list))
self.assertIn(meta_aggregate['id'],
map(lambda x: x['id'], aggregate_list))
self.assertIn('fake_aggregate',
map(lambda x: x['name'], aggregate_list))
self.assertIn('fake_aggregate2',
map(lambda x: x['name'], aggregate_list))
self.assertIn('fake_zone',
map(lambda x: x['availability_zone'], aggregate_list))
self.assertIn('fake_zone2',
map(lambda x: x['availability_zone'], aggregate_list))
test_meta_aggregate = aggregate_list[1]
self.assertIn('foo_key1', test_meta_aggregate.get('metadata'))
self.assertIn('foo_key2', test_meta_aggregate.get('metadata'))
self.assertEqual('foo_value1',
test_meta_aggregate.get('metadata')['foo_key1'])
self.assertEqual('foo_value2',
test_meta_aggregate.get('metadata')['foo_key2'])
def test_aggregate_list_with_hosts(self):
values = _create_service_entries(self.context)
fake_zone = values[0][0]
host_aggregate = self.api.create_aggregate(self.context,
'fake_aggregate',
fake_zone)
self.api.add_host_to_aggregate(self.context, host_aggregate['id'],
values[0][1][0])
aggregate_list = self.api.get_aggregate_list(self.context)
aggregate = aggregate_list[0]
self.assertIn(values[0][1][0], aggregate.get('hosts'))
class ComputeAggrTestCase(BaseTestCase):
    """This is for unit coverage of aggregate-related methods
    defined in nova.compute.manager.
    """

    def setUp(self):
        super(ComputeAggrTestCase, self).setUp()
        self.context = context.get_admin_context()
        values = {'name': 'test_aggr'}
        az = {'availability_zone': 'test_zone'}
        # A real DB aggregate record that each test passes to the manager.
        self.aggr = db.aggregate_create(self.context, values, metadata=az)

    def test_add_aggregate_host(self):
        # The manager must delegate to the virt driver's add_to_aggregate.
        def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
            fake_driver_add_to_aggregate.called = True
            return {"foo": "bar"}

        self.stubs.Set(self.compute.driver, "add_to_aggregate",
                       fake_driver_add_to_aggregate)
        self.compute.add_aggregate_host(self.context, host="host",
                aggregate=jsonutils.to_primitive(self.aggr), slave_info=None)
        self.assertTrue(fake_driver_add_to_aggregate.called)

    def test_remove_aggregate_host(self):
        # The manager must delegate to the driver's remove_from_aggregate.
        def fake_driver_remove_from_aggregate(context, aggregate, host,
                                              **_ignore):
            fake_driver_remove_from_aggregate.called = True
            # BUG FIX: the third argument of assertEqual is the failure
            # message; the original passed the meaningless literal "host".
            self.assertEqual("host", host,
                             "driver called with unexpected host")
            return {"foo": "bar"}

        self.stubs.Set(self.compute.driver, "remove_from_aggregate",
                       fake_driver_remove_from_aggregate)
        self.compute.remove_aggregate_host(self.context,
                aggregate=jsonutils.to_primitive(self.aggr), host="host",
                slave_info=None)
        self.assertTrue(fake_driver_remove_from_aggregate.called)

    def test_add_aggregate_host_passes_slave_info_to_driver(self):
        # slave_info must reach the driver untouched, along with the
        # original context, aggregate id and host.
        def driver_add_to_aggregate(context, aggregate, host, **kwargs):
            self.assertEqual(self.context, context)
            self.assertEqual(aggregate['id'], self.aggr['id'])
            self.assertEqual(host, "the_host")
            self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))

        self.stubs.Set(self.compute.driver, "add_to_aggregate",
                       driver_add_to_aggregate)
        self.compute.add_aggregate_host(self.context, host="the_host",
                slave_info="SLAVE_INFO",
                aggregate=jsonutils.to_primitive(self.aggr))

    def test_remove_from_aggregate_passes_slave_info_to_driver(self):
        # Same contract as above, for the removal path.
        def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
            self.assertEqual(self.context, context)
            self.assertEqual(aggregate['id'], self.aggr['id'])
            self.assertEqual(host, "the_host")
            self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))

        self.stubs.Set(self.compute.driver, "remove_from_aggregate",
                       driver_remove_from_aggregate)
        self.compute.remove_aggregate_host(self.context,
                aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
                slave_info="SLAVE_INFO")
class ComputePolicyTestCase(BaseTestCase):
    """Verify that compute.API entry points are guarded by policy checks."""

    def setUp(self):
        super(ComputePolicyTestCase, self).setUp()
        self.compute_api = compute.API()

    def test_actions_are_prefixed(self):
        # check_policy must prepend 'compute:' to the action name.
        self.mox.StubOutWithMock(policy, 'enforce')
        nova.policy.enforce(self.context, 'compute:reboot', {})
        self.mox.ReplayAll()
        compute_api.check_policy(self.context, 'reboot', {})

    def test_wrapped_method(self):
        instance = self._create_fake_instance_obj(params={'host': None,
                                                          'cell_name': 'foo'})
        # force delete to fail
        rules = {"compute:delete": [["false:false"]]}
        self.policy.set_rules(rules)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.compute_api.delete, self.context, instance)
        # reset rules to allow deletion
        rules = {"compute:delete": []}
        self.policy.set_rules(rules)
        self.compute_api.delete(self.context, instance)

    def test_create_fail(self):
        rules = {"compute:create": [["false:false"]]}
        self.policy.set_rules(rules)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.compute_api.create, self.context, '1', '1')

    def test_create_attach_volume_fail(self):
        # BUG FIX: deny the attach_volume rule to match the test name
        # (the original denied attach_network here, so the two attach
        # tests exercised each other's rule).
        rules = {
            "compute:create": [],
            "compute:create:attach_network": [],
            "compute:create:attach_volume": [["false:false"]],
        }
        self.policy.set_rules(rules)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.compute_api.create, self.context, '1', '1',
                          requested_networks='blah',
                          block_device_mapping='blah')

    def test_create_attach_network_fail(self):
        # BUG FIX: deny the attach_network rule to match the test name
        # (the original denied attach_volume here).
        rules = {
            "compute:create": [],
            "compute:create:attach_network": [["false:false"]],
            "compute:create:attach_volume": [],
        }
        self.policy.set_rules(rules)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.compute_api.create, self.context, '1', '1',
                          requested_networks='blah',
                          block_device_mapping='blah')

    def test_get_fail(self):
        instance = self._create_fake_instance()
        rules = {
            "compute:get": [["false:false"]],
        }
        self.policy.set_rules(rules)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.compute_api.get, self.context,
                          instance['uuid'])

    def test_get_all_fail(self):
        rules = {
            "compute:get_all": [["false:false"]],
        }
        self.policy.set_rules(rules)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.compute_api.get_all, self.context)

    def test_get_instance_faults(self):
        instance1 = self._create_fake_instance()
        instance2 = self._create_fake_instance()
        instances = [instance1, instance2]
        rules = {
            "compute:get_instance_faults": [["false:false"]],
        }
        self.policy.set_rules(rules)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.compute_api.get_instance_faults,
                          context.get_admin_context(), instances)

    def test_force_host_fail(self):
        rules = {"compute:create": [],
                 "compute:create:forced_host": [["role:fake"]],
                 "network:validate_networks": []}
        self.policy.set_rules(rules)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.compute_api.create, self.context, None, '1',
                          availability_zone='1:1')

    def test_force_host_pass(self):
        rules = {"compute:create": [],
                 "compute:create:forced_host": [],
                 "network:validate_networks": []}
        self.policy.set_rules(rules)
        self.compute_api.create(self.context, None, '1',
                                availability_zone='1:1')
class DisabledInstanceTypesTestCase(BaseTestCase):
    """Some instance-types are marked 'disabled' which means that they will not
    show up in customer-facing listings. We do, however, want those
    instance-types to be available for emergency migrations and for rebuilding
    of existing instances.

    One legitimate use of the 'disabled' field would be when phasing out a
    particular instance-type. We still want customers to be able to use an
    instance of the old type, and we want Ops to be able to perform
    migrations against it, but we *don't* want customers building new slices
    with this phased-out instance-type.
    """

    def setUp(self):
        super(DisabledInstanceTypesTestCase, self).setUp()
        self.compute_api = compute.API()
        self.inst_type = flavors.get_default_flavor()

    def test_can_build_instance_from_visible_instance_type(self):
        self.inst_type['disabled'] = False
        # Assert that exception.FlavorNotFound is not raised
        self.compute_api.create(self.context, self.inst_type,
                                image_href='some-fake-image')

    def test_cannot_build_instance_from_disabled_instance_type(self):
        self.inst_type['disabled'] = True
        self.assertRaises(exception.FlavorNotFound,
            self.compute_api.create, self.context, self.inst_type, None)

    def test_can_resize_to_visible_instance_type(self):
        instance = self._create_fake_instance_obj()
        orig_get_flavor_by_flavor_id =\
            flavors.get_flavor_by_flavor_id

        def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
                                         read_deleted="yes"):
            # Wrap the real lookup but force the flavor to be enabled.
            instance_type = orig_get_flavor_by_flavor_id(flavor_id,
                                                         ctxt,
                                                         read_deleted)
            instance_type['disabled'] = False
            return instance_type

        self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
                       fake_get_flavor_by_flavor_id)
        self._stub_migrate_server()
        self.compute_api.resize(self.context, instance, '4')

    def test_cannot_resize_to_disabled_instance_type(self):
        instance = self._create_fake_instance_obj()
        orig_get_flavor_by_flavor_id = \
            flavors.get_flavor_by_flavor_id

        def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
                                         read_deleted="yes"):
            # Wrap the real lookup but force the flavor to be disabled.
            instance_type = orig_get_flavor_by_flavor_id(flavor_id,
                                                         ctxt,
                                                         read_deleted)
            instance_type['disabled'] = True
            return instance_type

        self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
                       fake_get_flavor_by_flavor_id)
        self.assertRaises(exception.FlavorNotFound,
            self.compute_api.resize, self.context, instance, '4')
class ComputeReschedulingTestCase(BaseTestCase):
    """Tests re-scheduling logic for new build requests."""

    def setUp(self):
        super(ComputeReschedulingTestCase, self).setUp()
        self.expected_task_state = task_states.SCHEDULING

        def fake_update(*args, **kwargs):
            # Record the task_state handed to _instance_update so tests
            # can assert on it later.
            self.updated_task_state = kwargs.get('task_state')

        self.stubs.Set(self.compute, '_instance_update', fake_update)

    def _reschedule(self, request_spec=None, filter_properties=None,
                    exc_info=None):
        # Helper: invoke ComputeManager._reschedule with mostly-dummy
        # build arguments and return its boolean result.
        if not filter_properties:
            filter_properties = {}
        instance = self._create_fake_instance_obj()
        admin_password = None
        injected_files = None
        requested_networks = None
        is_first_time = False
        scheduler_method = self.compute.scheduler_rpcapi.run_instance
        method_args = (request_spec, admin_password, injected_files,
                       requested_networks, is_first_time, filter_properties)
        return self.compute._reschedule(self.context, request_spec,
                filter_properties, instance, scheduler_method,
                method_args, self.expected_task_state, exc_info=exc_info)

    def test_reschedule_no_filter_properties(self):
        # no filter_properties will disable re-scheduling.
        self.assertFalse(self._reschedule())

    def test_reschedule_no_retry_info(self):
        # no retry info will also disable re-scheduling.
        filter_properties = {}
        self.assertFalse(self._reschedule(filter_properties=filter_properties))

    def test_reschedule_no_request_spec(self):
        # no request spec will also disable re-scheduling.
        retry = dict(num_attempts=1)
        filter_properties = dict(retry=retry)
        self.assertFalse(self._reschedule(filter_properties=filter_properties))

    def test_reschedule_success(self):
        retry = dict(num_attempts=1)
        filter_properties = dict(retry=retry)
        request_spec = {'instance_uuids': ['foo', 'bar']}
        # Raise and catch an exception so that a genuine exc_info triple
        # (and its formatted string) is available for the assertions.
        try:
            raise test.TestingException("just need an exception")
        except test.TestingException:
            exc_info = sys.exc_info()
            exc_str = traceback.format_exception_only(exc_info[0],
                                                      exc_info[1])
        self.assertTrue(self._reschedule(filter_properties=filter_properties,
            request_spec=request_spec, exc_info=exc_info))
        # NOTE(review): one uuid appears to be consumed from the spec by
        # the reschedule -- confirm against ComputeManager._reschedule.
        self.assertEqual(1, len(request_spec['instance_uuids']))
        self.assertEqual(self.updated_task_state, self.expected_task_state)
        self.assertEqual(exc_str, filter_properties['retry']['exc'])
class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
    """Test re-scheduling logic for prep_resize requests."""

    def setUp(self):
        super(ComputeReschedulingResizeTestCase, self).setUp()
        # prep_resize reschedules expect the RESIZE_PREP task state.
        self.expected_task_state = task_states.RESIZE_PREP

    def _reschedule(self, request_spec=None, filter_properties=None,
                    exc_info=None):
        # Exercise ComputeManager._reschedule with resize-flavored
        # arguments instead of the build-flavored ones of the parent.
        filter_properties = filter_properties or {}
        fresh_uuid = str(uuid.uuid4())
        target = self._create_fake_instance_obj(
            params={'uuid': fresh_uuid})
        scheduler_hint = dict(filter_properties=filter_properties)
        resize_args = (target, None, scheduler_hint, {}, None)
        return self.compute._reschedule(
            self.context, request_spec, filter_properties, target,
            self.compute.compute_task_api.resize_instance, resize_args,
            self.expected_task_state, exc_info=exc_info)
class InnerTestingException(Exception):
    """Distinct exception type used by the reschedule tests to simulate a
    secondary failure (e.g. during cleanup) separate from the original
    build/resize error.
    """
    pass
class ComputeRescheduleOrErrorTestCase(BaseTestCase):
    """Test logic and exception handling around rescheduling or re-raising
    original exceptions when builds fail.
    """

    def setUp(self):
        super(ComputeRescheduleOrErrorTestCase, self).setUp()
        self.instance = self._create_fake_instance_obj()

    def test_reschedule_or_error_called(self):
        """Basic sanity check to make sure _reschedule_or_error is called
        when a build fails.
        """
        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 'get_by_instance_uuid')
        self.mox.StubOutWithMock(self.compute, '_spawn')
        self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
        bdms = block_device_obj.block_device_make_list(self.context, [])
        objects.BlockDeviceMappingList.get_by_instance_uuid(
                mox.IgnoreArg(), self.instance.uuid).AndReturn(bdms)
        # _spawn fails with a build error ...
        self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
                [], mox.IgnoreArg(), [], None, set_access_ip=False).AndRaise(
                        test.TestingException("BuildError"))
        # ... which must trigger the reschedule-or-error path.
        self.compute._reschedule_or_error(mox.IgnoreArg(), self.instance,
                mox.IgnoreArg(), None, None, None,
                False, None, {}, bdms, False).AndReturn(True)
        self.mox.ReplayAll()
        self.compute._run_instance(self.context, None, {}, None, None, None,
                False, None, self.instance, False)

    def test_shutdown_instance_fail(self):
        """Test shutdown instance failing before re-scheduling logic can even
        run.
        """
        instance_uuid = self.instance['uuid']
        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
        # Generate a genuine exc_info for the original build error.
        try:
            raise test.TestingException("Original")
        except Exception:
            exc_info = sys.exc_info()
            compute_utils.add_instance_fault_from_exc(self.context,
                    self.instance, exc_info[0], exc_info=exc_info)
            # Cleanup itself blows up before any reschedule can happen.
            self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
                    mox.IgnoreArg(),
                    mox.IgnoreArg()).AndRaise(InnerTestingException("Error"))
            self.compute._log_original_error(exc_info, instance_uuid)
            self.mox.ReplayAll()
            # should raise the deallocation exception, not the original build
            # error:
            self.assertRaises(InnerTestingException,
                    self.compute._reschedule_or_error, self.context,
                    self.instance, exc_info, None, None, None, False, None,
                    {})

    def test_shutdown_instance_fail_instance_info_cache_not_found(self):
        # Covers the case that _shutdown_instance fails with an
        # InstanceInfoCacheNotFound exception when getting instance network
        # information prior to calling driver.destroy.
        elevated_context = self.context.elevated()
        error = exception.InstanceInfoCacheNotFound(
            instance_uuid=self.instance['uuid'])
        with contextlib.nested(
            mock.patch.object(self.context, 'elevated',
                              return_value=elevated_context),
            mock.patch.object(self.compute, '_get_instance_nw_info',
                              side_effect=error),
            mock.patch.object(self.compute,
                              '_get_instance_block_device_info'),
            mock.patch.object(self.compute.driver, 'destroy'),
            mock.patch.object(self.compute, '_try_deallocate_network')
        ) as (
            elevated_mock,
            _get_instance_nw_info_mock,
            _get_instance_block_device_info_mock,
            destroy_mock,
            _try_deallocate_network_mock
        ):
            inst_obj = self.instance
            self.compute._shutdown_instance(self.context, inst_obj,
                                            bdms=[], notify=False)
            # By asserting that _try_deallocate_network_mock was called
            # exactly once, we know that _get_instance_nw_info raising
            # InstanceInfoCacheNotFound did not make _shutdown_instance error
            # out and driver.destroy was still called.
            _try_deallocate_network_mock.assert_called_once_with(
                elevated_context, inst_obj, None)

    def test_reschedule_fail(self):
        # Test handling of exception from _reschedule.
        try:
            raise test.TestingException("Original")
        except Exception:
            exc_info = sys.exc_info()
        instance_uuid = self.instance['uuid']
        method_args = (None, None, None, None, False, {})
        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
        self.mox.StubOutWithMock(self.compute, '_reschedule')
        self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
                                        mox.IgnoreArg(),
                                        mox.IgnoreArg())
        self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
                                      mox.IgnoreArg())
        # NOTE(review): argument order here ({} after self.instance)
        # differs from the recordings in test_reschedule_false/true --
        # confirm which matches ComputeManager._reschedule's signature.
        self.compute._reschedule(self.context, None, self.instance,
                {}, self.compute.scheduler_rpcapi.run_instance,
                method_args, task_states.SCHEDULING, exc_info).AndRaise(
                        InnerTestingException("Inner"))
        self.mox.ReplayAll()
        self.assertFalse(self.compute._reschedule_or_error(self.context,
            self.instance, exc_info, None, None, None, False, None, {}))

    def test_reschedule_false(self):
        # Test not-rescheduling, but no nested exception.
        instance_uuid = self.instance['uuid']
        method_args = (None, None, None, None, False, {})
        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
        self.mox.StubOutWithMock(self.compute, '_reschedule')
        try:
            raise test.TestingException("Original")
        except test.TestingException:
            exc_info = sys.exc_info()
            compute_utils.add_instance_fault_from_exc(self.context,
                    self.instance, exc_info[0], exc_info=exc_info)
            self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
                                            mox.IgnoreArg(),
                                            mox.IgnoreArg())
            self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
                                          mox.IgnoreArg())
            # _reschedule declines to reschedule ...
            self.compute._reschedule(self.context, None, {}, self.instance,
                    self.compute.scheduler_rpcapi.run_instance, method_args,
                    task_states.SCHEDULING, exc_info).AndReturn(False)
            self.mox.ReplayAll()
            # re-scheduling is False, the original build error should be
            # raised here:
            self.assertFalse(self.compute._reschedule_or_error(self.context,
                self.instance, exc_info, None, None, None, False, None, {}))

    def test_reschedule_true(self):
        # Test behavior when re-scheduling happens.
        instance_uuid = self.instance['uuid']
        method_args = (None, None, None, None, False, {})
        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
        self.mox.StubOutWithMock(self.compute, '_reschedule')
        try:
            raise test.TestingException("Original")
        except Exception:
            exc_info = sys.exc_info()
            compute_utils.add_instance_fault_from_exc(self.context,
                    self.instance, exc_info[0], exc_info=exc_info)
            self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
                                            mox.IgnoreArg(),
                                            mox.IgnoreArg())
            self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
                                          mox.IgnoreArg())
            self.compute._reschedule(self.context, None, {}, self.instance,
                    self.compute.scheduler_rpcapi.run_instance,
                    method_args, task_states.SCHEDULING, exc_info).AndReturn(
                            True)
            self.compute._log_original_error(exc_info, instance_uuid)
            self.mox.ReplayAll()
            # re-scheduling is True, original error is logged, but nothing
            # is raised:
            self.compute._reschedule_or_error(self.context, self.instance,
                    exc_info, None, None, None, False, None, {})

    def test_no_reschedule_on_delete_during_spawn(self):
        # instance should not be rescheduled if instance is deleted
        # during the build
        self.mox.StubOutWithMock(self.compute, '_spawn')
        self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
        exc = exception.UnexpectedDeletingTaskStateError(
                expected=task_states.SPAWNING, actual=task_states.DELETING)
        self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
        self.mox.ReplayAll()
        # test succeeds if mocked method '_reschedule_or_error' is not
        # called.
        self.compute._run_instance(self.context, None, {}, None, None, None,
                False, None, self.instance, False)

    def test_no_reschedule_on_unexpected_task_state(self):
        # instance shouldn't be rescheduled if unexpected task state arises.
        # the exception should get reraised.
        self.mox.StubOutWithMock(self.compute, '_spawn')
        self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
        exc = exception.UnexpectedTaskStateError(
                expected=task_states.SPAWNING,
                actual=task_states.SCHEDULING)
        self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
        self.mox.ReplayAll()
        self.assertRaises(exception.UnexpectedTaskStateError,
                self.compute._run_instance, self.context, None, {}, None,
                None, None, False, None, self.instance, False)

    def test_no_reschedule_on_block_device_fail(self):
        # Invalid block-device mappings must re-raise, never reschedule.
        self.mox.StubOutWithMock(self.compute, '_prep_block_device')
        self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
        exc = exception.InvalidBDM()
        self.compute._prep_block_device(mox.IgnoreArg(), self.instance,
                                        mox.IgnoreArg()).AndRaise(exc)
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidBDM, self.compute._run_instance,
                          self.context, None, {}, None, None, None, False,
                          None, self.instance, False)
class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
    """Test logic and exception handling around rescheduling prep resize
    requests
    """

    def setUp(self):
        super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp()
        self.instance = self._create_fake_instance()
        self.instance_uuid = self.instance['uuid']
        self.instance_type = flavors.get_flavor_by_name(
                "m1.tiny")

    def test_reschedule_resize_or_reraise_called(self):
        """Verify the rescheduling logic gets called when there is an error
        during prep_resize.
        """
        inst_obj = self._create_fake_instance_obj()
        self.mox.StubOutWithMock(self.compute.db, 'migration_create')
        self.mox.StubOutWithMock(self.compute,
                                 '_reschedule_resize_or_reraise')
        # migration_create fails, which must trigger the reschedule path.
        self.compute.db.migration_create(mox.IgnoreArg(),
                mox.IgnoreArg()).AndRaise(test.TestingException("Original"))
        self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None,
                inst_obj, mox.IgnoreArg(), self.instance_type,
                mox.IgnoreArg(), {},
                {})
        self.mox.ReplayAll()
        self.compute.prep_resize(self.context, image=None,
                                 instance=inst_obj,
                                 instance_type=self.instance_type,
                                 reservations=[], request_spec={},
                                 filter_properties={}, node=None)

    def test_reschedule_fails_with_exception(self):
        """Original exception should be raised if the _reschedule method
        raises another exception
        """
        instance = self._create_fake_instance_obj()
        scheduler_hint = dict(filter_properties={})
        method_args = (instance, None, scheduler_hint, self.instance_type,
                       None)
        self.mox.StubOutWithMock(self.compute, "_reschedule")
        self.compute._reschedule(
                self.context, None, None, instance,
                self.compute.compute_task_api.resize_instance, method_args,
                task_states.RESIZE_PREP).AndRaise(
                        InnerTestingException("Inner"))
        self.mox.ReplayAll()
        # Generate a genuine exc_info for the original resize error.
        try:
            raise test.TestingException("Original")
        except Exception:
            exc_info = sys.exc_info()
            self.assertRaises(test.TestingException,
                    self.compute._reschedule_resize_or_reraise, self.context,
                    None, instance, exc_info, self.instance_type,
                    self.none_quotas, {}, {})

    def test_reschedule_false(self):
        """Original exception should be raised if the resize is not
        rescheduled.
        """
        instance = self._create_fake_instance_obj()
        scheduler_hint = dict(filter_properties={})
        method_args = (instance, None, scheduler_hint, self.instance_type,
                       None)
        self.mox.StubOutWithMock(self.compute, "_reschedule")
        self.compute._reschedule(
                self.context, None, None, instance,
                self.compute.compute_task_api.resize_instance, method_args,
                task_states.RESIZE_PREP).AndReturn(False)
        self.mox.ReplayAll()
        try:
            raise test.TestingException("Original")
        except Exception:
            exc_info = sys.exc_info()
            self.assertRaises(test.TestingException,
                    self.compute._reschedule_resize_or_reraise, self.context,
                    None, instance, exc_info, self.instance_type,
                    self.none_quotas, {}, {})

    def test_reschedule_true(self):
        # If rescheduled, the original resize exception should be logged.
        instance = self._create_fake_instance_obj()
        scheduler_hint = dict(filter_properties={})
        method_args = (instance, None, scheduler_hint, self.instance_type,
                       None)
        try:
            raise test.TestingException("Original")
        except Exception:
            exc_info = sys.exc_info()
            self.mox.StubOutWithMock(self.compute, "_reschedule")
            self.mox.StubOutWithMock(self.compute, "_log_original_error")
            self.compute._reschedule(self.context, {}, {},
                    instance,
                    self.compute.compute_task_api.resize_instance,
                    method_args,
                    task_states.RESIZE_PREP, exc_info).AndReturn(True)
            self.compute._log_original_error(exc_info, instance.uuid)
            self.mox.ReplayAll()
            # Rescheduled: nothing is re-raised.
            self.compute._reschedule_resize_or_reraise(
                    self.context, None, instance, exc_info,
                    self.instance_type, self.none_quotas, {}, {})
class ComputeInactiveImageTestCase(BaseTestCase):
    """Tests instance creation against images that are not active."""

    def setUp(self):
        super(ComputeInactiveImageTestCase, self).setUp()

        def fake_show(meh, context, id, **kwargs):
            # Report every image as deleted so instance creation must fail.
            image = {'id': id, 'min_disk': None, 'min_ram': None,
                     'name': 'fake_name', 'status': 'deleted'}
            image['properties'] = {'kernel_id': 'fake_kernel_id',
                                   'ramdisk_id': 'fake_ramdisk_id',
                                   'something_else': 'meow'}
            return image

        fake_image.stub_out_image_service(self.stubs)
        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
        self.compute_api = compute.API()

    def test_create_instance_with_deleted_image(self):
        # Make sure we can't start an instance with a deleted image.
        inst_type = flavors.get_flavor_by_name('m1.tiny')
        self.assertRaises(exception.ImageNotActive,
                          self.compute_api.create,
                          self.context, inst_type, 'fake-image-uuid')
class EvacuateHostTestCase(BaseTestCase):
    # Exercises the evacuate path (rebuild_instance with recreate=True):
    # an instance that lived on 'fake_host_2'/'fakenode2' is rebuilt on this
    # test's local compute host.  Driver and network interactions are stubbed;
    # several tests use mox record/replay, so call order matters.

    def setUp(self):
        super(EvacuateHostTestCase, self).setUp()
        # Instance starts out on a *different* host/node so there is
        # something to evacuate from.
        self.inst = self._create_fake_instance_obj(
            {'host': 'fake_host_2', 'node': 'fakenode2'})
        self.inst.task_state = task_states.REBUILDING
        self.inst.save()

    def tearDown(self):
        db.instance_destroy(self.context, self.inst.uuid)
        super(EvacuateHostTestCase, self).tearDown()

    def _rebuild(self, on_shared_storage=True):
        """Drive ComputeManager.rebuild_instance() in recreate (evacuate) mode."""
        def fake(cls, ctxt, instance, *args, **kwargs):
            pass

        # Network plumbing is irrelevant to these tests; no-op it.
        self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)

        # None image refs / injected files: evacuate reuses the existing image.
        orig_image_ref = None
        image_ref = None
        injected_files = None
        bdms = db.block_device_mapping_get_all_by_instance(self.context,
                                                           self.inst.uuid)
        self.compute.rebuild_instance(
            self.context, self.inst, orig_image_ref,
            image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
            on_shared_storage=on_shared_storage)

    def test_rebuild_on_host_updated_target(self):
        """Confirm evacuate scenario updates host and node."""
        self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)

        def fake_get_compute_info(context, host):
            # The manager must look up the *destination* compute node with an
            # elevated (admin) context.
            self.assertTrue(context.is_admin)
            self.assertEqual('fake-mini', host)
            cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename)
            return cn

        self.stubs.Set(self.compute, '_get_compute_info',
                       fake_get_compute_info)
        self.mox.ReplayAll()

        self._rebuild()

        # Should be on destination host
        instance = db.instance_get(self.context, self.inst.id)
        self.assertEqual(instance['host'], self.compute.host)
        self.assertEqual(NODENAME, instance['node'])

    def test_rebuild_on_host_updated_target_node_not_found(self):
        """Confirm evacuate scenario where compute_node isn't found."""
        self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)

        def fake_get_compute_info(context, host):
            raise exception.NotFound(_("Host %s not found") % host)

        self.stubs.Set(self.compute, '_get_compute_info',
                       fake_get_compute_info)
        self.mox.ReplayAll()

        self._rebuild()

        # Should be on destination host; node is unset because the compute
        # node lookup failed.
        instance = db.instance_get(self.context, self.inst.id)
        self.assertEqual(instance['host'], self.compute.host)
        self.assertIsNone(instance['node'])

    def test_rebuild_with_instance_in_stopped_state(self):
        """Confirm evacuate scenario updates vm_state to stopped
        if instance is in stopped state
        """
        # Initialize the VM to stopped state
        db.instance_update(self.context, self.inst.uuid,
                           {"vm_state": vm_states.STOPPED})
        self.inst.vm_state = vm_states.STOPPED

        self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
        self.mox.ReplayAll()

        self._rebuild()

        # Check the vm state is reset to stopped
        instance = db.instance_get(self.context, self.inst.id)
        self.assertEqual(instance['vm_state'], vm_states.STOPPED)

    def test_rebuild_with_wrong_shared_storage(self):
        """Confirm evacuate scenario does not update host."""
        # Driver says the disk *is* on shared storage, but the caller claims
        # otherwise -> InvalidSharedStorage.
        self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
        self.mox.ReplayAll()

        self.assertRaises(exception.InvalidSharedStorage,
                          lambda: self._rebuild(on_shared_storage=False))

        # Should remain on original host
        instance = db.instance_get(self.context, self.inst.id)
        self.assertEqual(instance['host'], 'fake_host_2')

    def test_rebuild_on_host_with_volumes(self):
        """Confirm evacuate scenario reconnects volumes."""
        values = {'instance_uuid': self.inst.uuid,
                  'source_type': 'volume',
                  'device_name': '/dev/vdc',
                  'delete_on_termination': False,
                  'volume_id': 'fake_volume_id'}

        db.block_device_mapping_create(self.context, values)

        def fake_volume_get(self, context, volume):
            return {'id': 'fake_volume_id'}
        self.stubs.Set(cinder.API, "get", fake_volume_get)

        # Stub out and record whether it gets detached
        result = {"detached": False}

        def fake_detach(self, context, volume):
            result["detached"] = volume["id"] == 'fake_volume_id'
        self.stubs.Set(cinder.API, "detach", fake_detach)

        def fake_terminate_connection(self, context, volume, connector):
            return {}
        self.stubs.Set(cinder.API, "terminate_connection",
                       fake_terminate_connection)

        # make sure volumes attach, detach are called
        self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
        self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())

        self.mox.StubOutWithMock(self.compute, '_prep_block_device')
        self.compute._prep_block_device(mox.IsA(self.context),
                                        mox.IsA(objects.Instance),
                                        mox.IgnoreArg())

        self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
        self.mox.ReplayAll()

        self._rebuild()

        # cleanup
        for bdms in db.block_device_mapping_get_all_by_instance(
                self.context, self.inst.uuid):
            db.block_device_mapping_destroy(self.context, bdms['id'])

    def test_rebuild_on_host_with_shared_storage(self):
        """Confirm evacuate scenario on shared storage."""
        # On shared storage the image_meta passed to spawn is empty ({}).
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.compute.driver.spawn(mox.IsA(self.context),
                                  mox.IsA(objects.Instance), {}, mox.IgnoreArg(), 'newpass',
                                  network_info=mox.IgnoreArg(),
                                  block_device_info=mox.IgnoreArg())

        self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
        self.mox.ReplayAll()

        self._rebuild()

    def test_rebuild_on_host_without_shared_storage(self):
        """Confirm evacuate scenario without shared storage
        (rebuild from image)
        """
        fake_image = {'id': 1,
                      'name': 'fake_name',
                      'properties': {'kernel_id': 'fake_kernel_id',
                                     'ramdisk_id': 'fake_ramdisk_id'}}

        # Without shared storage, spawn must receive the full image metadata.
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.compute.driver.spawn(mox.IsA(self.context),
                                  mox.IsA(objects.Instance), mox.IsA(fake_image),
                                  mox.IgnoreArg(), mox.IsA('newpass'),
                                  network_info=mox.IgnoreArg(),
                                  block_device_info=mox.IgnoreArg())

        self.stubs.Set(self.compute.driver, 'instance_on_disk',
                       lambda x: False)
        self.mox.ReplayAll()

        self._rebuild(on_shared_storage=False)

    def test_rebuild_on_host_instance_exists(self):
        """Rebuild if instance exists raises an exception."""
        # Run the instance locally first so the hypervisor already knows it.
        db.instance_update(self.context, self.inst.uuid,
                           {"task_state": task_states.SCHEDULING})
        self.compute.run_instance(self.context,
                                  self.inst, {}, {},
                                  [], None, None, True, None, False)

        self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
        self.assertRaises(exception.InstanceExists,
                          lambda: self._rebuild(on_shared_storage=True))

    def test_driver_does_not_support_recreate(self):
        # Drivers advertising supports_recreate=False must reject evacuate.
        with utils.temporary_mutation(self.compute.driver.capabilities,
                                      supports_recreate=False):
            self.stubs.Set(self.compute.driver, 'instance_on_disk',
                           lambda x: True)
            self.assertRaises(exception.InstanceRecreateNotSupported,
                              lambda: self._rebuild(on_shared_storage=True))
class ComputeInjectedFilesTestCase(BaseTestCase):
    # Test that running instances with injected_files decodes files correctly

    def setUp(self):
        super(ComputeInjectedFilesTestCase, self).setUp()
        self.instance = self._create_fake_instance_obj()
        # Replace the driver's spawn so each test can assert on the
        # (decoded) injected_files the manager passes down.
        self.stubs.Set(self.compute.driver, 'spawn', self._spawn)

    def _spawn(self, context, instance, image_meta, injected_files,
               admin_password, nw_info, block_device_info, db_api=None):
        # Stand-in for driver.spawn: verify the decoded file list matches
        # what the test expects.
        self.assertEqual(self.expected, injected_files)

    def _test(self, injected_files, decoded_files):
        """Run an instance with injected_files; _spawn checks decoded_files."""
        self.expected = decoded_files
        self.compute.run_instance(self.context, self.instance, {}, {}, [],
                                  injected_files, None, True, None, False)

    def test_injected_none(self):
        # test an input of None for injected_files
        self._test(None, [])

    def test_injected_empty(self):
        # test an input of [] for injected_files
        self._test([], [])

    def test_injected_success(self):
        # test with valid b64 encoded content.
        injected_files = [
            ('/a/b/c', base64.b64encode('foobarbaz')),
            ('/d/e/f', base64.b64encode('seespotrun')),
        ]

        decoded_files = [
            ('/a/b/c', 'foobarbaz'),
            ('/d/e/f', 'seespotrun'),
        ]
        self._test(injected_files, decoded_files)

    def test_injected_invalid(self):
        # test with invalid b64 encoded content
        injected_files = [
            ('/a/b/c', base64.b64encode('foobarbaz')),
            ('/d/e/f', 'seespotrun'),
        ]

        self.assertRaises(exception.Base64Exception, self.compute.run_instance,
                          self.context, self.instance, {}, {}, [], injected_files, None,
                          True, None, False)

    def test_reschedule(self):
        # test that rescheduling is done with original encoded files
        expected = [
            ('/a/b/c', base64.b64encode('foobarbaz')),
            ('/d/e/f', base64.b64encode('seespotrun')),
        ]

        def _roe(context, instance, exc_info, requested_networks,
                 admin_password, injected_files, is_first_time, request_spec,
                 filter_properties, bdms=None, legacy_bdm_in_spec=False):
            # Reschedule must receive the *encoded* files, not the decoded
            # ones spawn saw.
            self.assertEqual(expected, injected_files)
            return True

        def spawn_explode(context, instance, image_meta, injected_files,
                          admin_password, nw_info, block_device_info):
            # force reschedule logic to execute
            raise test.TestingException(_("spawn error"))

        self.stubs.Set(self.compute.driver, 'spawn', spawn_explode)
        self.stubs.Set(self.compute, '_reschedule_or_error', _roe)

        self.compute.run_instance(self.context, self.instance, {}, {}, [],
                                  expected, None, True, None, False)
class CheckConfigDriveTestCase(test.TestCase):
    # NOTE(sirp): `TestCase` is far too heavyweight for this test, this should
    # probably derive from a `test.FastTestCase` that omits DB and env
    # handling
    def setUp(self):
        super(CheckConfigDriveTestCase, self).setUp()
        self.compute_api = compute.API()

    def _assertCheck(self, expected, config_drive):
        # Helper: _check_config_drive(config_drive) must yield `expected`.
        actual = self.compute_api._check_config_drive(config_drive)
        self.assertEqual(expected, actual)

    def _assertInvalid(self, config_drive):
        # Helper: the given value must be rejected outright.
        self.assertRaises(exception.ConfigDriveInvalidValue,
                          self.compute_api._check_config_drive,
                          config_drive)

    def test_config_drive_false_values(self):
        # All of these normalize to the empty string (config drive off).
        for falsy_value in (None, '', 'False', 'f', '0'):
            self._assertCheck('', falsy_value)

    def test_config_drive_true_values(self):
        # All of these normalize to True (config drive on).
        for truthy_value in ('True', 't', '1'):
            self._assertCheck(True, truthy_value)

    def test_config_drive_bogus_values_raise(self):
        for bogus_value in ('asd', uuidutils.generate_uuid()):
            self._assertInvalid(bogus_value)
class CheckRequestedImageTestCase(test.TestCase):
    # Validates compute.API._check_requested_image: image status, min_ram,
    # min_disk, size and img_config_drive checks against a 64MB/1GB flavor.

    def setUp(self):
        super(CheckRequestedImageTestCase, self).setUp()
        self.compute_api = compute.API()
        self.context = context.RequestContext(
            'fake_user_id', 'fake_project_id')

        self.instance_type = flavors.get_default_flavor()
        self.instance_type['memory_mb'] = 64
        self.instance_type['root_gb'] = 1

    def test_no_image_specified(self):
        # Booting without an image (e.g. boot-from-volume) skips all checks.
        self.compute_api._check_requested_image(self.context, None, None,
                                                self.instance_type)

    def test_image_status_must_be_active(self):
        image = dict(id='123', status='foo')

        self.assertRaises(exception.ImageNotActive,
                          self.compute_api._check_requested_image, self.context,
                          image['id'], image, self.instance_type)

        image['status'] = 'active'
        self.compute_api._check_requested_image(self.context, image['id'],
                                                image, self.instance_type)

    def test_image_min_ram_check(self):
        # min_ram just above the flavor's 64MB must fail; equal must pass.
        image = dict(id='123', status='active', min_ram='65')

        self.assertRaises(exception.FlavorMemoryTooSmall,
                          self.compute_api._check_requested_image, self.context,
                          image['id'], image, self.instance_type)

        image['min_ram'] = '64'
        self.compute_api._check_requested_image(self.context, image['id'],
                                                image, self.instance_type)

    def test_image_min_disk_check(self):
        # min_disk above the flavor's 1GB root disk must fail; equal passes.
        image = dict(id='123', status='active', min_disk='2')

        self.assertRaises(exception.FlavorDiskTooSmall,
                          self.compute_api._check_requested_image, self.context,
                          image['id'], image, self.instance_type)

        image['min_disk'] = '1'
        self.compute_api._check_requested_image(self.context, image['id'],
                                                image, self.instance_type)

    def test_image_too_large(self):
        # 1GB + 1 byte must fail; exactly 1GB (1073741824) passes.
        image = dict(id='123', status='active', size='1073741825')

        self.assertRaises(exception.FlavorDiskTooSmall,
                          self.compute_api._check_requested_image, self.context,
                          image['id'], image, self.instance_type)

        image['size'] = '1073741824'
        self.compute_api._check_requested_image(self.context, image['id'],
                                                image, self.instance_type)

    def test_root_gb_zero_disables_size_check(self):
        # root_gb == 0 means "no local root disk": image size is not checked.
        self.instance_type['root_gb'] = 0
        image = dict(id='123', status='active', size='1073741825')

        self.compute_api._check_requested_image(self.context, image['id'],
                                                image, self.instance_type)

    def test_root_gb_zero_disables_min_disk(self):
        # root_gb == 0 likewise disables the min_disk check.
        self.instance_type['root_gb'] = 0
        image = dict(id='123', status='active', min_disk='2')

        self.compute_api._check_requested_image(self.context, image['id'],
                                                image, self.instance_type)

    def test_config_drive_option(self):
        image = {'id': 1, 'status': 'active'}

        # 'optional' and 'mandatory' are the only accepted values.
        image['properties'] = {'img_config_drive': 'optional'}
        self.compute_api._check_requested_image(self.context, image['id'],
                                                image, self.instance_type)
        image['properties'] = {'img_config_drive': 'mandatory'}
        self.compute_api._check_requested_image(self.context, image['id'],
                                                image, self.instance_type)

        image['properties'] = {'img_config_drive': 'bar'}
        self.assertRaises(exception.InvalidImageConfigDrive,
                          self.compute_api._check_requested_image,
                          self.context, image['id'], image, self.instance_type)
class ComputeHooksTestCase(test.BaseHookTestCase):
    """Verify that key compute entry points are decorated with hooks."""

    def test_delete_instance_has_hook(self):
        self.assert_has_hook(
            'delete_instance',
            compute_manager.ComputeManager._delete_instance)

    def test_create_instance_has_hook(self):
        self.assert_has_hook('create_instance', compute_api.API.create)
| {
"content_hash": "3fbe66a1d527bee289334380c18d79f0",
"timestamp": "",
"source": "github",
"line_count": 11322,
"max_line_length": 79,
"avg_line_length": 44.26055467231938,
"alnum_prop": 0.5671338886250344,
"repo_name": "angdraug/nova",
"id": "6584d63db5ef2da5b1e382ad5a58f76b93122351",
"size": "501926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/compute/test_compute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14991706"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
} |
import contextlib
import copy
import mock
from pysensu_yelp import Status
from pytest import raises
from paasta_tools import chronos_tools
from paasta_tools import setup_chronos_job
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import NoDeploymentsAvailable
class TestSetupChronosJob:
    # Unit tests for paasta_tools.setup_chronos_job.  Every external
    # collaborator (chronos client, config loaders, sensu events, sys.exit)
    # is replaced via contextlib.nested stacks of mock.patch; note that the
    # order of the patches MUST match the order of the names in each
    # `as (...)` tuple.

    # Shared fixtures for a fake service instance running on a fake cluster.
    fake_docker_image = 'test_docker:1.0'
    fake_client = mock.MagicMock()

    fake_service = 'test_service'
    fake_instance = 'test'
    fake_cluster = 'fake_test_cluster'
    fake_config_dict = {
        'name': 'test_service test gitsha config',
        'description': 'This is a test Chronos job.',
        'command': '/bin/sleep 40',
        'bounce_method': 'graceful',
        'epsilon': 'PT30M',
        'retries': 5,
        'owner': 'test@test.com',
        'async': False,
        'cpus': 5.5,
        'mem': 1024.4,
        'disk': 2048.5,
        'disabled': 'true',
        'schedule': 'R/2015-03-25T19:36:35Z/PT5M',
        'schedule_time_zone': 'Zulu',
    }
    fake_branch_dict = {
        'docker_image': 'paasta-%s-%s' % (fake_service, fake_cluster),
    }
    fake_chronos_job_config = chronos_tools.ChronosJobConfig(
        service=fake_service,
        cluster=fake_cluster,
        instance=fake_instance,
        config_dict=fake_config_dict,
        branch_dict=fake_branch_dict,
    )

    fake_docker_registry = 'remote_registry.com'
    fake_args = mock.MagicMock(
        service_instance=compose_job_id(fake_service, fake_instance),
        soa_dir='no_more',
        verbose=False,
    )

    def test_main_success(self):
        # Happy path: main() wires parse_args -> client -> complete config ->
        # setup_job -> send_event and exits 0.
        expected_status = 0
        expected_output = 'it_is_finished'
        fake_complete_job_config = {'foo': 'bar'}
        with contextlib.nested(
            mock.patch('paasta_tools.setup_chronos_job.parse_args',
                       return_value=self.fake_args,
                       autospec=True),
            mock.patch('paasta_tools.chronos_tools.load_chronos_config', autospec=True),
            mock.patch('paasta_tools.chronos_tools.get_chronos_client',
                       return_value=self.fake_client,
                       autospec=True),
            mock.patch('paasta_tools.chronos_tools.create_complete_config',
                       return_value=fake_complete_job_config,
                       autospec=True),
            mock.patch('paasta_tools.setup_chronos_job.setup_job',
                       return_value=(expected_status, expected_output),
                       autospec=True),
            mock.patch('paasta_tools.setup_chronos_job.send_event', autospec=True),
            mock.patch('paasta_tools.setup_chronos_job.load_system_paasta_config', autospec=True),
            mock.patch('sys.exit', autospec=True),
        ) as (
            parse_args_patch,
            load_chronos_config_patch,
            get_client_patch,
            create_complete_config_patch,
            setup_job_patch,
            send_event_patch,
            load_system_paasta_config_patch,
            sys_exit_patch,
        ):
            load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(return_value=self.fake_cluster)
            setup_chronos_job.main()

            # Verify each collaborator was called with the expected arguments.
            parse_args_patch.assert_called_once_with()
            get_client_patch.assert_called_once_with(load_chronos_config_patch.return_value)
            setup_job_patch.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=fake_complete_job_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            send_event_patch.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
                status=expected_status,
                output=expected_output,
            )
            sys_exit_patch.assert_called_once_with(0)

    def test_main_no_deployments(self):
        # NoDeploymentsAvailable from create_complete_config is not an error:
        # main() exits cleanly with status 0.
        with contextlib.nested(
            mock.patch('paasta_tools.setup_chronos_job.parse_args',
                       return_value=self.fake_args,
                       autospec=True),
            mock.patch('paasta_tools.chronos_tools.load_chronos_config', autospec=True),
            mock.patch('paasta_tools.chronos_tools.get_chronos_client',
                       return_value=self.fake_client,
                       autospec=True),
            mock.patch('paasta_tools.chronos_tools.create_complete_config',
                       return_value={},
                       autospec=True,
                       side_effect=NoDeploymentsAvailable),
            mock.patch('paasta_tools.setup_chronos_job.setup_job',
                       return_value=(0, 'it_is_finished'),
                       autospec=True),
            mock.patch('paasta_tools.setup_chronos_job.load_system_paasta_config', autospec=True),
            mock.patch('paasta_tools.setup_chronos_job.send_event', autospec=True),
        ) as (
            parse_args_patch,
            load_chronos_config_patch,
            get_client_patch,
            load_chronos_job_config_patch,
            setup_job_patch,
            load_system_paasta_config_patch,
            send_event_patch,
        ):
            load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(return_value=self.fake_cluster)
            with raises(SystemExit) as excinfo:
                setup_chronos_job.main()
            assert excinfo.value.code == 0

    def test_main_bad_chronos_job_config_notifies_user(self):
        # An UnknownChronosJobError must still exit 0, but a CRITICAL sensu
        # event with a descriptive message is emitted so the owner is paged.
        with contextlib.nested(
            mock.patch('paasta_tools.setup_chronos_job.parse_args',
                       return_value=self.fake_args,
                       autospec=True),
            mock.patch('paasta_tools.chronos_tools.load_chronos_config', autospec=True),
            mock.patch('paasta_tools.chronos_tools.get_chronos_client',
                       return_value=self.fake_client,
                       autospec=True),
            mock.patch('paasta_tools.chronos_tools.create_complete_config',
                       autospec=True,
                       side_effect=chronos_tools.UnknownChronosJobError('test bad configuration')),
            mock.patch('paasta_tools.setup_chronos_job.setup_job',
                       return_value=(0, 'it_is_finished'),
                       autospec=True),
            mock.patch('paasta_tools.setup_chronos_job.load_system_paasta_config', autospec=True),
            mock.patch('paasta_tools.setup_chronos_job.send_event', autospec=True),
        ) as (
            parse_args_patch,
            load_chronos_config_patch,
            get_client_patch,
            load_chronos_job_config_patch,
            setup_job_patch,
            load_system_paasta_config_patch,
            send_event_patch,
        ):
            load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(return_value=self.fake_cluster)
            with raises(SystemExit) as excinfo:
                setup_chronos_job.main()
            assert excinfo.value.code == 0
            expected_error_msg = (
                "Could not read chronos configuration file for %s in cluster %s\nError was: test bad configuration"
                % (compose_job_id(self.fake_service, self.fake_instance), self.fake_cluster)
            )
            send_event_patch.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
                status=Status.CRITICAL,
                output=expected_error_msg
            )

    def test_setup_job_new_app_with_no_previous_jobs(self):
        # No matching jobs exist in chronos yet, so setup_job should bounce
        # with the freshly built complete config as the job to create.
        fake_existing_jobs = []
        with contextlib.nested(
            mock.patch('paasta_tools.setup_chronos_job.bounce_chronos_job', autospec=True, return_value=(0, 'ok')),
            mock.patch('paasta_tools.chronos_tools.lookup_chronos_jobs',
                       autospec=True),
            mock.patch('paasta_tools.chronos_tools.sort_jobs',
                       autospec=True,
                       return_value=fake_existing_jobs),
            mock.patch('paasta_tools.chronos_tools.load_system_paasta_config', autospec=True),
            mock.patch('paasta_tools.chronos_tools.load_chronos_job_config',
                       autospec=True,
                       return_value=self.fake_chronos_job_config),
        ) as (
            mock_bounce_chronos_job,
            lookup_chronos_jobs_patch,
            sort_jobs_patch,
            load_system_paasta_config_patch,
            load_chronos_job_config_patch,
        ):
            load_system_paasta_config_patch.return_value.get_cluster.return_value = self.fake_cluster
            load_system_paasta_config_patch.return_value.get_volumes.return_value = []
            load_system_paasta_config_patch.return_value.get_dockercfg_location.return_value = \
                'file:///root/.dockercfg'
            complete_config = chronos_tools.create_complete_config(
                service=self.fake_service,
                job_name=self.fake_instance,
                soa_dir=self.fake_args.soa_dir,
            )
            actual = setup_chronos_job.setup_job(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=complete_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            mock_bounce_chronos_job.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=complete_config,
                client=self.fake_client,
            )
            assert actual == mock_bounce_chronos_job.return_value

    def test_setup_job_with_previously_enabled_job(self):
        # A job already exists (enabled); setup_job should still bounce with
        # the new complete config, after looking up the existing jobs.
        fake_existing_job = {
            'name': 'fake_job',
            'disabled': False,
        }
        with contextlib.nested(
            mock.patch('paasta_tools.setup_chronos_job.bounce_chronos_job', autospec=True, return_value=(0, 'ok')),
            mock.patch('paasta_tools.chronos_tools.lookup_chronos_jobs',
                       autospec=True),
            mock.patch('paasta_tools.chronos_tools.sort_jobs',
                       autospec=True,
                       return_value=[fake_existing_job]),
            mock.patch('paasta_tools.chronos_tools.load_system_paasta_config', autospec=True),
            mock.patch('paasta_tools.chronos_tools.load_chronos_job_config',
                       autospec=True, return_value=self.fake_chronos_job_config),
        ) as (
            mock_bounce_chronos_job,
            mock_lookup_chronos_jobs,
            mock_sort_jobs,
            load_system_paasta_config_patch,
            load_chronos_job_config_patch,
        ):
            load_system_paasta_config_patch.return_value.get_cluster.return_value = self.fake_cluster
            load_system_paasta_config_patch.return_value.get_volumes.return_value = []
            load_system_paasta_config_patch.return_value.get_dockercfg_location.return_value = \
                "file:///root/.dockercfg"
            complete_config = chronos_tools.create_complete_config(
                service=self.fake_service,
                job_name=self.fake_instance,
                soa_dir=self.fake_args.soa_dir
            )
            actual = setup_chronos_job.setup_job(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=complete_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            mock_bounce_chronos_job.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=complete_config,
                client=self.fake_client,
            )
            assert mock_lookup_chronos_jobs.called
            assert actual == mock_bounce_chronos_job.return_value

    def test_setup_job_does_nothing_with_only_existing_app(self):
        # The existing job in chronos is identical to the desired config, so
        # the bounce is invoked with job_to_update=None (a no-op).
        fake_existing_job = copy.deepcopy(self.fake_config_dict)
        with contextlib.nested(
            mock.patch('paasta_tools.setup_chronos_job.bounce_chronos_job', autospec=True, return_value=(0, 'ok')),
            mock.patch('paasta_tools.chronos_tools.lookup_chronos_jobs',
                       autospec=True, return_value=[fake_existing_job]),
            mock.patch('paasta_tools.chronos_tools.load_system_paasta_config', autospec=True),
            mock.patch('paasta_tools.chronos_tools.load_chronos_job_config',
                       autospec=True, return_value=self.fake_chronos_job_config),
        ) as (
            mock_bounce_chronos_job,
            mock_lookup_chronos_jobs,
            load_system_paasta_config_patch,
            load_chronos_job_config_patch,
        ):
            load_system_paasta_config_patch.return_value.get_cluster.return_value = self.fake_cluster
            complete_config = copy.deepcopy(self.fake_config_dict)
            # Force the complete_config's name to match the return value of
            # lookup_chronos_jobs to simulate that they have the same name
            complete_config["name"] = fake_existing_job["name"]
            actual = setup_chronos_job.setup_job(
                service=self.fake_service,
                instance=self.fake_instance,
                complete_job_config=complete_config,
                client=self.fake_client,
                cluster=self.fake_cluster,
            )
            mock_bounce_chronos_job.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=None,
                client=self.fake_client,
            )
            assert mock_lookup_chronos_jobs.called
            assert actual == mock_bounce_chronos_job.return_value

    def test_send_event(self):
        # send_event must forward to monitoring_tools.send_event with the
        # per-job check name and the instance's monitoring overrides.
        fake_status = '42'
        fake_output = 'something went wrong'
        fake_soa_dir = ''
        expected_check_name = 'setup_chronos_job.%s' % compose_job_id(self.fake_service, self.fake_instance)
        with contextlib.nested(
            mock.patch("paasta_tools.monitoring_tools.send_event", autospec=True),
            mock.patch("paasta_tools.chronos_tools.load_chronos_job_config", autospec=True),
            mock.patch("paasta_tools.setup_chronos_job.load_system_paasta_config", autospec=True),
        ) as (
            mock_send_event,
            mock_load_chronos_job_config,
            mock_load_system_paasta_config,
        ):
            mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(return_value='fake_cluster')
            mock_load_chronos_job_config.return_value.get_monitoring.return_value = {}

            setup_chronos_job.send_event(
                service=self.fake_service,
                instance=self.fake_instance,
                soa_dir=fake_soa_dir,
                status=fake_status,
                output=fake_output,
            )
            mock_send_event.assert_called_once_with(
                service=self.fake_service,
                check_name=expected_check_name,
                overrides={'alert_after': '10m', 'check_every': '10s'},
                status=fake_status,
                output=fake_output,
                soa_dir=fake_soa_dir
            )
            mock_load_chronos_job_config.assert_called_once_with(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=mock_load_system_paasta_config.return_value.get_cluster.return_value,
                soa_dir=fake_soa_dir,
            )

    def test_bounce_chronos_job_takes_actions(self):
        # With a job_to_update, bounce_chronos_job logs and calls update_job.
        fake_job_to_update = {'name': 'job_to_update'}
        with contextlib.nested(
            mock.patch("paasta_tools.setup_chronos_job._log", autospec=True),
            mock.patch("paasta_tools.chronos_tools.update_job", autospec=True),
        ) as (
            mock_log,
            mock_update_job,
        ):
            setup_chronos_job.bounce_chronos_job(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=fake_job_to_update,
                client=self.fake_client,
            )
            mock_log.assert_any_call(
                line=mock.ANY,
                level='debug',
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                component='deploy',
                service=self.fake_service,
            )
            mock_log.assert_any_call(
                line="Updated Chronos job: job_to_update",
                level='event',
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                component='deploy',
                service=self.fake_service,
            )
            mock_update_job.assert_called_once_with(job=fake_job_to_update, client=self.fake_client)

    def test_bounce_chronos_job_doesnt_log_when_nothing_to_do(self):
        # job_to_update=None means nothing changed: no logging, no update.
        with contextlib.nested(
            mock.patch("paasta_tools.setup_chronos_job._log", autospec=True),
            mock.patch("paasta_tools.chronos_tools.update_job", autospec=True),
        ) as (
            mock_log,
            mock_update_job,
        ):
            setup_chronos_job.bounce_chronos_job(
                service=self.fake_service,
                instance=self.fake_instance,
                cluster=self.fake_cluster,
                job_to_update=None,
                client=self.fake_client,
            )
            assert not mock_log.called
            assert not mock_update_job.called
| {
"content_hash": "711b6530668a79e90c38f1413e1e39c5",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 117,
"avg_line_length": 44.743142144638405,
"alnum_prop": 0.568721435737376,
"repo_name": "gstarnberger/paasta",
"id": "7683ec0c8ee0140574db6d3ab7b986b50d6ad762",
"size": "18542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_setup_chronos_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "44761"
},
{
"name": "Makefile",
"bytes": "6313"
},
{
"name": "Python",
"bytes": "1808732"
},
{
"name": "Shell",
"bytes": "15986"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.