| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars; ⌀ = null) |
|---|---|---|---|---|
liu602348184/django
|
refs/heads/master
|
django/views/decorators/gzip.py
|
720
|
from django.middleware.gzip import GZipMiddleware
from django.utils.decorators import decorator_from_middleware
gzip_page = decorator_from_middleware(GZipMiddleware)
gzip_page.__doc__ = "Decorator for views that gzips pages if the client supports it."
|
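Editor's note: a minimal usage sketch for the record above, assuming a hypothetical Django view; `gzip_page` compresses the response when the client advertises gzip support.

```python
from django.http import HttpResponse
from django.views.decorators.gzip import gzip_page

@gzip_page
def large_report(request):
    # Hypothetical view: the decorator (backed by GZipMiddleware)
    # gzips this body for clients that accept it.
    return HttpResponse("x" * 10000)
```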
glpatcern/rpyc
|
refs/heads/master
|
tests/test_teleportation.py
|
9
|
from __future__ import with_statement
import subprocess
import sys
import os
import rpyc
import unittest
from rpyc.utils.teleportation import export_function, import_function
from rpyc.utils.classic import teleport_function
def b(st):
if sys.version_info[0] >= 3:
return bytes(st, "latin-1")
else:
return st
def f(a):
def g(b):
return a + int(b)
return g
def h(a):
import os
return a * os.getpid()
class TeleportationTest(unittest.TestCase):
def setUp(self):
server_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "bin", "rpyc_classic.py")
self.proc = subprocess.Popen([sys.executable, server_file, "--mode=oneshot", "--host=localhost", "-p0"],
stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
line = self.proc.stdout.readline().strip()
if not line:
print (self.proc.stderr.read())
self.fail("server failed to start")
self.assertEqual(line, b("rpyc-oneshot"), "server failed to start")
host, port = self.proc.stdout.readline().strip().split(b("\t"))
self.conn = rpyc.classic.connect(host, int(port))
def tearDown(self):
self.conn.close()
def test(self):
exp = export_function(f)
f2 = import_function(exp)
self.assertEqual(f(6)(7), f2(6)(7))
# HACK: needed so the other side could import us (for globals)
mod = self.conn.modules.types.ModuleType(__name__)
self.conn.modules.sys.modules[__name__] = mod
mod.__builtins__ = self.conn.builtins
h2 = teleport_function(self.conn, h)
self.assertNotEqual(h(7), h2(7))
if __name__ == "__main__":
unittest.main()
|
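Editor's note: a hedged sketch of the round trip the test above exercises, using only the rpyc calls it already imports; `export_function` produces a picklable description that `import_function` rebuilds.

```python
from rpyc.utils.teleportation import export_function, import_function

def add(a, b):
    return a + b

exported = export_function(add)      # picklable description of the function
rebuilt = import_function(exported)  # reconstructed in this process
assert rebuilt(2, 3) == add(2, 3)
```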
adviti/melange
|
refs/heads/master
|
app/gdata/__init__.py
|
158
|
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes representing Google Data elements.
Extends Atom classes to add Google Data specific elements.
"""
__author__ = 'j.s@google.com (Jeffrey Scudder)'
import os
import atom
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
# XML namespaces which are often used in GData entities.
GDATA_NAMESPACE = 'http://schemas.google.com/g/2005'
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/'
OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch'
GACL_NAMESPACE = 'http://schemas.google.com/acl/2007'
GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'
# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'
class Error(Exception):
pass
class MissingRequiredParameters(Error):
pass
class MediaSource(object):
"""GData Entries can refer to media sources, so this class provides a
place to store references to these objects along with some metadata.
"""
def __init__(self, file_handle=None, content_type=None, content_length=None,
file_path=None, file_name=None):
"""Creates an object of type MediaSource.
Args:
file_handle: A file handle pointing to the file to be encapsulated in the
MediaSource
content_type: string The MIME type of the file. Required if a file_handle
is given.
content_length: int The size of the file. Required if a file_handle is
given.
file_path: string (optional) A full path name to the file. Used in
place of a file_handle.
file_name: string The name of the file without any path information.
Required if a file_handle is given.
"""
self.file_handle = file_handle
self.content_type = content_type
self.content_length = content_length
self.file_name = file_name
if (file_handle is None and content_type is not None and
file_path is not None):
self.setFile(file_path, content_type)
def setFile(self, file_name, content_type):
"""A helper function which can create a file handle from a given filename
and set the content type and length all at once.
Args:
file_name: string The path and file name to the file containing the media
content_type: string A MIME type representing the type of the media
"""
self.file_handle = open(file_name, 'rb')
self.content_type = content_type
self.content_length = os.path.getsize(file_name)
self.file_name = os.path.basename(file_name)
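# Editor's note, an illustrative sketch (not part of the original module):
# constructing a MediaSource from a path lets setFile() fill in the handle,
# length and name automatically (the path below is hypothetical):
#
#   photo = MediaSource(file_path='/tmp/cat.jpg', content_type='image/jpeg')
#   # photo.file_handle, photo.content_length and photo.file_name are now set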
class LinkFinder(atom.LinkFinder):
"""An "interface" providing methods to find link elements
GData Entry elements often contain multiple links which differ in the rel
attribute or content type. Often, developers are interested in a specific
type of link so this class provides methods to find specific classes of
links.
This class is used as a mixin in GData entries.
"""
def GetSelfLink(self):
"""Find the first link with rel set to 'self'
Returns:
An atom.Link or None if none of the links had rel equal to 'self'
"""
for a_link in self.link:
if a_link.rel == 'self':
return a_link
return None
def GetEditLink(self):
for a_link in self.link:
if a_link.rel == 'edit':
return a_link
return None
def GetEditMediaLink(self):
"""The Picasa API mistakenly returns media-edit rather than edit-media, but
this may change soon.
"""
for a_link in self.link:
if a_link.rel == 'edit-media':
return a_link
if a_link.rel == 'media-edit':
return a_link
return None
def GetHtmlLink(self):
"""Find the first link with rel of alternate and type of text/html
Returns:
An atom.Link or None if no links matched
"""
for a_link in self.link:
if a_link.rel == 'alternate' and a_link.type == 'text/html':
return a_link
return None
def GetPostLink(self):
"""Get a link containing the POST target URL.
The POST target URL is used to insert new entries.
Returns:
A link object with a rel matching the POST type.
"""
for a_link in self.link:
if a_link.rel == 'http://schemas.google.com/g/2005#post':
return a_link
return None
def GetAclLink(self):
for a_link in self.link:
if a_link.rel == 'http://schemas.google.com/acl/2007#accessControlList':
return a_link
return None
def GetFeedLink(self):
for a_link in self.link:
if a_link.rel == 'http://schemas.google.com/g/2005#feed':
return a_link
return None
def GetNextLink(self):
for a_link in self.link:
if a_link.rel == 'next':
return a_link
return None
def GetPrevLink(self):
for a_link in self.link:
if a_link.rel == 'previous':
return a_link
return None
class TotalResults(atom.AtomBase):
"""opensearch:TotalResults for a GData feed"""
_tag = 'totalResults'
_namespace = OPENSEARCH_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def TotalResultsFromString(xml_string):
return atom.CreateClassFromXMLString(TotalResults, xml_string)
class StartIndex(atom.AtomBase):
"""The opensearch:startIndex element in GData feed"""
_tag = 'startIndex'
_namespace = OPENSEARCH_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def StartIndexFromString(xml_string):
return atom.CreateClassFromXMLString(StartIndex, xml_string)
class ItemsPerPage(atom.AtomBase):
"""The opensearch:itemsPerPage element in GData feed"""
_tag = 'itemsPerPage'
_namespace = OPENSEARCH_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def ItemsPerPageFromString(xml_string):
return atom.CreateClassFromXMLString(ItemsPerPage, xml_string)
class ExtendedProperty(atom.AtomBase):
"""The Google Data extendedProperty element.
Used to store arbitrary key-value information specific to your
application. The value can either be a text string stored as an XML
attribute (.value), or an XML node (XmlBlob) as a child element.
This element is used in the Google Calendar data API and the Google
Contacts data API.
"""
_tag = 'extendedProperty'
_namespace = GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['name'] = 'name'
_attributes['value'] = 'value'
def __init__(self, name=None, value=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.value = value
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def GetXmlBlobExtensionElement(self):
"""Returns the XML blob as an atom.ExtensionElement.
Returns:
An atom.ExtensionElement representing the blob's XML, or None if no
blob was set.
"""
if len(self.extension_elements) < 1:
return None
else:
return self.extension_elements[0]
def GetXmlBlobString(self):
"""Returns the XML blob as a string.
Returns:
A string containing the blob's XML, or None if no blob was set.
"""
blob = self.GetXmlBlobExtensionElement()
if blob:
return blob.ToString()
return None
def SetXmlBlob(self, blob):
"""Sets the contents of the extendedProperty to XML as a child node.
Since the extendedProperty is only allowed one child element as an XML
blob, setting the XML blob will erase any preexisting extension elements
in this object.
Args:
blob: str, ElementTree Element or atom.ExtensionElement representing
the XML blob stored in the extendedProperty.
"""
# Erase any existing extension_elements, clears the child nodes from the
# extendedProperty.
self.extension_elements = []
if isinstance(blob, atom.ExtensionElement):
self.extension_elements.append(blob)
elif ElementTree.iselement(blob):
self.extension_elements.append(atom._ExtensionElementFromElementTree(
blob))
else:
self.extension_elements.append(atom.ExtensionElementFromString(blob))
def ExtendedPropertyFromString(xml_string):
return atom.CreateClassFromXMLString(ExtendedProperty, xml_string)
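# Editor's note, an illustrative sketch (not part of the original module):
# an extendedProperty carries either a name/value pair or a single XML blob
# child; SetXmlBlob replaces any existing children:
#
#   prop = ExtendedProperty(name='color', value='blue')
#   prop.SetXmlBlob('<ex:data xmlns:ex="urn:example">42</ex:data>')
#   print prop.GetXmlBlobString()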
class GDataEntry(atom.Entry, LinkFinder):
"""Extends Atom Entry to provide data processing"""
_tag = atom.Entry._tag
_namespace = atom.Entry._namespace
_children = atom.Entry._children.copy()
_attributes = atom.Entry._attributes.copy()
def __GetId(self):
return self.__id
# This method was created to strip the unwanted whitespace from the id's
# text node.
def __SetId(self, id):
self.__id = id
if id is not None and id.text is not None:
self.__id.text = id.text.strip()
id = property(__GetId, __SetId)
def IsMedia(self):
"""Determines whether or not an entry is a GData Media entry.
"""
if (self.GetEditMediaLink()):
return True
else:
return False
def GetMediaURL(self):
"""Returns the URL to the media content, if the entry is a media entry.
Otherwise returns None.
"""
if not self.IsMedia():
return None
else:
return self.content.src
def GDataEntryFromString(xml_string):
"""Creates a new GDataEntry instance given a string of XML."""
return atom.CreateClassFromXMLString(GDataEntry, xml_string)
class GDataFeed(atom.Feed, LinkFinder):
"""A Feed from a GData service"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = atom.Feed._children.copy()
_attributes = atom.Feed._attributes.copy()
_children['{%s}totalResults' % OPENSEARCH_NAMESPACE] = ('total_results',
TotalResults)
_children['{%s}startIndex' % OPENSEARCH_NAMESPACE] = ('start_index',
StartIndex)
_children['{%s}itemsPerPage' % OPENSEARCH_NAMESPACE] = ('items_per_page',
ItemsPerPage)
# Add a conversion rule for atom:entry to make it into a GData
# Entry.
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GDataEntry])
def __GetId(self):
return self.__id
def __SetId(self, id):
self.__id = id
if id is not None and id.text is not None:
self.__id.text = id.text.strip()
id = property(__GetId, __SetId)
def __GetGenerator(self):
return self.__generator
def __SetGenerator(self, generator):
self.__generator = generator
if generator is not None:
self.__generator.text = generator.text.strip()
generator = property(__GetGenerator, __SetGenerator)
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None, entry=None,
total_results=None, start_index=None, items_per_page=None,
extension_elements=None, extension_attributes=None, text=None):
"""Constructor for Source
Args:
author: list (optional) A list of Author instances which belong to this
class.
category: list (optional) A list of Category instances
contributor: list (optional) A list on Contributor instances
generator: Generator (optional)
icon: Icon (optional)
id: Id (optional) The entry's Id element
link: list (optional) A list of Link instances
logo: Logo (optional)
rights: Rights (optional) The entry's Rights element
subtitle: Subtitle (optional) The entry's subtitle element
title: Title (optional) the entry's title element
updated: Updated (optional) the entry's updated element
entry: list (optional) A list of the Entry instances contained in the
feed.
text: String (optional) The text contents of the element. This is the
contents of the Entry's XML text node.
(Example: <foo>This is the text</foo>)
extension_elements: list (optional) A list of ExtensionElement instances
which are children of this element.
extension_attributes: dict (optional) A dictionary of strings which are
the values for additional XML attributes of this element.
"""
self.author = author or []
self.category = category or []
self.contributor = contributor or []
self.generator = generator
self.icon = icon
self.id = atom_id
self.link = link or []
self.logo = logo
self.rights = rights
self.subtitle = subtitle
self.title = title
self.updated = updated
self.entry = entry or []
self.total_results = total_results
self.start_index = start_index
self.items_per_page = items_per_page
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def GDataFeedFromString(xml_string):
return atom.CreateClassFromXMLString(GDataFeed, xml_string)
class BatchId(atom.AtomBase):
_tag = 'id'
_namespace = BATCH_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def BatchIdFromString(xml_string):
return atom.CreateClassFromXMLString(BatchId, xml_string)
class BatchOperation(atom.AtomBase):
_tag = 'operation'
_namespace = BATCH_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['type'] = 'type'
def __init__(self, op_type=None, extension_elements=None,
extension_attributes=None,
text=None):
self.type = op_type
atom.AtomBase.__init__(self,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def BatchOperationFromString(xml_string):
return atom.CreateClassFromXMLString(BatchOperation, xml_string)
class BatchStatus(atom.AtomBase):
"""The batch:status element present in a batch response entry.
A status element contains the code (HTTP response code) and
reason as elements. In a single request these fields would
be part of the HTTP response, but in a batch request each
Entry operation has a corresponding Entry in the response
feed which includes status information.
See http://code.google.com/apis/gdata/batch.html#Handling_Errors
"""
_tag = 'status'
_namespace = BATCH_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['code'] = 'code'
_attributes['reason'] = 'reason'
_attributes['content-type'] = 'content_type'
def __init__(self, code=None, reason=None, content_type=None,
extension_elements=None, extension_attributes=None, text=None):
self.code = code
self.reason = reason
self.content_type = content_type
atom.AtomBase.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def BatchStatusFromString(xml_string):
return atom.CreateClassFromXMLString(BatchStatus, xml_string)
class BatchEntry(GDataEntry):
"""An atom:entry for use in batch requests.
The BatchEntry contains additional members to specify the operation to be
performed on this entry and a batch ID so that the server can reference
individual operations in the response feed. For more information, see:
http://code.google.com/apis/gdata/batch.html
"""
_tag = GDataEntry._tag
_namespace = GDataEntry._namespace
_children = GDataEntry._children.copy()
_children['{%s}operation' % BATCH_NAMESPACE] = ('batch_operation', BatchOperation)
_children['{%s}id' % BATCH_NAMESPACE] = ('batch_id', BatchId)
_children['{%s}status' % BATCH_NAMESPACE] = ('batch_status', BatchStatus)
_attributes = GDataEntry._attributes.copy()
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, control=None, title=None, updated=None,
batch_operation=None, batch_id=None, batch_status=None,
extension_elements=None, extension_attributes=None, text=None):
self.batch_operation = batch_operation
self.batch_id = batch_id
self.batch_status = batch_status
GDataEntry.__init__(self, author=author, category=category,
content=content, contributor=contributor, atom_id=atom_id, link=link,
published=published, rights=rights, source=source, summary=summary,
control=control, title=title, updated=updated,
extension_elements=extension_elements,
extension_attributes=extension_attributes, text=text)
def BatchEntryFromString(xml_string):
return atom.CreateClassFromXMLString(BatchEntry, xml_string)
class BatchInterrupted(atom.AtomBase):
"""The batch:interrupted element sent if batch request was interrupted.
Only appears in a feed if some of the batch entries could not be processed.
See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
"""
_tag = 'interrupted'
_namespace = BATCH_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['reason'] = 'reason'
_attributes['success'] = 'success'
_attributes['failures'] = 'failures'
_attributes['parsed'] = 'parsed'
def __init__(self, reason=None, success=None, failures=None, parsed=None,
extension_elements=None, extension_attributes=None, text=None):
self.reason = reason
self.success = success
self.failures = failures
self.parsed = parsed
atom.AtomBase.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def BatchInterruptedFromString(xml_string):
return atom.CreateClassFromXMLString(BatchInterrupted, xml_string)
class BatchFeed(GDataFeed):
"""A feed containing a list of batch request entries."""
_tag = GDataFeed._tag
_namespace = GDataFeed._namespace
_children = GDataFeed._children.copy()
_attributes = GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchEntry])
_children['{%s}interrupted' % BATCH_NAMESPACE] = ('interrupted', BatchInterrupted)
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None, entry=None,
total_results=None, start_index=None, items_per_page=None,
interrupted=None,
extension_elements=None, extension_attributes=None, text=None):
self.interrupted = interrupted
GDataFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results, start_index=start_index,
items_per_page=items_per_page,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def AddBatchEntry(self, entry=None, id_url_string=None,
batch_id_string=None, operation_string=None):
"""Logic for populating members of a BatchEntry and adding to the feed.
If the entry is not a BatchEntry, it is converted to a BatchEntry so
that the batch specific members will be present.
The id_url_string can be used in place of an entry if the batch operation
applies to a URL. For example query and delete operations require just
the URL of an entry, no body is sent in the HTTP request. If an
id_url_string is sent instead of an entry, a BatchEntry is created and
added to the feed.
This method also assigns the desired batch id to the entry so that it
can be referenced in the server's response. If the batch_id_string is
None, this method will assign a batch_id to be the index at which this
entry will be in the feed's entry list.
Args:
entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
entry which will be sent to the server as part of the batch request.
The item must have a valid atom id so that the server knows which
entry this request references.
id_url_string: str (optional) The URL of the entry to be acted on. You
can find this URL in the text member of the atom id for an entry.
If an entry is not sent, this id will be used to construct a new
BatchEntry which will be added to the request feed.
batch_id_string: str (optional) The batch ID to be used to reference
this batch operation in the results feed. If this parameter is None,
the current length of the feed's entry array will be used as a
count. Note that batch_ids should either always be specified or
never, mixing could potentially result in duplicate batch ids.
operation_string: str (optional) The desired batch operation which will
set the batch_operation.type member of the entry. Options are
'insert', 'update', 'delete', and 'query'
Raises:
MissingRequiredParameters: Raised if neither an id_url_string nor an
entry is provided in the request.
Returns:
The added entry.
"""
if entry is None and id_url_string is None:
raise MissingRequiredParameters('supply either an entry or URL string')
if entry is None and id_url_string is not None:
entry = BatchEntry(atom_id=atom.Id(text=id_url_string))
# TODO: handle cases in which the entry lacks batch_... members.
#if not isinstance(entry, BatchEntry):
# Convert the entry to a batch entry.
if batch_id_string is not None:
entry.batch_id = BatchId(text=batch_id_string)
elif entry.batch_id is None or entry.batch_id.text is None:
entry.batch_id = BatchId(text=str(len(self.entry)))
if operation_string is not None:
entry.batch_operation = BatchOperation(op_type=operation_string)
self.entry.append(entry)
return entry
def AddInsert(self, entry, batch_id_string=None):
"""Add an insert request to the operations in this batch request feed.
The entry's operation type is set to insert, and its batch id is set to
the given batch_id_string (or to the entry's index in the feed if none is
given and the entry has no batch id yet).
Args:
entry: BatchEntry The entry which will be sent in the batch feed as an
insert request.
batch_id_string: str (optional) The batch ID to be used to reference
this batch operation in the results feed. If this parameter is None,
the current length of the feed's entry array will be used as a
count. Note that batch_ids should either always be specified or
never, mixing could potentially result in duplicate batch ids.
"""
entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
operation_string=BATCH_INSERT)
def AddUpdate(self, entry, batch_id_string=None):
"""Add an update request to the list of batch operations in this feed.
Sets the operation type of the entry to update and assigns the desired
batch id to the entry so that it can be referenced in the server's
response.
Args:
entry: BatchEntry The entry which will be sent to the server as an
update (HTTP PUT) request. The item must have a valid atom id
so that the server knows which entry to replace.
batch_id_string: str (optional) The batch ID to be used to reference
this batch operation in the results feed. If this parameter is None,
the current length of the feed's entry array will be used as a
count. See also comments for AddInsert.
"""
entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
operation_string=BATCH_UPDATE)
def AddDelete(self, url_string=None, entry=None, batch_id_string=None):
"""Adds a delete request to the batch request feed.
This method takes either the url_string which is the atom id of the item
to be deleted, or the entry itself. The atom id of the entry must be
present so that the server knows which entry should be deleted.
Args:
url_string: str (optional) The URL of the entry to be deleted. You can
find this URL in the text member of the atom id for an entry.
entry: BatchEntry (optional) The entry to be deleted.
batch_id_string: str (optional)
Raises:
MissingRequiredParameters: Raised if neither a url_string nor an entry
are provided in the request.
"""
entry = self.AddBatchEntry(entry=entry, id_url_string=url_string,
batch_id_string=batch_id_string,
operation_string=BATCH_DELETE)
def AddQuery(self, url_string=None, entry=None, batch_id_string=None):
"""Adds a query request to the batch request feed.
This method takes either a url_string, which is the query URL whose
results will be added to the result feed, or an entry. If a url_string
is sent, it is encapsulated in a new BatchEntry; alternatively you may
pass in a BatchEntry that already carries the query URL.
Args:
url_string: str (optional)
entry: BatchEntry (optional)
batch_id_string: str (optional)
Raises:
MissingRequiredParameters
"""
entry = self.AddBatchEntry(entry=entry, id_url_string=url_string,
batch_id_string=batch_id_string,
operation_string=BATCH_QUERY)
def GetBatchLink(self):
for link in self.link:
if link.rel == 'http://schemas.google.com/g/2005#batch':
return link
return None
def BatchFeedFromString(xml_string):
return atom.CreateClassFromXMLString(BatchFeed, xml_string)
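# Editor's note, an illustrative sketch (not part of the original module):
# building a batch request feed; the entry title and URL are hypothetical,
# and AddBatchEntry assigns index-based batch ids when none are given:
#
#   feed = BatchFeed()
#   feed.AddInsert(entry=BatchEntry(title=atom.Title(text='new row')))
#   feed.AddDelete(url_string='http://example.com/feeds/1/entry/42')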
class EntryLink(atom.AtomBase):
"""The gd:entryLink element"""
_tag = 'entryLink'
_namespace = GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
# The entry used to be an atom.Entry, now it is a GDataEntry.
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', GDataEntry)
_attributes['rel'] = 'rel'
_attributes['readOnly'] = 'read_only'
_attributes['href'] = 'href'
def __init__(self, href=None, read_only=None, rel=None,
entry=None, extension_elements=None,
extension_attributes=None, text=None):
self.href = href
self.read_only = read_only
self.rel = rel
self.entry = entry
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def EntryLinkFromString(xml_string):
return atom.CreateClassFromXMLString(EntryLink, xml_string)
class FeedLink(atom.AtomBase):
"""The gd:feedLink element"""
_tag = 'feedLink'
_namespace = GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_children['{%s}feed' % atom.ATOM_NAMESPACE] = ('feed', GDataFeed)
_attributes['rel'] = 'rel'
_attributes['readOnly'] = 'read_only'
_attributes['countHint'] = 'count_hint'
_attributes['href'] = 'href'
def __init__(self, count_hint=None, href=None, read_only=None, rel=None,
feed=None, extension_elements=None, extension_attributes=None,
text=None):
self.count_hint = count_hint
self.href = href
self.read_only = read_only
self.rel = rel
self.feed = feed
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def FeedLinkFromString(xml_string):
return atom.CreateClassFromXMLString(FeedLink, xml_string)
|
pyblish/pyblish-win
|
refs/heads/master
|
lib/Python27/Lib/logging/__init__.py
|
26
|
# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, cStringIO, traceback, warnings, weakref, collections
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
# Note: the attributes below are no longer maintained.
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
try:
unicode
_unicode = True
except NameError:
_unicode = False
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = 1
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
def _checkLevel(level):
if isinstance(level, (int, long)):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
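# Editor's note, an illustrative sketch: registering a custom level so both
# directions of the _levelNames mapping resolve it:
#
#   TRACE = 5
#   addLevelName(TRACE, 'TRACE')
#   assert getLevelName(TRACE) == 'TRACE' and _checkLevel('TRACE') == TRACE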
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if thread:
_lock = threading.RLock()
else:
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warn('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
# Issue #21172: a request was made to relax the isinstance check
# to hasattr(args[0], '__getitem__'). However, the docs on string
# formatting still seem to suggest a mapping object is required.
# Thus, while not removing the isinstance check, it does now look
# for collections.Mapping rather than, as before, dict.
if (args and len(args) == 1 and isinstance(args[0], collections.Mapping)
and args[0]):
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - long(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and thread:
self.thread = thread.get_ident()
self.threadName = threading.current_thread().name
else:
self.thread = None
self.threadName = None
if not logMultiprocessing:
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except StandardError:
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
if not _unicode: #if no unicode support...
msg = str(self.msg)
else:
msg = self.msg
if not isinstance(msg, basestring):
try:
msg = str(self.msg)
except UnicodeError:
msg = self.msg #Defer encoding till later
if self.args:
msg = msg % self.args
return msg
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = LogRecord(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
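# Editor's note, an illustrative sketch: rebuilding a record from a dict,
# as a socket listener would after unpickling a received event:
#
#   rec = makeLogRecord({'name': 'app', 'levelno': INFO, 'levelname': 'INFO',
#                        'msg': 'hello %s', 'args': ('world',)})
#   print rec.getMessage()   # prints 'hello world'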
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
"""
if fmt:
self._fmt = fmt
else:
self._fmt = "%(message)s"
self.datefmt = datefmt
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
s = "%s,%03d" % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = cStringIO.StringIO()
traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._fmt.find("%(asctime)") >= 0
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self._fmt % record.__dict__
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
try:
s = s + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
s = s + record.exc_text.decode(sys.getfilesystemencoding(),
'replace')
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
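# Editor's note, an illustrative sketch: a formatter built from the record
# attributes documented above; usesTime() is true here, so format() will
# also set record.asctime:
#
#   fmt = Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s',
#                   datefmt='%Y-%m-%d %H:%M:%S')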
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return 1
elif self.name == record.name:
return 1
elif record.name.find(self.name, 0, self.nlen) != 0:
return 0
return (record.name[self.nlen] == ".")
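# Editor's note, an illustrative sketch: Filter('A.B') passes records from
# logger 'A.B' and its descendants but rejects siblings such as 'A.BB':
#
#   f = Filter('A.B')
#   f.filter(makeLogRecord({'name': 'A.B.C'}))   # truthy: allowed
#   f.filter(makeLogRecord({'name': 'A.BB'}))    # falsey: dropped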
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
"""
rv = 1
for f in self.filters:
if not f.filter(record):
rv = 0
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. It can also be called from another thread. So we need to
# pre-emptively grab the necessary globals and check if they're None,
# to prevent race conditions and failures during interpreter shutdown.
acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
if acquire and release and handlers:
acquire()
try:
if wr in handlers:
handlers.remove(wr)
finally:
release()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if thread:
self.lock = threading.RLock()
else:
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError:
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
fs = "%s\n"
if not _unicode: #if no unicode support...
stream.write(fs % msg)
else:
try:
if (isinstance(msg, unicode) and
getattr(stream, 'encoding', None)):
ufs = u'%s\n'
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
#Printing to terminals sometimes fails. For example,
#with an encoding of 'cp1251', the above write will
#work if written to a stream opened or wrapped by
#the codecs module, but fail when writing to a
#terminal even when the codepage is set to cp1251.
#An extra encoding step seems to be needed.
stream.write((ufs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
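# Editor's note, an illustrative sketch: wiring a StreamHandler (defaults
# to sys.stderr) with a formatter and level:
#
#   h = StreamHandler()
#   h.setFormatter(Formatter('%(levelname)s %(message)s'))
#   h.setLevel(DEBUG)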
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=0):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
if codecs is None:
encoding = None
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
self.stream = None
# Issue #19523: call unconditionally to
# prevent a handler leak when delay is set
StreamHandler.close(self)
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
if self.encoding is None:
stream = open(self.baseFilename, self.mode)
else:
stream = codecs.open(self.baseFilename, self.mode, self.encoding)
return stream
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
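# Editor's note, an illustrative sketch: with delay set, the file (path is
# hypothetical) is not opened until the first record is emitted:
#
#   fh = FileHandler('/tmp/app.log', delay=1)
#   assert fh.stream is None   # _open() runs lazily inside emit()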
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
#self.loggers = [alogger]
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
#if alogger not in self.loggers:
if alogger not in self.loggerMap:
#self.loggers.append(alogger)
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = 0
self.loggerDict = {}
self.loggerClass = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, basestring):
            raise TypeError('A logger name must be a string or Unicode')
if isinstance(name, unicode):
name = name.encode('utf-8')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
            # The if means: if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
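# A minimal sketch of the placeholder mechanics above, assuming the module is
# imported normally: creating a child first leaves a PlaceHolder for the
# parent, and a later getLogger() call swaps in a real Logger and re-links
# the child (the names below are illustrative).
def _demo_placeholder_fixup():
    child = getLogger('phdemo.sub')   # 'phdemo' becomes a PlaceHolder
    parent = getLogger('phdemo')      # PlaceHolder replaced by a real Logger
    return child.parent is parent     # True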
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = 1
self.handlers = []
self.disabled = 0
def setLevel(self, level):
"""
Set the logging level of this logger.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
warn = warning
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func = self.findCaller()
except ValueError:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = 1
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return 0
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
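# A minimal sketch of the getChild() equivalence described in its docstring,
# assuming normal import: both lookups return the identical Logger instance.
def _demo_get_child():
    child = getLogger('abc').getChild('def.ghi')
    return child is getLogger('abc.def.ghi')  # True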
class RootLogger(Logger):
"""
    A root logger is not that different from any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.info(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.error(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
kwargs["exc_info"] = 1
self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.critical(msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def isEnabledFor(self, level):
"""
See if the underlying logger is enabled for the specified level.
"""
return self.logger.isEnabledFor(level)
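# A minimal sketch of subclassing LoggerAdapter as suggested by process():
# prefix each message with a connection id taken from the adapter's extra
# dict (the 'connid' key is illustrative).
class _ConnIdAdapter(LoggerAdapter):
    def process(self, msg, kwargs):
        kwargs["extra"] = self.extra
        return '[%s] %s' % (self.extra.get('connid', '?'), msg), kwargs
# Usage: _ConnIdAdapter(getLogger('net'), {'connid': 'c42'}).info('connected')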
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
hdlr = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
hdlr = StreamHandler(stream)
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
fmt = Formatter(fs, dfs)
hdlr.setFormatter(fmt)
root.addHandler(hdlr)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
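# A usage sketch for the keyword arguments documented above; recall that when
# both 'filename' and 'stream' are given, 'stream' is ignored.
def _demo_basic_config():
    basicConfig(level=DEBUG,
                format='%(asctime)s %(levelname)s %(name)s: %(message)s',
                datefmt='%H:%M:%S')
    getLogger('demo').debug('root logger configured')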
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger,
with exception information.
"""
kwargs['exc_info'] = 1
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
warn = warning
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
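# A minimal sketch of the library pattern from the NullHandler docstring:
# attach a NullHandler to the library's top-level logger so applications that
# never configure logging do not see the one-off warning ('mylib' is an
# illustrative name).
def _demo_library_logger():
    lib_log = getLogger('mylib')
    lib_log.addHandler(NullHandler())
    return lib_log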
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning() which redirects to logging. It first
    checks to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
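# A minimal sketch of captureWarnings(): while capture is on, warnings.warn()
# is routed to the 'py.warnings' logger instead of being printed.
def _demo_capture_warnings():
    captureWarnings(True)
    try:
        warnings.warn('this goes through the py.warnings logger')
    finally:
        captureWarnings(False)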
|
tweemeterjop/thug
|
refs/heads/master
|
thug/ActiveX/modules/CGAgent.py
|
1
|
# Chinagames iGame CGAgent ActiveX Control Buffer Overflow
# CVE-2009-1800
import logging
log = logging.getLogger("Thug")
def CreateChinagames(self, arg0):
if len(arg0) > 428:
log.ThugLogging.log_exploit_event(self._window.url,
"CGAgent ActiveX",
"CreateChinagames Method Buffer Overflow",
cve = 'CVE-2009-1800')
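# A minimal sketch of the check above: the vulnerable control overflows a
# fixed-size buffer, so any single string argument longer than 428 characters
# is treated as an exploit attempt (the helper name is illustrative; Thug
# itself wires CreateChinagames into its ActiveX emulation layer).
def _overflow_candidate(arg0, limit=428):
    return len(arg0) > limit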
|
pacoqueen/ginn
|
refs/heads/master
|
extra/install/ipython2/ipython-5.10.0/IPython/core/magics/display.py
|
8
|
"""Simple magics for display formats"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from IPython.core.display import display, Javascript, Latex, SVG, HTML
from IPython.core.magic import (
Magics, magics_class, cell_magic
)
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class DisplayMagics(Magics):
"""Magics for displaying various output types with literals
Defines javascript/latex/svg/html cell magics for writing
blocks in those languages, to be rendered in the frontend.
"""
@cell_magic
def js(self, line, cell):
"""Run the cell block of Javascript code
Alias of `%%javascript`
"""
self.javascript(line, cell)
@cell_magic
def javascript(self, line, cell):
"""Run the cell block of Javascript code"""
display(Javascript(cell))
@cell_magic
def latex(self, line, cell):
"""Render the cell as a block of latex
        The subset of latex which is supported depends on the implementation in
the client. In the Jupyter Notebook, this magic only renders the subset
of latex defined by MathJax
[here](https://docs.mathjax.org/en/v2.5-latest/tex.html)."""
display(Latex(cell))
@cell_magic
def svg(self, line, cell):
"""Render the cell as an SVG literal"""
display(SVG(cell))
@cell_magic
def html(self, line, cell):
"""Render the cell as a block of HTML"""
display(HTML(cell))
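# A usage sketch (assumes an active IPython session; nothing runs at import):
# invoking a cell magic programmatically is equivalent to starting a notebook
# cell with %%html, %%latex, %%svg or %%javascript.
def _demo_html_magic():
    from IPython import get_ipython
    get_ipython().run_cell_magic('html', '', '<b>rendered as HTML</b>')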
|
hunering/demo-code
|
refs/heads/master
|
python/language/4-control-flow-for.py
|
1
|
words = ['cat', 'window', 'defenestrate']
for word in words:
print(word, " length is ", len(word))
# the following code causes an infinite loop: inserting grows the list while it is being iterated
# for word in words:
# if len(word) > 6:
# words.insert(0, "asfdsafd")
for word in words[:]:
if len(word) > 6:
words.insert(0, "asfdsafd")
for i, v in enumerate(['tic', 'tac', 'toe']):
print(i, v)
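# An alternative to the slice copy above: list() makes an explicit copy and
# also works for non-list iterables.
words = ['cat', 'window', 'defenestrate']
for word in list(words):
    if len(word) > 6:
        words.insert(0, "asfdsafd")
print(words)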
|
Kagee/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/hearthisat.py
|
108
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
HEADRequest,
str_to_int,
urlencode_postdata,
urlhandle_detect_ext,
)
class HearThisAtIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$'
_PLAYLIST_URL = 'https://hearthis.at/playlist.php'
_TEST = {
'url': 'https://hearthis.at/moofi/dr-kreep',
'md5': 'ab6ec33c8fed6556029337c7885eb4e0',
'info_dict': {
'id': '150939',
'ext': 'wav',
'title': 'Moofi - Dr. Kreep',
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1421564134,
'description': 'Creepy Patch. Mutable Instruments Braids Vowel + Formant Mode.',
'upload_date': '20150118',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 71,
'categories': ['Experimental'],
}
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
display_id = '{artist:s} - {title:s}'.format(**m.groupdict())
webpage = self._download_webpage(url, display_id)
track_id = self._search_regex(
r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')
payload = urlencode_postdata({'tracks[]': track_id})
req = compat_urllib_request.Request(self._PLAYLIST_URL, payload)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
track = self._download_json(req, track_id, 'Downloading playlist')[0]
title = '{artist:s} - {title:s}'.format(**track)
categories = None
if track.get('category'):
categories = [track['category']]
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
view_count = str_to_int(self._search_regex(
meta_span % 'plays_count', webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
meta_span % 'likes_count', webpage, 'like count', fatal=False))
comment_count = str_to_int(self._search_regex(
meta_span % 'comment_count', webpage, 'comment count', fatal=False))
duration = str_to_int(self._search_regex(
r'data-length="(\d+)', webpage, 'duration', fatal=False))
timestamp = str_to_int(self._search_regex(
r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False))
formats = []
mp3_url = self._search_regex(
r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"',
webpage, 'mp3 URL', fatal=False)
if mp3_url:
formats.append({
'format_id': 'mp3',
'vcodec': 'none',
'acodec': 'mp3',
'url': mp3_url,
})
download_path = self._search_regex(
r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"',
webpage, 'download URL', default=None)
if download_path:
download_url = compat_urlparse.urljoin(url, download_path)
ext_req = HEADRequest(download_url)
ext_handle = self._request_webpage(
ext_req, display_id, note='Determining extension')
ext = urlhandle_detect_ext(ext_handle)
formats.append({
'format_id': 'download',
'vcodec': 'none',
'ext': ext,
'url': download_url,
'preference': 2, # Usually better quality
})
self._sort_formats(formats)
return {
'id': track_id,
'display_id': display_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
'timestamp': timestamp,
'view_count': view_count,
'comment_count': comment_count,
'like_count': like_count,
'categories': categories,
}
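# A minimal sketch of the templated metadata regex used in _real_extract():
# one format string parameterised by CSS class name, reused for the plays,
# likes and comments counters (the sample markup below is illustrative).
def _demo_meta_span():
    meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
    sample = '<span class="plays_count"><i></i>1,205</span>'
    return re.search(meta_span % 'plays_count', sample).group(1)  # '1,205'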
|
larsoner/mne-python
|
refs/heads/master
|
mne/io/nihon/__init__.py
|
12
|
"""Nihon Kohden module for conversion to FIF."""
# Author: Fede Raimondo <federaimondo@gmail.com>
#
# License: BSD (3-clause)
from .nihon import read_raw_nihon
|
youfoh/TizenProject
|
refs/heads/tizen
|
tools/perf/tests/attr.py
|
3174
|
#! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have the following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
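# An illustrative description file in the format above (the values are
# examples only; the real files live under tests/attr/):
EXAMPLE_TEST = '''
[config]
command = record
args    = kill >/dev/null 2>&1
ret     = 1

[event:base-record]
fd = 1
'''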
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the 'event' word,
        # optionally followed by ':' which allows loading the 'parent
        # event' first as a base
for section in filter(self.is_event, parser_event.sections()):
            parser_items = parser_event.items(section)
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
                raise Fail(self, 'match failure')
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
            group_fd = event['group_fd']
            if group_fd == '-1':
                continue
            for iname, ievent in events.items():
                if (ievent['fd'] == group_fd):
                    event.group = iname
                    log.debug('[%s] has group leader [%s]' % (name, iname))
                    break
def run(self):
        tempdir = tempfile.mkdtemp()
        try:
            # run the test script
            self.run_cmd(tempdir)
            # load events expectation for the test
            log.debug(" loading result events")
            for f in glob.glob(tempdir + '/event*'):
                self.load_events(f, self.result)
            # resolve group_fd to event names
            self.resolve_groups(self.expect)
            self.resolve_groups(self.result)
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
uethackathon05/uethackathon05.github.io
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py
|
1446
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
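  # A minimal sketch of the capture pattern above (illustrative only; not
  # picked up by the unittest loader since the name does not start with
  # 'test'): validators write warnings to the StringIO sink, and the test
  # compares the sorted non-empty lines.
  def _demo_warning_capture(self):
    MSVSSettings.ValidateMSVSSettings({'NoSuchTool': {}}, self.stderr)
    self._ExpectedWarnings(['Warning: unrecognized tool NoSuchTool'])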
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalDependencies_excluded': 'file3',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'true',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
|
mrquim/repository.mrquim
|
refs/heads/master
|
repo/script.module.youtube.dl/lib/youtube_dl/extractor/dhm.py
|
64
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import parse_duration
class DHMIE(InfoExtractor):
IE_DESC = 'Filmarchiv - Deutsches Historisches Museum'
_VALID_URL = r'https?://(?:www\.)?dhm\.de/filmarchiv/(?:[^/]+/)+(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.dhm.de/filmarchiv/die-filme/the-marshallplan-at-work-in-west-germany/',
'md5': '11c475f670209bf6acca0b2b7ef51827',
'info_dict': {
'id': 'the-marshallplan-at-work-in-west-germany',
'ext': 'flv',
'title': 'MARSHALL PLAN AT WORK IN WESTERN GERMANY, THE',
'description': 'md5:1fabd480c153f97b07add61c44407c82',
'duration': 660,
'thumbnail': r're:^https?://.*\.jpg$',
},
}, {
'url': 'http://www.dhm.de/filmarchiv/02-mapping-the-wall/peter-g/rolle-1/',
'md5': '09890226332476a3e3f6f2cb74734aa5',
'info_dict': {
'id': 'rolle-1',
'ext': 'flv',
'title': 'ROLLE 1',
'thumbnail': r're:^https?://.*\.jpg$',
},
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
playlist_url = self._search_regex(
r"file\s*:\s*'([^']+)'", webpage, 'playlist url')
entries = self._extract_xspf_playlist(playlist_url, playlist_id)
title = self._search_regex(
[r'dc:title="([^"]+)"', r'<title> »([^<]+)</title>'],
webpage, 'title').strip()
description = self._html_search_regex(
r'<p><strong>Description:</strong>(.+?)</p>',
webpage, 'description', default=None)
duration = parse_duration(self._search_regex(
r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)',
webpage, 'duration', default=None))
entries[0].update({
'title': title,
'description': description,
'duration': duration,
})
return self.playlist_result(entries, playlist_id)
|
seg/2016-ml-contest
|
refs/heads/master
|
rkappius/facies_w_tf_submit.py
|
1
|
################################################################################
#
# Facies Classification using ML in (Google) TensorFlow
#
# Russell A. Kappius
# Kappius Consulting LLC
# Supported by Sterling Seismic Services
#
# January 18, 2017
#
################################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import numpy as np
#np.random.seed(0)
# Input from csv files
################################################################################
# Current method: use 'facies_vectors.csv' and 'validation_data_nofacies.csv'
################################################################################
training_data = pd.read_csv('facies_vectors.csv')
test_data = pd.read_csv('validation_data_nofacies.csv')
# Isolate training vectors & labels; build test vectors plus dummy test labels (all ones)
all_vectors = training_data.drop(['Facies','Formation', 'Well Name', 'Depth'], axis=1)
all_labels = training_data['Facies'].values
# Remove NaNs
nan_idx = np.any(np.isnan(all_vectors), axis=1)
training_vectors = all_vectors[np.logical_not(nan_idx)]
training_labels = all_labels [np.logical_not(nan_idx)]
test_vectors = test_data.drop(['Formation', 'Well Name', 'Depth'], axis=1)
test_labels = np.ones(test_vectors.shape[0], dtype=np.int)
################################################################################
################################################################################
# Scale feature vectors
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(training_vectors)
scaled_training_vectors = scaler.transform(training_vectors)
# Reuse the scaler fit on the training data so train and test vectors get a
# consistent transform (fitting a second scaler on the test set would not).
scaled_test_vectors = scaler.transform(test_vectors)
################################################################################
################################################################################
# use (my) DataSet class to provide 'next_batch' functionality to TensorFlow
# Also changes labels to 'one-hot' 2D arrays
import DataSet
training_dataset = DataSet.load_dataset(scaled_training_vectors,training_labels)
test_dataset = DataSet.load_dataset(scaled_test_vectors,test_labels)
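# Illustration only (DataSet is the author's helper, so this is an assumption
# about its behaviour): one-hot encoding nine facies labelled 1..9 amounts to
#   one_hot = np.eye(9)[training_labels - 1]
# which maps e.g. label 3 to [0, 0, 1, 0, 0, 0, 0, 0, 0].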
################################################################################
################################################################################
# Solve with (Google) TensorFlow
import tensorflow as tf
# Create the model
# 7 elements in each feature vector, 9 possible facies
x = tf.placeholder(tf.float32, [None, 7])
W = tf.Variable(tf.zeros([7, 9]))
b = tf.Variable(tf.zeros([9]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None,9])
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
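# For reference, the loss above is the mean softmax cross-entropy,
#   H(y_, y) = -sum_i y_[i] * log(softmax(y)[i]),
# computed per example and averaged over the batch.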
# create a session
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
# Train
for _ in range(1000):
batch_xs, batch_ys = training_dataset.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# predict the unknown (test) labels
run_test = tf.argmax(y,1)
test_labels = \
sess.run(run_test, \
feed_dict={x: test_dataset.feature_vectors, y_: test_dataset.labels})
# save predicted labels
test_data['Facies'] = test_labels
test_data.to_csv('PredictedResults.csv')
#print(test_labels)
print('done')
################################################################################
|
sivakuna-aap/superdesk-content-api
|
refs/heads/master
|
content_api/publish/service.py
|
2
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.utils import config
from superdesk.services import BaseService
logger = logging.getLogger(__name__)
class PublishService(BaseService):
"""
A service for publishing to the content api.
Serves mainly as a proxy to the data layer.
"""
def create(self, docs, **kwargs):
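        # Upsert semantics: each doc is keyed by its guid; an already-published
        # item is updated in place, anything new falls through to create.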
ids = []
for doc in docs:
doc[config.ID_FIELD] = doc['guid']
del doc['guid']
_id = doc[config.ID_FIELD]
original = self.find_one(req=None, _id=_id)
if original:
self.update(_id, doc, original)
ids.append(_id)
else:
ids.extend(super().create([doc], **kwargs))
return ids
|
TheTypoMaster/my-vim-set-mac
|
refs/heads/master
|
.vim/bundle/YouCompleteMe/third_party/ycmd/third_party/jedi/test/static_analysis/descriptors.py
|
28
|
# classmethod
class TarFile():
@classmethod
def open(cls, name, **kwargs):
return cls.taropen(name, **kwargs)
@classmethod
def taropen(cls, name, **kwargs):
return name
# should just work
TarFile.open('hallo')
|
alhashash/yowsup
|
refs/heads/master
|
yowsup/layers/protocol_groups/protocolentities/iq_groups_leave_success.py
|
39
|
from yowsup.structs import ProtocolTreeNode
from yowsup.layers.protocol_iq.protocolentities import ResultIqProtocolEntity
class SuccessLeaveGroupsIqProtocolEntity(ResultIqProtocolEntity):
'''
<iq type="result" from="g.us" id="{{ID}}">
<leave>
<group id="{{GROUP_JID}}"></group>
</leave>
</iq>
'''
def __init__(self, _id, groupId):
super(SuccessLeaveGroupsIqProtocolEntity, self).\
__init__(_from="g.us", _id=_id)
self.setProps(groupId)
def setProps(self, groupId):
self.groupId = groupId
def __str__(self):
out = super(SuccessLeaveGroupsIqProtocolEntity, self).__str__()
out += "Group Id: %s\n" % self.groupId
return out
def toProtocolTreeNode(self):
node = super(SuccessLeaveGroupsIqProtocolEntity, self).\
toProtocolTreeNode()
leaveNode = ProtocolTreeNode(
"leave", {}, [ProtocolTreeNode("group", {"id": self.groupId})]
)
node.addChild(leaveNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = super(SuccessLeaveGroupsIqProtocolEntity, SuccessLeaveGroupsIqProtocolEntity).fromProtocolTreeNode(node)
entity.__class__ = SuccessLeaveGroupsIqProtocolEntity
entity.setProps(
node.getChild("leave").getChild("group").getAttributeValue("id")
)
return entity
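# Illustration only (made-up id/jid values):
#   node = SuccessLeaveGroupsIqProtocolEntity("msg-1", "123456@g.us").toProtocolTreeNode()
# builds an <iq type="result"> node carrying <leave><group id="123456@g.us">,
# matching the XML sketch in the class docstring.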
|
noslenfa/tdjangorest
|
refs/heads/master
|
uw/lib/python2.7/site-packages/IPython/kernel/blocking/__init__.py
|
28
|
from .client import BlockingKernelClient
|
r8o8s1e0/ChromeWebLab
|
refs/heads/master
|
Orchestra/sw/hub/orchestra/messenger/format.py
|
7
|
#
# format.py: encode/decode messages from Node.js server's
# byte-compressed format
#
# See MessageEncoder.js in server source for message formats.
#
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.python import log
# Message ID constants (PARTIAL)
# See MessageIds.js in server source
#
RECEIVED_MESSAGE_IDS = {
0x10: 'add_note',
0x12: 'change_note',
0x14: 'remove_note',
0x0B: 'current_layout',
0x71: 'error'
}
SENT_MESSAGE_IDS = {
'note_added': 0x11,
'note_changed': 0x13,
'note_removed': 0x15,
'loop_times': 0x20
}
LAYOUT_BYTES_PER_NOTE = 3
def decode_message_type(message):
"""
Get message type from 0th byte of message
"""
try:
message_id = ord(message[0])
return RECEIVED_MESSAGE_IDS[message_id]
except KeyError:
log.err("Message ID not recognized (%d)" % message_id)
return None
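# Worked example: decode_message_type('\x10\x01\x02') returns 'add_note';
# an unrecognized leading byte is logged and yields None.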
def decode_layout(message):
"""
Loop through each note (i.e. blob) of each instrument in layout,
producing a dict in the format sequencer expects.
"""
instrument_index = 1
instruments = []
# For each instrument...
while instrument_index < len(message):
num_notes = ord(message[instrument_index])
note_index = instrument_index + 1
next_instrument_index = note_index + (num_notes * LAYOUT_BYTES_PER_NOTE)
notes = []
# For each note...
while note_index < next_instrument_index:
notes.append({
'id': ord(message[note_index]),
'pos': ord(message[note_index + 1]),
'pitch': ord(message[note_index + 2])
})
note_index += LAYOUT_BYTES_PER_NOTE
instruments.append(notes)
instrument_index = next_instrument_index
return instruments
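# Worked example (hypothetical bytes in the format above): the layout message
# '\x0b\x02\x01\x04\x3c\x02\x08\x40' -- message ID, then one instrument
# holding two 3-byte notes -- decodes to
# [[{'id': 1, 'pos': 4, 'pitch': 60}, {'id': 2, 'pos': 8, 'pitch': 64}]].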
def decode_note(message):
"""
Used by note update methods (add/change/remove).
Only the latter omits the pitch/pos, and therefore is shorter.
"""
instrument_id = ord(message[1])
note = {
'id': ord(message[2])
}
if len(message) > 3:
note['pos'] = ord(message[3])
note['pitch'] = ord(message[4])
return instrument_id, note
def encode_note_confirmation(message_type, message, note_time):
"""
Sling request back to Node.js with some mods to format it
as a response (i.e. confirmation).
"""
response = ""
# translate request message ID to response ID
    if message_type == 'change_note':  # use ==, not 'is': string identity only works by interning accident
        response = chr(SENT_MESSAGE_IDS['note_changed'])
    elif message_type == 'add_note':
        response = chr(SENT_MESSAGE_IDS['note_added'])
    elif message_type == 'remove_note':
        response = chr(SENT_MESSAGE_IDS['note_removed'])
    # just pass the note params back verbatim
    if message_type == 'remove_note':
response += message[1:3]
else:
response += message[1:5]
if note_time:
response += encode_date(note_time)
return response
def encode_loop_times(loop_times):
"""
Encode an array of timestamps
"""
message = chr(SENT_MESSAGE_IDS['loop_times'])
for time in loop_times:
message += encode_date(time)
return message
def encode_date(note_time):
"""
Compress time (epoch time in millis) into 6 bytes.
Ported from MessageEncoder.encodeDate in web client source
"""
time_string = ""
for b in range(6):
one_byte = note_time & 0x7F
time_string = chr(one_byte) + time_string
note_time = (note_time - one_byte) / 128
return time_string
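# Worked example: encode_date(1) yields '\x00\x00\x00\x00\x00\x01'; each byte
# carries seven bits of the millisecond timestamp, most significant byte first.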
|
adamtornhill/maat-scripts
|
refs/heads/master
|
miner/complexity_calculations.py
|
1
|
import re
######################################################################
## Complexity calculations
######################################################################
leading_tabs_expr = re.compile(r'^(\t+)')
leading_spaces_expr = re.compile(r'^( +)')
empty_line_expr = re.compile(r'^\s*$')
def n_log_tabs(line):
pattern = re.compile(r' +')
wo_spaces = re.sub(pattern, '', line)
m = leading_tabs_expr.search(wo_spaces)
if m:
tabs = m.group()
return len(tabs)
return 0
def n_log_spaces(line):
pattern = re.compile(r'\t+')
wo_tabs = re.sub(pattern, '', line)
m = leading_spaces_expr.search(wo_tabs)
if m:
spaces = m.group()
return len(spaces)
return 0
def contains_code(line):
return not empty_line_expr.match(line)
def complexity_of(line):
return n_log_tabs(line) + (n_log_spaces(line) / 4) # hardcoded indentation
######################################################################
## Statistics from complexity
######################################################################
def calculate_complexity_in(source):
return [complexity_of(line) for line in source.split("\n") if contains_code(line)]
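# Worked example: complexity_of('\t    x = 1') == 2 -- one leading tab plus
# four leading spaces at the hardcoded 4-space indent (1 + 4 / 4).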
|
achang97/YouTunes
|
refs/heads/master
|
lib/python2.7/_abcoll.py
|
62
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
]
### ONE-TRICK PONIES ###
def _hasattr(C, attr):
try:
return any(attr in B.__dict__ for B in C.__mro__)
except AttributeError:
# Old-style class
return hasattr(C, attr)
class Hashable:
__metaclass__ = ABCMeta
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
try:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
except AttributeError:
# Old-style class
if getattr(C, "__hash__", None):
return True
return NotImplemented
class Iterable:
__metaclass__ = ABCMeta
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if _hasattr(C, "__iter__"):
return True
return NotImplemented
Iterable.register(str)
class Iterator(Iterable):
@abstractmethod
def next(self):
'Return the next item from the iterator. When exhausted, raise StopIteration'
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if _hasattr(C, "next") and _hasattr(C, "__iter__"):
return True
return NotImplemented
class Sized:
__metaclass__ = ABCMeta
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if _hasattr(C, "__len__"):
return True
return NotImplemented
class Container:
__metaclass__ = ABCMeta
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if _hasattr(C, "__contains__"):
return True
return NotImplemented
class Callable:
__metaclass__ = ABCMeta
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if _hasattr(C, "__call__"):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), redefine __le__ and __ge__,
then the other operations will automatically follow suit.
"""
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) > len(other) and self.__ge__(other)
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) < len(other):
return False
for elem in other:
if elem not in self:
return False
return True
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
__rand__ = __and__
def isdisjoint(self, other):
'Return True if two sets have a null intersection.'
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
__ror__ = __or__
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __rsub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in other
if value not in self)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
__rxor__ = __xor__
# Sets are not hashable by default, but subclasses can change this
__hash__ = None
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxint
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
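# Illustration only (not part of this module): a concrete Set needs just
# __contains__, __iter__ and __len__; the mixin methods above supply the rest.
#
#     class ListSet(Set):
#         def __init__(self, iterable=()):
#             self._items = []
#             for v in iterable:
#                 if v not in self._items:
#                     self._items.append(v)
#         def __contains__(self, v): return v in self._items
#         def __iter__(self): return iter(self._items)
#         def __len__(self): return len(self._items)
#
#     ListSet('ab') | ListSet('bc')   # a ListSet with elements 'a', 'b', 'c'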
class MutableSet(Set):
"""A mutable set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__, __len__,
add(), and discard().
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
"""A Mapping is a generic container for associating key/value
pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __iter__, and __len__.
"""
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def iterkeys(self):
'D.iterkeys() -> an iterator over the keys of D'
return iter(self)
def itervalues(self):
'D.itervalues() -> an iterator over the values of D'
for key in self:
yield self[key]
def iteritems(self):
'D.iteritems() -> an iterator over the (key, value) items of D'
for key in self:
yield (key, self[key])
def keys(self):
"D.keys() -> list of D's keys"
return list(self)
def items(self):
"D.items() -> list of D's (key, value) pairs, as 2-tuples"
return [(key, self[key]) for key in self]
def values(self):
"D.values() -> list of D's values"
return [self[key] for key in self]
# Mappings are not hashable by default, but subclasses can change this
__hash__ = None
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
for key in self._mapping:
yield key
KeysView.register(type({}.viewkeys()))
class ItemsView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(type({}.viewitems()))
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(type({}.viewvalues()))
class MutableMapping(Mapping):
"""A MutableMapping is a generic container for associating
key/value pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __setitem__, __delitem__,
__iter__, and __len__.
"""
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
'''D.popitem() -> (k, v), remove and return some (key, value) pair
as a 2-tuple; but raise KeyError if D is empty.
'''
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
'D.clear() -> None. Remove all items from D.'
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k, v in F.items(): D[k] = v
'''
if not args:
raise TypeError("descriptor 'update' of 'MutableMapping' object "
"needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('update expected at most 1 arguments, got %d' %
len(args))
if args:
other = args[0]
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
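# Illustration only (not part of this module): implementing the five abstract
# methods is enough to inherit get/pop/popitem/setdefault/update/clear.
#
#     class DictBacked(MutableMapping):
#         def __init__(self): self._d = {}
#         def __getitem__(self, k): return self._d[k]
#         def __setitem__(self, k, v): self._d[k] = v
#         def __delitem__(self, k): del self._d[k]
#         def __iter__(self): return iter(self._d)
#         def __len__(self): return len(self._d)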
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
'''S.index(value) -> integer -- return first index of value.
Raises ValueError if the value is not present.
'''
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
'S.count(value) -> integer -- return number of occurrences of value'
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(basestring)
Sequence.register(buffer)
Sequence.register(xrange)
class MutableSequence(Sequence):
"""All the operations on a read-only sequence.
Concrete subclasses must provide __new__ or __init__,
__getitem__, __setitem__, __delitem__, __len__, and insert().
"""
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
'S.insert(index, object) -- insert object before index'
raise IndexError
def append(self, value):
'S.append(object) -- append object to the end of the sequence'
self.insert(len(self), value)
def reverse(self):
'S.reverse() -- reverse *IN PLACE*'
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
'S.extend(iterable) -- extend sequence by appending elements from the iterable'
for v in values:
self.append(v)
def pop(self, index=-1):
'''S.pop([index]) -> item -- remove and return item at index (default last).
Raise IndexError if list is empty or index is out of range.
'''
v = self[index]
del self[index]
return v
def remove(self, value):
'''S.remove(value) -- remove first occurrence of value.
Raise ValueError if the value is not present.
'''
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
|
omni5cience/django-inlineformfield
|
refs/heads/master
|
.tox/py27/lib/python2.7/site-packages/django/contrib/gis/geos/tests/test_geos.py
|
52
|
from __future__ import unicode_literals
import ctypes
import json
import random
import unittest
from unittest import skipUnless
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis import memoryview
from django.contrib.gis.geometry.test_data import TestDataMixin
from django.utils.encoding import force_bytes
from django.utils import six
from django.utils.six.moves import xrange
from .. import HAS_GEOS
if HAS_GEOS:
from .. import (GEOSException, GEOSIndexError, GEOSGeometry,
GeometryCollection, Point, MultiPoint, Polygon, MultiPolygon, LinearRing,
LineString, MultiLineString, fromfile, fromstr, geos_version_info)
from ..base import gdal, numpy, GEOSBase
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(unittest.TestCase, TestDataMixin):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
def test_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
# preferable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
# Anything that is either not None or the acceptable pointer type will
# result in a TypeError when trying to assign it to the `ptr` property.
        # Thus, memory addresses (integers) and pointers of the incorrect type
# (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
if geom.hasz and geos_version_info()['version'] >= '3.3.0':
self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
def test_hexewkb(self):
"Testing (HEX)EWKB output."
# For testing HEX(EWKB).
ogc_hex = b'01010000000000000000000000000000000000F03F'
ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex_3d, pnt_3d.hex)
        # HEXEWKB should be appropriate for its dimension -- have to use
        # a WKBWriter w/dimension set accordingly, else GEOS will insert
        # garbage into the 3D coordinate if there is none.  Also, GEOS has
        # a bug in versions prior to 3.1 that puts the X coordinate in
        # place of Z; an exception should be raised on those versions.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
# Same for EWKB.
self.assertEqual(memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
self.assertEqual(memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
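    # Hedged aside (not part of the original suite): the WKBWriter mentioned
    # in the comment above must have its output dimension raised before 3D
    # geometries survive a round-trip.  A minimal sketch, assuming the stock
    # django.contrib.gis.geos.WKBWriter API:
    #
    #     from django.contrib.gis.geos import WKBWriter
    #     wkb_w = WKBWriter()
    #     wkb_w.outdim = 3    # the default of 2 silently drops Z
    #     hex_3d = wkb_w.write_hex(Point(1, 2, 3, srid=4326))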
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml:
self.assertEqual(kml, geom.kml)
def test_errors(self):
"Testing the Error handlers."
# string-based
for err in self.geometries.errors:
with self.assertRaises((GEOSException, ValueError)):
fromstr(err.wkt)
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, memoryview(b'0'))
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = memoryview(a2b_hex(g.hex.encode()))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_json(self):
"Testing GeoJSON input/output (via GDAL)."
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(force_bytes(ref_pnt.wkt))
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
        # Error shouldn't be raised on equivalence testing with
        # an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo': 'bar'})
self.assertNotEqual(g, False)
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
                self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(True, ls == fromstr(l.wkt))
self.assertEqual(False, ls == prev)
self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
if numpy:
self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(True, ml == fromstr(l.wkt))
self.assertEqual(False, ml == prev)
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy:
self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
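    # Hedged aside (not part of the original suite): from_bbox() takes the
    # bounding box in the same (xmin, ymin, xmax, ymax) order returned by the
    # `extent` property, so the two round-trip:
    #
    #     Polygon.from_bbox((0, 0, 1, 1)).extent   # (0.0, 0.0, 1.0, 1.0)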
def test_polygons(self):
"Testing Polygon objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(True, poly == fromstr(p.wkt))
self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
self.assertEqual(True, poly != prev)
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertTrue(p1 > p2)
self.assertFalse(p1 < p2)
self.assertFalse(p2 > p1)
self.assertTrue(p2 < p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertFalse(p4 < p3)
self.assertTrue(p3 < p4)
self.assertTrue(p4 > p3)
self.assertFalse(p3 > p4)
def test_multipolygons(self):
"Testing MultiPolygon objects."
fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
#### Memory issues with rings and polygons
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
str(ring1)
str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2:
tset = (5, 23)
else:
tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
                    for j in range(len(tset)):
                        self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
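    # Hedged aside (not part of the original suite): relate() returns a
    # DE-9IM intersection-matrix string, and relate_pattern() matches one
    # against a pattern built from 'T', 'F', '*', '0', '1' and '2', e.g.:
    #
    #     fromstr('POINT (0 0)').relate(fromstr('POINT (0 0)'))  # '0FFFFFFF2'
    #     a.relate_pattern(b, 'T*F**FFF*')    # the classic "within" pattern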
def test_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in xrange(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in xrange(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly:
self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)):
self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
# In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
# the SRID information is lost and set to -1 -- this is not a
# problem on the 3.0.0 version (another reason to upgrade).
exp_srid = self.null_srid
p2 = fromstr(p1.hex)
self.assertEqual(exp_srid, p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_custom_srid(self):
""" Test with a srid unknown from GDAL """
pnt = Point(111200, 220900, srid=999999)
self.assertTrue(pnt.ewkt.startswith("SRID=999999;POINT (111200.0"))
self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
self.assertIsNone(pnt.srs)
# Test conversion from custom to a known srid
c2w = gdal.CoordTransform(
gdal.SpatialReference('+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 +datum=WGS84 +units=m +no_defs'),
gdal.SpatialReference(4326))
new_pnt = pnt.transform(c2w, clone=True)
self.assertEqual(new_pnt.srid, 4326)
self.assertAlmostEqual(new_pnt.x, 1, 3)
self.assertAlmostEqual(new_pnt.y, 2, 3)
def test_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup:
new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(21, 100), random.randint(21, 100))
# Testing the assignment
mp[i] = new
str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in xrange(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
                # Offsetting each ring in the polygon by 500.
for j in xrange(len(poly)):
r = poly[j]
for k in xrange(len(r)):
r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
#mpoly[0][0][0] = (3.14, 2.71)
#self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
#self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
#del mpoly
def test_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2., 3., 8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1., 2.))
pnt.coords = (1., 2., 3.)
self.assertEqual((1., 2., 3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1., 2.))
ls[0] = (1., 2., 3.)
self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be circumference of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test_emptyCollections(self):
"Testing empty geometries and collections."
gc1 = GeometryCollection([])
gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
pnt = fromstr('POINT EMPTY')
ls = fromstr('LINESTRING EMPTY')
poly = fromstr('POLYGON EMPTY')
mls = fromstr('MULTILINESTRING EMPTY')
mpoly1 = fromstr('MULTIPOLYGON EMPTY')
mpoly2 = MultiPolygon(())
for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
self.assertRaises(GEOSIndexError, g.get_x)
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
else:
self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
coll.extend(p.wkt for p in self.geometries.polygons)
coll.extend(mp.wkt for mp in self.geometries.multipoints)
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_gdal(self):
"Testing `ogr` and `srs` properties."
g1 = fromstr('POINT(5 23)')
self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
self.assertIsNone(g1.srs)
g1_3d = fromstr('POINT(5 23 8)')
self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
self.assertEqual(g1_3d.ogr.z, 8)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
self.assertIsInstance(g2.srs, gdal.SpatialReference)
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform(self):
"Testing `transform` method."
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform_3d(self):
p3d = GEOSGeometry('POINT (5 23 100)', 4326)
p3d.transform(2774)
self.assertEqual(p3d.z, 100)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
if gdal.HAS_GDAL:
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
finally:
gdal.HAS_GDAL = old_has_gdal
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_nogdal(self):
""" Testing `transform` method (GDAL not available) """
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
finally:
gdal.HAS_GDAL = old_has_gdal
def test_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
from django.utils.six.moves import cPickle
import pickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))
# The SRID won't be exported in GEOS 3.0 release candidates.
no_srid = self.null_srid == -1
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
if not no_srid:
self.assertEqual(geom.srid, tmpg.srid)
def test_prepared(self):
"Testing PreparedGeometry support."
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
covers = [True, True, False] # No `covers` op for regular GEOS geoms.
for pnt, c in zip(pnts, covers):
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(c, prep.covers(pnt))
if geos_version_info()['version'] > '3.3.0':
self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
self.assertTrue(prep.disjoint(Point(-5, -5)))
poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
self.assertTrue(prep.overlaps(poly))
poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
self.assertTrue(prep.touches(poly))
poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
self.assertTrue(prep.within(poly))
# Original geometry deletion should not crash the prepared one (#21662)
del mpoly
self.assertTrue(prep.covers(Point(5, 5)))
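    # Hedged usage note (illustrative, not part of the original suite):
    # preparing pays off when one geometry is tested against many candidates,
    # since the prepared form caches internal indexing:
    #
    #     prep = big_poly.prepared
    #     hits = [pnt for pnt in many_points if prep.contains(pnt)]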
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def test_valid_reason(self):
"Testing IsValidReason support"
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertEqual(g.valid_reason, "Valid Geometry")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertFalse(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
@skipUnless(HAS_GEOS and geos_version_info()['version'] >= '3.2.0', "geos >= 3.2.0 is required")
def test_linearref(self):
"Testing linear referencing"
ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
self.assertEqual(ls.project(Point(0, 20)), 10.0)
self.assertEqual(ls.project(Point(7, 6)), 24)
self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)
self.assertEqual(ls.interpolate(10), Point(0, 10))
self.assertEqual(ls.interpolate(24), Point(10, 6))
self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))
self.assertEqual(mls.project(Point(0, 20)), 10)
self.assertEqual(mls.project(Point(7, 6)), 16)
self.assertEqual(mls.interpolate(9), Point(0, 9))
self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_geos_version(self):
"""Testing the GEOS version regular expression."""
from django.contrib.gis.geos.libgeos import version_regex
versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0')]
for v_init, v_geos, v_capi in versions:
m = version_regex.match(v_init)
self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init)
self.assertEqual(m.group('version'), v_geos)
self.assertEqual(m.group('capi_version'), v_capi)
|
eayunstack/neutron
|
refs/heads/master
|
neutron/tests/fullstack/resources/config.py
|
1
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
import fixtures
from neutron_lib import constants
from neutron.common import utils
from neutron.plugins.ml2.extensions import qos as qos_ext
from neutron.tests.common import config_fixtures
from neutron.tests.common.exclusive_resources import port
from neutron.tests.common import helpers as c_helpers
PHYSICAL_NETWORK_NAME = "physnet1"
class ConfigFixture(fixtures.Fixture):
"""A fixture that holds an actual Neutron configuration.
Note that 'self.config' is intended to only be updated once, during
the constructor, so if this fixture is re-used (setUp is called twice),
then the dynamic configuration values won't change. The correct usage
is initializing a new instance of the class.
"""
def __init__(self, env_desc, host_desc, temp_dir, base_filename):
super(ConfigFixture, self).__init__()
self.config = config_fixtures.ConfigDict()
self.env_desc = env_desc
self.host_desc = host_desc
self.temp_dir = temp_dir
self.base_filename = base_filename
def _setUp(self):
cfg_fixture = config_fixtures.ConfigFileFixture(
self.base_filename, self.config, self.temp_dir)
self.useFixture(cfg_fixture)
self.filename = cfg_fixture.filename
def _generate_namespace_suffix(self):
return utils.get_rand_name(prefix='test')
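# Illustrative usage sketch (assumed, not part of the original module): per
# the docstring above, a fixture is built fresh for each use rather than
# re-running setUp() on an old instance, e.g. inside a test:
#
#     cfg = NeutronConfigFixture(env_desc, host_desc, temp_dir,
#                                connection, rabbitmq_environment)
#     self.useFixture(cfg)   # renders the config file; path is cfg.filename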
class NeutronConfigFixture(ConfigFixture):
def __init__(self, env_desc, host_desc, temp_dir,
connection, rabbitmq_environment):
super(NeutronConfigFixture, self).__init__(
env_desc, host_desc, temp_dir, base_filename='neutron.conf')
self.config.update({
'DEFAULT': {
'host': self._generate_host(),
'state_path': self._generate_state_path(self.temp_dir),
'api_paste_config': self._generate_api_paste(),
'core_plugin': 'ml2',
'service_plugins': env_desc.service_plugins,
'auth_strategy': 'noauth',
'debug': 'True',
'agent_down_time': str(env_desc.agent_down_time),
'transport_url':
'rabbit://%(user)s:%(password)s@%(host)s:5672/%(vhost)s' %
{'user': rabbitmq_environment.user,
'password': rabbitmq_environment.password,
'host': rabbitmq_environment.host,
'vhost': rabbitmq_environment.vhost},
},
'database': {
'connection': connection,
},
'oslo_concurrency': {
'lock_path': '$state_path/lock',
},
'oslo_policy': {
'policy_file': self._generate_policy_json(),
},
'agent': {
'report_interval': str(env_desc.agent_down_time / 2.0)
},
})
# Set root_helper/root_helper_daemon only when env var is set
root_helper = os.environ.get('OS_ROOTWRAP_CMD')
if root_helper:
self.config['agent']['root_helper'] = root_helper
root_helper_daemon = os.environ.get('OS_ROOTWRAP_DAEMON_CMD')
if root_helper_daemon:
self.config['agent']['root_helper_daemon'] = root_helper_daemon
if env_desc.router_scheduler:
self.config['DEFAULT']['router_scheduler_driver'] = (
env_desc.router_scheduler)
def _setUp(self):
self.config['DEFAULT'].update({
'bind_port': self.useFixture(
port.ExclusivePort(constants.PROTO_NAME_TCP)).port
})
super(NeutronConfigFixture, self)._setUp()
def _generate_host(self):
return utils.get_rand_name(prefix='host-')
def _generate_state_path(self, temp_dir):
# Assume that temp_dir will be removed by the caller
self.state_path = tempfile.mkdtemp(prefix='state_path', dir=temp_dir)
return self.state_path
def _generate_api_paste(self):
return c_helpers.find_sample_file('api-paste.ini')
def _generate_policy_json(self):
return c_helpers.find_sample_file('policy.json')
class ML2ConfigFixture(ConfigFixture):
def __init__(self, env_desc, host_desc, temp_dir, tenant_network_types):
super(ML2ConfigFixture, self).__init__(
env_desc, host_desc, temp_dir, base_filename='ml2_conf.ini')
mechanism_drivers = self.env_desc.mech_drivers
if self.env_desc.l2_pop:
mechanism_drivers += ',l2population'
self.config.update({
'ml2': {
'tenant_network_types': tenant_network_types,
'mechanism_drivers': mechanism_drivers,
},
'ml2_type_vlan': {
'network_vlan_ranges': PHYSICAL_NETWORK_NAME + ':1000:2999',
},
'ml2_type_gre': {
'tunnel_id_ranges': '1:1000',
},
'ml2_type_vxlan': {
'vni_ranges': '1001:2000',
},
})
extension_drivers = ['port_security']
if env_desc.qos:
extension_drivers.append(qos_ext.QOS_EXT_DRIVER_ALIAS)
self.config['ml2']['extension_drivers'] = ','.join(extension_drivers)
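# Illustrative rendering of the ml2_conf.ini this fixture produces (values
# assumed for a VLAN + l2population + QoS environment):
#
#     [ml2]
#     tenant_network_types = vlan
#     mechanism_drivers = openvswitch,l2population
#     extension_drivers = port_security,qos
#
#     [ml2_type_vlan]
#     network_vlan_ranges = physnet1:1000:2999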
class OVSConfigFixture(ConfigFixture):
def __init__(self, env_desc, host_desc, temp_dir, local_ip):
super(OVSConfigFixture, self).__init__(
env_desc, host_desc, temp_dir,
base_filename='openvswitch_agent.ini')
self.tunneling_enabled = self.env_desc.tunneling_enabled
self.config.update({
'ovs': {
'local_ip': local_ip,
'integration_bridge': self._generate_integration_bridge(),
'of_interface': host_desc.of_interface,
},
'securitygroup': {
'firewall_driver': host_desc.firewall_driver,
},
'agent': {
'l2_population': str(self.env_desc.l2_pop),
'arp_responder': str(self.env_desc.arp_responder),
}
})
if self.tunneling_enabled:
self.config['agent'].update({
'tunnel_types': self.env_desc.network_type})
self.config['ovs'].update({
'tunnel_bridge': self._generate_tunnel_bridge(),
'int_peer_patch_port': self._generate_int_peer(),
'tun_peer_patch_port': self._generate_tun_peer()})
else:
self.config['ovs']['bridge_mappings'] = (
self._generate_bridge_mappings())
if env_desc.qos:
self.config['agent']['extensions'] = 'qos'
def _setUp(self):
if self.config['ovs']['of_interface'] == 'native':
self.config['ovs'].update({
'of_listen_port': self.useFixture(
port.ExclusivePort(constants.PROTO_NAME_TCP)).port
})
super(OVSConfigFixture, self)._setUp()
def _generate_bridge_mappings(self):
return '%s:%s' % (PHYSICAL_NETWORK_NAME,
utils.get_rand_device_name(prefix='br-eth'))
def _generate_integration_bridge(self):
return utils.get_rand_device_name(prefix='br-int')
def _generate_tunnel_bridge(self):
return utils.get_rand_device_name(prefix='br-tun')
def _generate_int_peer(self):
return utils.get_rand_device_name(prefix='patch-tun')
def _generate_tun_peer(self):
return utils.get_rand_device_name(prefix='patch-int')
def get_br_int_name(self):
return self.config.ovs.integration_bridge
def get_br_phys_name(self):
return self.config.ovs.bridge_mappings.split(':')[1]
def get_br_tun_name(self):
return self.config.ovs.tunnel_bridge
class LinuxBridgeConfigFixture(ConfigFixture):
def __init__(self, env_desc, host_desc, temp_dir, local_ip,
physical_device_name):
super(LinuxBridgeConfigFixture, self).__init__(
env_desc, host_desc, temp_dir,
base_filename="linuxbridge_agent.ini"
)
self.config.update({
'VXLAN': {
'enable_vxlan': str(self.env_desc.tunneling_enabled),
'local_ip': local_ip,
'l2_population': str(self.env_desc.l2_pop),
},
'securitygroup': {
'firewall_driver': host_desc.firewall_driver,
}
})
if env_desc.qos:
self.config.update({
'AGENT': {
'extensions': 'qos'
}
})
if self.env_desc.tunneling_enabled:
self.config.update({
'LINUX_BRIDGE': {
'bridge_mappings': self._generate_bridge_mappings(
physical_device_name
)
}
})
else:
self.config.update({
'LINUX_BRIDGE': {
'physical_interface_mappings':
self._generate_bridge_mappings(
physical_device_name
)
}
})
def _generate_bridge_mappings(self, device_name):
return '%s:%s' % (PHYSICAL_NETWORK_NAME, device_name)
class L3ConfigFixture(ConfigFixture):
def __init__(self, env_desc, host_desc, temp_dir, integration_bridge=None):
super(L3ConfigFixture, self).__init__(
env_desc, host_desc, temp_dir, base_filename='l3_agent.ini')
if host_desc.l2_agent_type == constants.AGENT_TYPE_OVS:
self._prepare_config_with_ovs_agent(integration_bridge)
elif host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE:
self._prepare_config_with_linuxbridge_agent()
if host_desc.l3_agent_mode:
self.config['DEFAULT'].update({
'agent_mode': host_desc.l3_agent_mode})
self.config['DEFAULT'].update({
'debug': 'True',
'test_namespace_suffix': self._generate_namespace_suffix(),
})
if host_desc.availability_zone:
self.config['agent'].update({
'availability_zone': host_desc.availability_zone
})
def _prepare_config_with_ovs_agent(self, integration_bridge):
self.config.update({
'DEFAULT': {
'interface_driver': ('neutron.agent.linux.interface.'
'OVSInterfaceDriver'),
'ovs_integration_bridge': integration_bridge,
'external_network_bridge': self._generate_external_bridge(),
}
})
def _prepare_config_with_linuxbridge_agent(self):
self.config.update({
'DEFAULT': {
'interface_driver': ('neutron.agent.linux.interface.'
'BridgeInterfaceDriver'),
}
})
def _generate_external_bridge(self):
return utils.get_rand_device_name(prefix='br-ex')
def get_external_bridge(self):
return self.config.DEFAULT.external_network_bridge
class DhcpConfigFixture(ConfigFixture):
def __init__(self, env_desc, host_desc, temp_dir, integration_bridge=None):
super(DhcpConfigFixture, self).__init__(
env_desc, host_desc, temp_dir, base_filename='dhcp_agent.ini')
if host_desc.l2_agent_type == constants.AGENT_TYPE_OVS:
self._prepare_config_with_ovs_agent(integration_bridge)
elif host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE:
self._prepare_config_with_linuxbridge_agent()
self.config['DEFAULT'].update({
'debug': 'True',
'dhcp_confs': self._generate_dhcp_path(),
'test_namespace_suffix': self._generate_namespace_suffix()
})
if host_desc.availability_zone:
self.config['agent'].update({
'availability_zone': host_desc.availability_zone
})
def _setUp(self):
super(DhcpConfigFixture, self)._setUp()
self.addCleanup(self._clean_dhcp_path)
def _prepare_config_with_ovs_agent(self, integration_bridge):
self.config.update({
'DEFAULT': {
'interface_driver': 'openvswitch',
'ovs_integration_bridge': integration_bridge,
}
})
def _prepare_config_with_linuxbridge_agent(self):
self.config.update({
'DEFAULT': {
'interface_driver': 'linuxbridge',
}
})
def _generate_dhcp_path(self):
        # NOTE(slaweq): dhcp_conf path needs to be a directory with read
        # permission for everyone, otherwise the dnsmasq process will not be
        # able to read its configs
self.dhcp_path = tempfile.mkdtemp(prefix="dhcp_configs_", dir="/tmp/")
os.chmod(self.dhcp_path, 0o755)
return self.dhcp_path
def _clean_dhcp_path(self):
shutil.rmtree(self.dhcp_path, ignore_errors=True)
|
avsm/xen-unstable
|
refs/heads/master
|
tools/python/xen/xend/XendDomainInfo.py
|
1
|
#===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005-2007 XenSource Ltd
#============================================================================
"""Representation of a single domain.
Includes support for domain construction, using
open-ended configurations.
Author: Mike Wray <mike.wray@hp.com>
"""
import logging
import time
import threading
import thread
import re
import copy
import os
import traceback
from types import StringTypes
import xen.lowlevel.xc
from xen.util import asserts, auxbin
from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
import xen.util.xsm.xsm as security
from xen.util import xsconstants
from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
pci_dict_to_bdf_str, pci_dict_to_xc_str, \
pci_convert_sxp_to_dict, pci_convert_dict_to_sxp, \
pci_dict_cmp, PCI_DEVFN, PCI_SLOT, PCI_FUNC
from xen.xend import balloon, sxp, uuid, image, arch
from xen.xend import XendOptions, XendNode, XendConfig
from xen.xend.XendConfig import scrub_password
from xen.xend.XendBootloader import bootloader, bootloader_tidy
from xen.xend.XendError import XendError, VmError
from xen.xend.XendDevices import XendDevices
from xen.xend.XendTask import XendTask
from xen.xend.xenstore.xstransact import xstransact, complete
from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
from xen.xend.xenstore.xswatch import xswatch
from xen.xend.XendConstants import *
from xen.xend.XendAPIConstants import *
from xen.xend.server.DevConstants import xenbusState
from xen.xend.XendVMMetrics import XendVMMetrics
from xen.xend import XendAPIStore
from xen.xend.XendPPCI import XendPPCI
from xen.xend.XendDPCI import XendDPCI
from xen.xend.XendPSCSI import XendPSCSI
from xen.xend.XendDSCSI import XendDSCSI
MIGRATE_TIMEOUT = 30.0
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
xc = xen.lowlevel.xc.xc()
xoptions = XendOptions.instance()
log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
def create(config):
"""Creates and start a VM using the supplied configuration.
@param config: A configuration object involving lists of tuples.
    @type config: list of lists, e.g. ['vm', ['image', 'xen.gz']]
@rtype: XendDomainInfo
@return: An up and running XendDomainInfo instance
@raise VmError: Invalid configuration or failure to start.
"""
from xen.xend import XendDomain
domconfig = XendConfig.XendConfig(sxp_obj = config)
othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
if othervm is None or othervm.domid is None:
othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
if othervm is not None and othervm.domid is not None:
raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
log.debug("XendDomainInfo.create(%s)", scrub_password(config))
vm = XendDomainInfo(domconfig)
try:
vm.start()
except:
log.exception('Domain construction failed')
vm.destroy()
raise
return vm
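# Illustrative SXP shape accepted by create() (field names beyond the
# docstring's own example are assumptions, not a complete working config):
#
#     create(['vm',
#             ['name', 'example-guest'],
#             ['memory', 256],
#             ['image', ['linux', ['kernel', '/boot/vmlinuz-xen']]]])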
def create_from_dict(config_dict):
"""Creates and start a VM using the supplied configuration.
@param config_dict: An configuration dictionary.
@rtype: XendDomainInfo
@return: An up and running XendDomainInfo instance
@raise VmError: Invalid configuration or failure to start.
"""
log.debug("XendDomainInfo.create_from_dict(%s)",
scrub_password(config_dict))
vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
try:
vm.start()
except:
log.exception('Domain construction failed')
vm.destroy()
raise
return vm
def recreate(info, priv):
"""Create the VM object for an existing domain. The domain must not
be dying, as the paths in the store should already have been removed,
and asking us to recreate them causes problems.
    @param info: Parsed configuration
    @type info: Dictionary
@param priv: Is a privileged domain (Dom 0)
@type priv: bool
@rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
@raise VmError: Invalid configuration.
@raise XendError: Errors with configuration.
"""
log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))
assert not info['dying']
xeninfo = XendConfig.XendConfig(dominfo = info)
xeninfo['is_control_domain'] = priv
xeninfo['is_a_template'] = False
xeninfo['auto_power_on'] = False
domid = xeninfo['domid']
uuid1 = uuid.fromString(xeninfo['uuid'])
needs_reinitialising = False
dompath = GetDomainPath(domid)
if not dompath:
raise XendError('No domain path in store for existing '
'domain %d' % domid)
log.info("Recreating domain %d, UUID %s. at %s" %
(domid, xeninfo['uuid'], dompath))
# need to verify the path and uuid if not Domain-0
# if the required uuid and vm aren't set, then that means
# we need to recreate the dom with our own values
#
# NOTE: this is probably not desirable, really we should just
# abort or ignore, but there may be cases where xenstore's
# entry disappears (eg. xenstore-rm /)
#
try:
vmpath = xstransact.Read(dompath, "vm")
if not vmpath:
if not priv:
log.warn('/local/domain/%d/vm is missing. recreate is '
'confused, trying our best to recover' % domid)
needs_reinitialising = True
raise XendError('reinit')
uuid2_str = xstransact.Read(vmpath, "uuid")
if not uuid2_str:
log.warn('%s/uuid/ is missing. recreate is confused, '
'trying our best to recover' % vmpath)
needs_reinitialising = True
raise XendError('reinit')
uuid2 = uuid.fromString(uuid2_str)
if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d. '
                     'Trying our best to recover' % domid)
needs_reinitialising = True
except XendError:
pass # our best shot at 'goto' in python :)
vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
vmpath = vmpath)
if needs_reinitialising:
vm._recreateDom()
vm._removeVm()
vm._storeVmDetails()
vm._storeDomDetails()
vm.image = image.create(vm, vm.info)
vm.image.recreate()
vm._registerWatches()
vm.refreshShutdown(xeninfo)
# register the domain in the list
from xen.xend import XendDomain
XendDomain.instance().add_domain(vm)
return vm
def restore(config):
"""Create a domain and a VM object to do a restore.
@param config: Domain SXP configuration
@type config: list of lists. (see C{create})
@rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
@raise VmError: Invalid configuration or failure to start.
@raise XendError: Errors with configuration.
"""
log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
resume = True)
try:
vm.resume()
return vm
except:
vm.destroy()
raise
def createDormant(domconfig):
"""Create a dormant/inactive XenDomainInfo without creating VM.
    This is for creating instances of persistent domains that are not
    yet started.
@param domconfig: Parsed configuration
@type domconfig: XendConfig object
@rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
@raise XendError: Errors with configuration.
"""
log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))
# domid does not make sense for non-running domains.
domconfig.pop('domid', None)
vm = XendDomainInfo(domconfig)
return vm
def domain_by_name(name):
"""Get domain by name
@params name: Name of the domain
@type name: string
@return: XendDomainInfo or None
"""
from xen.xend import XendDomain
return XendDomain.instance().domain_lookup_by_name_nr(name)
def shutdown_reason(code):
"""Get a shutdown reason from a code.
@param code: shutdown code
@type code: int
@return: shutdown reason
@rtype: string
"""
return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
def dom_get(dom):
"""Get info from xen for an existing domain.
@param dom: domain id
@type dom: int
@return: info or None
@rtype: dictionary
"""
try:
domlist = xc.domain_getinfo(dom, 1)
if domlist and dom == domlist[0]['domid']:
return domlist[0]
except Exception, err:
# ignore missing domain
log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
return None
def get_assigned_pci_devices(domid):
dev_str_list = []
path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    if num_devs is None or num_devs == "":
        return dev_str_list
    num_devs = int(num_devs)
for i in range(num_devs):
dev_str = xstransact.Read(path + 'dev-%i' % i)
dev_str_list = dev_str_list + [dev_str]
return dev_str_list
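# Illustrative xenstore layout read by get_assigned_pci_devices() (example
# values only, not taken from a live host):
#
#     /local/domain/0/backend/pci/7/0/num_devs = "2"
#     /local/domain/0/backend/pci/7/0/dev-0    = "0000:00:19.0"
#     /local/domain/0/backend/pci/7/0/dev-1    = "0000:03:00.0"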
def do_FLR(domid):
from xen.xend.server.pciif import parse_pci_name, PciDevice
dev_str_list = get_assigned_pci_devices(domid)
for dev_str in dev_str_list:
try:
dev = PciDevice(parse_pci_name(dev_str))
except Exception, e:
            raise VmError("pci: failed to locate device and "+
                "parse its resources - "+str(e))
dev.do_FLR()
class XendDomainInfo:
"""An object represents a domain.
    @TODO: try to unify dom and domid; they mean the same thing, but
           xc refers to it as dom while everywhere else, including
           xenstore, it is domid.  The best way is to change xc's
           python interface.
@ivar info: Parsed configuration
@type info: dictionary
@ivar domid: Domain ID (if VM has started)
@type domid: int or None
@ivar vmpath: XenStore path to this VM.
@type vmpath: string
@ivar dompath: XenStore path to this Domain.
@type dompath: string
@ivar image: Reference to the VM Image.
@type image: xen.xend.image.ImageHandler
@ivar store_port: event channel to xenstored
@type store_port: int
@ivar console_port: event channel to xenconsoled
@type console_port: int
@ivar store_mfn: xenstored mfn
@type store_mfn: int
@ivar console_mfn: xenconsoled mfn
@type console_mfn: int
@ivar notes: OS image notes
@type notes: dictionary
@ivar vmWatch: reference to a watch on the xenstored vmpath
@type vmWatch: xen.xend.xenstore.xswatch
@ivar shutdownWatch: reference to watch on the xenstored domain shutdown
@type shutdownWatch: xen.xend.xenstore.xswatch
@ivar shutdownStartTime: UNIX Time when domain started shutting down.
@type shutdownStartTime: float or None
@ivar restart_in_progress: Is a domain restart thread running?
@type restart_in_progress: bool
# @ivar state: Domain state
# @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
@ivar state_updated: lock for self.state
@type state_updated: threading.Condition
@ivar refresh_shutdown_lock: lock for polling shutdown state
@type refresh_shutdown_lock: threading.Condition
@ivar _deviceControllers: device controller cache for this domain
@type _deviceControllers: dict 'string' to DevControllers
"""
def __init__(self, info, domid = None, dompath = None, augment = False,
priv = False, resume = False, vmpath = None):
"""Constructor for a domain
@param info: parsed configuration
@type info: dictionary
@keyword domid: Set initial domain id (if any)
@type domid: int
@keyword dompath: Set initial dompath (if any)
@type dompath: string
@keyword augment: Augment given info with xenstored VM info
@type augment: bool
@keyword priv: Is a privileged domain (Dom 0)
@type priv: bool
@keyword resume: Is this domain being resumed?
@type resume: bool
"""
self.info = info
        if domid is None:
self.domid = self.info.get('domid')
else:
self.domid = domid
#REMOVE: uuid is now generated in XendConfig
#if not self._infoIsSet('uuid'):
# self.info['uuid'] = uuid.toString(uuid.create())
# Find a unique /vm/<uuid>/<integer> path if not specified.
# This avoids conflict between pre-/post-migrate domains when doing
# localhost relocation.
self.vmpath = vmpath
i = 0
        while self.vmpath is None:
self.vmpath = XS_VMROOT + self.info['uuid']
if i != 0:
self.vmpath = self.vmpath + '-' + str(i)
try:
if self._readVm("uuid"):
self.vmpath = None
i = i + 1
except:
pass
self.dompath = dompath
self.image = None
self.store_port = None
self.store_mfn = None
self.console_port = None
self.console_mfn = None
self.native_protocol = None
self.vmWatch = None
self.shutdownWatch = None
self.shutdownStartTime = None
self._resume = resume
self.restart_in_progress = False
self.state_updated = threading.Condition()
self.refresh_shutdown_lock = threading.Condition()
self._stateSet(DOM_STATE_HALTED)
self._deviceControllers = {}
for state in DOM_STATES_OLD:
self.info[state] = 0
if augment:
self._augmentInfo(priv)
self._checkName(self.info['name_label'])
self.metrics = XendVMMetrics(uuid.createString(), self)
#
# Public functions available through XMLRPC
#
def start(self, is_managed = False):
"""Attempts to start the VM by do the appropriate
initialisation if it not started.
"""
from xen.xend import XendDomain
if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
try:
XendTask.log_progress(0, 30, self._constructDomain)
XendTask.log_progress(31, 60, self._initDomain)
XendTask.log_progress(61, 70, self._storeVmDetails)
XendTask.log_progress(71, 80, self._storeDomDetails)
XendTask.log_progress(81, 90, self._registerWatches)
XendTask.log_progress(91, 100, self.refreshShutdown)
xendomains = XendDomain.instance()
# save running configuration if XendDomains believe domain is
# persistent
if is_managed:
xendomains.managed_config_save(self)
except:
log.exception('VM start failed')
self.destroy()
raise
else:
raise XendError('VM already running')
def resume(self):
"""Resumes a domain that has come back from suspension."""
state = self._stateGet()
if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
try:
self._constructDomain()
try:
self._setCPUAffinity()
except:
# usually a CPU we want to set affinity to does not exist
# we just ignore it so that the domain can still be restored
log.warn("Cannot restore CPU affinity")
self._setSchedParams()
self._storeVmDetails()
self._createChannels()
self._createDevices()
self._storeDomDetails()
self._endRestore()
except:
log.exception('VM resume failed')
self.destroy()
raise
else:
raise XendError('VM is not suspended; it is %s'
% XEN_API_VM_POWER_STATE[state])
def shutdown(self, reason):
"""Shutdown a domain by signalling this via xenstored."""
log.debug('XendDomainInfo.shutdown(%s)', reason)
if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
raise XendError('Domain cannot be shutdown')
if self.domid == 0:
raise XendError('Domain 0 cannot be shutdown')
if reason not in DOMAIN_SHUTDOWN_REASONS.values():
raise XendError('Invalid reason: %s' % reason)
self.storeDom("control/shutdown", reason)
# HVM domain shuts itself down only if it has PV drivers
if self.info.is_hvm():
hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
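# No callback IRQ means the guest has no PV drivers, and a non-zero
# ACPI S-state means it is asleep; in either case ask the hypervisor
# to shut the domain down directly.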
if not hvm_pvdrv or hvm_s_state != 0:
code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
log.info("HVM save:remote shutdown dom %d!", self.domid)
xc.domain_shutdown(self.domid, code)
def pause(self):
"""Pause domain
@raise XendError: Failed pausing a domain
"""
try:
if(self.domid):
# get all blktap2 devices
dev = xstransact.List(self.vmpath + '/device/tap2')
for x in dev:
path = self.getDeviceController('tap2').readBackend(x, 'params')
if path and path.startswith('/dev/xen/blktap-2'):
#Figure out the sysfs path.
pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
ctrlid = pattern.search(path)
ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
#pause the disk
f = open(ctrl + '/pause', 'w')
f.write('pause')
f.close()
except Exception, ex:
log.warn('Could not pause blktap disk: %s' % str(ex))
try:
xc.domain_pause(self.domid)
self._stateSet(DOM_STATE_PAUSED)
except Exception, ex:
log.exception(ex)
raise XendError("Domain unable to be paused: %s" % str(ex))
def unpause(self):
"""Unpause domain
@raise XendError: Failed unpausing a domain
"""
try:
if(self.domid):
dev = xstransact.List(self.vmpath + '/device/tap2')
for x in dev:
path = self.getDeviceController('tap2').readBackend(x, 'params')
if path and path.startswith('/dev/xen/blktap-2'):
#Figure out the sysfs path.
pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
ctrlid = pattern.search(path)
ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
#unpause the disk
if(os.path.exists(ctrl + '/resume')):
f = open(ctrl + '/resume', 'w')
f.write('resume')
f.close()
except Exception, ex:
log.warn('Could not unpause blktap disk: %s' % str(ex))
try:
xc.domain_unpause(self.domid)
self._stateSet(DOM_STATE_RUNNING)
except Exception, ex:
log.exception(ex)
raise XendError("Domain unable to be unpaused: %s" % str(ex))
def send_sysrq(self, key):
""" Send a Sysrq equivalent key via xenstored."""
if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
raise XendError("Domain '%s' is not started" % self.info['name_label'])
asserts.isCharConvertible(key)
self.storeDom("control/sysrq", '%c' % key)
def pci_device_configure_boot(self):
if not self.info.is_hvm():
return
devid = '0'
dev_info = self._getDeviceInfo_pci(devid)
if dev_info is None:
return
# get the virtual slot info from xenstore
dev_uuid = sxp.child_value(dev_info, 'uuid')
pci_conf = self.info['devices'][dev_uuid][1]
pci_devs = pci_conf['devs']
# Keep a set of keys that are done rather than
# just iterating through set(map(..., pci_devs))
# to preserve any order information present.
done = set()
for key in map(lambda x: x['key'], pci_devs):
if key in done:
continue
done |= set([key])
dev = filter(lambda x: x['key'] == key, pci_devs)
head_dev = dev.pop()
dev_sxp = pci_convert_dict_to_sxp(head_dev, 'Initialising',
'Booting')
self.pci_device_configure(dev_sxp)
# That is all for single-function virtual devices
if len(dev) == 0:
continue
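# If the head function was assigned an automatic hotplug slot,
# re-read the device info to learn the slot that was actually
# chosen, then rebase the remaining functions onto that slot.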
if int(head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
new_dev_info = self._getDeviceInfo_pci(devid)
if new_dev_info is None:
continue
new_dev_uuid = sxp.child_value(new_dev_info, 'uuid')
new_pci_conf = self.info['devices'][new_dev_uuid][1]
new_pci_devs = new_pci_conf['devs']
new_head_dev = filter(lambda x: pci_dict_cmp(x, head_dev),
new_pci_devs)[0]
if int(new_head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
continue
vdevfn = PCI_SLOT(int(new_head_dev['vdevfn'], 16))
new_dev = []
for i in dev:
i['vdevfn'] = '0x%02x' % \
PCI_DEVFN(vdevfn,
PCI_FUNC(int(i['vdevfn'], 16)))
new_dev.append(i)
dev = new_dev
for i in dev:
dev_sxp = pci_convert_dict_to_sxp(i, 'Initialising', 'Booting')
self.pci_device_configure(dev_sxp)
def hvm_pci_device_create(self, dev_config):
log.debug("XendDomainInfo.hvm_pci_device_create: %s"
% scrub_password(dev_config))
if not self.info.is_hvm():
raise VmError("hvm_pci_device_create called on non-HVM guest")
#all the PCI devs share one conf node
devid = '0'
new_dev = dev_config['devs'][0]
dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
# Check for conflicts before triggering the hotplug event.
if dev_info is not None:
dev_uuid = sxp.child_value(dev_info, 'uuid')
pci_conf = self.info['devices'][dev_uuid][1]
pci_devs = pci_conf['devs']
for x in pci_devs:
if (int(x['vdevfn'], 16) == int(new_dev['vdevfn'], 16) and
not int(x['vdevfn'], 16) & AUTO_PHP_SLOT):
raise VmError("vdevfn %s already have a device." %
(new_dev['vdevfn']))
if (pci_dict_cmp(x, new_dev)):
raise VmError("device is already inserted")
# Test whether the devices can be assigned with VT-d
bdf = xc.test_assign_device(0, pci_dict_to_xc_str(new_dev))
if bdf != 0:
if bdf == -1:
raise VmError("failed to assign device: maybe the platform"
" doesn't support VT-d, or VT-d isn't enabled"
" properly?")
raise VmError("fail to assign device(%s): maybe it has"
" already been assigned to other domain, or maybe"
" it doesn't exist." % pci_dict_to_bdf_str(new_dev))
# Here, we duplicate some checks (in some cases, we mustn't allow
# a device to be hot-plugged into an HVM guest) that are also done in
# pci_device_configure()'s self.device_create(dev_sxp) or
# dev_control.reconfigureDevice(devid, dev_config).
# We must perform these checks before sending the 'pci-ins' command to
# ioemu.
# Test whether the device is owned by pciback. For instance, we can't
# hotplug a device being used by Dom0 itself to an HVM guest.
from xen.xend.server.pciif import PciDevice, parse_pci_name
try:
pci_device = PciDevice(new_dev)
except Exception, e:
raise VmError("pci: failed to locate device and "+
"parse it's resources - "+str(e))
if pci_device.driver!='pciback' and pci_device.driver!='pci-stub':
raise VmError(("pci: PCI Backend does not own device "+ \
"%s\n"+ \
"See the pciback.hide kernel "+ \
"command-line parameter or\n"+ \
"bind your slot/device to the PCI backend using sysfs" \
)%(pci_device.name))
# Check non-page-aligned MMIO BAR.
if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
pci_device.name)
# Check the co-assignment.
# To pci-attach a device D to domN, we should ensure each of D's
# co-assignment devices hasn't been assigned, or has been assigned to
# domN.
coassignment_list = pci_device.find_coassigned_devices()
pci_device.devs_check_driver(coassignment_list)
assigned_pci_device_str_list = self._get_assigned_pci_devices()
for pci_str in coassignment_list:
pci_dev = parse_pci_name(pci_str)
if xc.test_assign_device(0, pci_dict_to_xc_str(pci_dev)) == 0:
continue
if not pci_str in assigned_pci_device_str_list:
raise VmError(("pci: failed to pci-attach %s to domain %s" + \
" because one of its co-assignment device %s has been" + \
" assigned to other domain." \
)% (pci_device.name, self.info['name_label'], pci_str))
return self.hvm_pci_device_insert_dev(new_dev)
def hvm_pci_device_insert(self, dev_config):
log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
% scrub_password(dev_config))
if not self.info.is_hvm():
raise VmError("hvm_pci_device_create called on non-HVM guest")
new_dev = dev_config['devs'][0]
return self.hvm_pci_device_insert_dev(new_dev)
def hvm_pci_device_insert_dev(self, new_dev):
log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
% scrub_password(new_dev))
if self.domid is not None:
opts = ''
if new_dev.has_key('opts'):
opts = ',' + serialise_pci_opts(new_dev['opts'])
bdf_str = "%s@%02x%s" % (pci_dict_to_bdf_str(new_dev),
int(new_dev['vdevfn'], 16), opts)
log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s" % bdf_str)
self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
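# The device model writes the effective virtual devfn back to its
# xenstore parameter node; read it so we can report the slot in use.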
vdevfn = xstransact.Read("/local/domain/0/device-model/%i/parameter"
% self.getDomid())
try:
vdevfn_int = int(vdevfn, 16)
except ValueError:
raise VmError(("Cannot pass-through PCI function '%s'. " +
"Device model reported an error: %s") %
(bdf_str, vdevfn))
else:
vdevfn = new_dev['vdevfn']
return vdevfn
def device_create(self, dev_config):
"""Create a new device.
@param dev_config: device configuration
@type dev_config: SXP object (parsed config)
"""
log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
dev_type = sxp.name(dev_config)
dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
dev_config_dict = self.info['devices'][dev_uuid][1]
log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))
if dev_type == 'vif':
for x in dev_config:
if x != 'vif' and x[0] == 'mac':
if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");
if self.domid is not None:
try:
dev_config_dict['devid'] = devid = \
self._createDevice(dev_type, dev_config_dict)
if dev_type == 'tap2':
# createDevice may create a blktap1 device if blktap2 is not
# installed or if the blktap driver is not supported in
# blktap1
dev_type = self.getBlockDeviceClass(devid)
self._waitForDevice(dev_type, devid)
except VmError, ex:
del self.info['devices'][dev_uuid]
if dev_type == 'pci':
for dev in dev_config_dict['devs']:
XendAPIStore.deregister(dev['uuid'], 'DPCI')
elif dev_type == 'vscsi':
for dev in dev_config_dict['devs']:
XendAPIStore.deregister(dev['uuid'], 'DSCSI')
elif dev_type == 'tap' or dev_type == 'tap2':
self.info['vbd_refs'].remove(dev_uuid)
else:
self.info['%s_refs' % dev_type].remove(dev_uuid)
raise ex
else:
devid = None
xen.xend.XendDomain.instance().managed_config_save(self)
return self.getDeviceController(dev_type).sxpr(devid)
def pci_device_configure(self, dev_sxp, devid = 0):
"""Configure an existing pci device.
@param dev_sxp: device configuration
@type dev_sxp: SXP object (parsed config)
@param devid: device id
@type devid: int
@return: Returns True if successfully updated device
@rtype: boolean
"""
log.debug("XendDomainInfo.pci_device_configure: %s"
% scrub_password(dev_sxp))
dev_class = sxp.name(dev_sxp)
if dev_class != 'pci':
return False
pci_state = sxp.child_value(dev_sxp, 'state')
pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
existing_dev_info = self._getDeviceInfo_pci(devid)
if existing_dev_info is None and pci_state != 'Initialising':
raise XendError("Cannot detach when pci platform does not exist")
pci_dev = sxp.children(dev_sxp, 'dev')[0]
dev_config = pci_convert_sxp_to_dict(dev_sxp)
dev = dev_config['devs'][0]
# Do HVM specific processing
if self.info.is_hvm():
if pci_state == 'Initialising':
# HVM PCI device attachment
if pci_sub_state == 'Booting':
vdevfn = self.hvm_pci_device_insert(dev_config)
else:
vdevfn = self.hvm_pci_device_create(dev_config)
# Update vdevfn
dev['vdevfn'] = vdevfn
for n in sxp.children(pci_dev):
if(n[0] == 'vdevfn'):
n[1] = vdevfn
else:
# HVM PCI device detachment
existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
existing_pci_devs = existing_pci_conf['devs']
new_devs = filter(lambda x: pci_dict_cmp(x, dev),
existing_pci_devs)
if len(new_devs) == 0:
raise VmError("Device %s is not connected" %
pci_dict_to_bdf_str(dev))
new_dev = new_devs[0]
# Only tell qemu-dm to unplug function 0.
# When unplugging a function, all functions in the
# same vslot must be unplugged, and function 0 must
# be one of the functions present when a vslot is
# hot-plugged. Telling qemu-dm to unplug function 0
# also tells it to unplug all other functions in the
# same vslot.
if (PCI_FUNC(int(new_dev['vdevfn'], 16)) == 0):
self.hvm_destroyPCIDevice(new_dev)
# Update vdevfn
dev['vdevfn'] = new_dev['vdevfn']
for n in sxp.children(pci_dev):
if(n[0] == 'vdevfn'):
n[1] = new_dev['vdevfn']
# If pci platform does not exist, create and exit.
if existing_dev_info is None:
self.device_create(dev_sxp)
return True
if self.domid is not None:
# use DevController.reconfigureDevice to change device config
dev_control = self.getDeviceController(dev_class)
dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
if not self.info.is_hvm():
# in PV case, wait until backend state becomes connected.
dev_control.waitForDevice_reconfigure(devid)
num_devs = dev_control.cleanupDevice(devid)
# update XendConfig with new device info
if dev_uuid:
new_dev_sxp = dev_control.configuration(devid)
self.info.device_update(dev_uuid, new_dev_sxp)
# If there is no device left, destroy pci and remove config.
if num_devs == 0:
if self.info.is_hvm():
self.destroyDevice('pci', devid, True)
else:
self.destroyDevice('pci', devid)
del self.info['devices'][dev_uuid]
else:
new_dev_sxp = ['pci']
for cur_dev in sxp.children(existing_dev_info, 'dev'):
if pci_state == 'Closing':
if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
continue
new_dev_sxp.append(cur_dev)
if pci_state == 'Initialising' and pci_sub_state != 'Booting':
for new_dev in sxp.children(dev_sxp, 'dev'):
new_dev_sxp.append(new_dev)
dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
self.info.device_update(dev_uuid, new_dev_sxp)
# If there is no device left, remove config.
if len(sxp.children(new_dev_sxp, 'dev')) == 0:
del self.info['devices'][dev_uuid]
xen.xend.XendDomain.instance().managed_config_save(self)
return True
def vscsi_device_configure(self, dev_sxp):
"""Configure an existing vscsi device.
quoted pci funciton
"""
def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
if not dev_info:
return False
for dev in sxp.children(dev_info, 'dev'):
if p_devs is not None:
if sxp.child_value(dev, 'p-dev') in p_devs:
return True
if v_devs is not None:
if sxp.child_value(dev, 'v-dev') in v_devs:
return True
return False
def _vscsi_be(be):
be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
if be_xdi is not None:
be_domid = be_xdi.getDomid()
if be_domid is not None:
return str(be_domid)
return str(be)
dev_class = sxp.name(dev_sxp)
if dev_class != 'vscsi':
return False
dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
devs = dev_config['devs']
v_devs = [d['v-dev'] for d in devs]
state = devs[0]['state']
req_devid = int(devs[0]['devid'])
cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)
if state == xenbusState['Initialising']:
# new create
# If request devid does not exist, create and exit.
p_devs = [d['p-dev'] for d in devs]
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'vscsi':
continue
if _is_vscsi_defined(dev_info, p_devs = p_devs):
raise XendError('The physical device "%s" is already defined' % \
p_devs[0])
if cur_dev_sxp is None:
self.device_create(dev_sxp)
return True
if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
raise XendError('The virtual device "%s" is already defined' % \
v_devs[0])
if int(dev_config['feature-host']) != \
int(sxp.child_value(cur_dev_sxp, 'feature-host')):
raise XendError('The physical device "%s" cannot define '
'because mode is different' % devs[0]['p-dev'])
new_be = dev_config.get('backend', None)
if new_be is not None:
cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
if cur_be is None:
cur_be = xen.xend.XendDomain.DOM0_ID
new_be_dom = _vscsi_be(new_be)
cur_be_dom = _vscsi_be(cur_be)
if new_be_dom != cur_be_dom:
raise XendError('The physical device "%s" cannot define '
'because backend is different' % devs[0]['p-dev'])
elif state == xenbusState['Closing']:
if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
raise XendError("Cannot detach vscsi device does not exist")
if self.domid is not None:
# use DevController.reconfigureDevice to change device config
dev_control = self.getDeviceController(dev_class)
dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
dev_control.waitForDevice_reconfigure(req_devid)
num_devs = dev_control.cleanupDevice(req_devid)
# update XendConfig with new device info
if dev_uuid:
new_dev_sxp = dev_control.configuration(req_devid)
self.info.device_update(dev_uuid, new_dev_sxp)
# If there is no device left, destroy vscsi and remove config.
if num_devs == 0:
self.destroyDevice('vscsi', req_devid)
del self.info['devices'][dev_uuid]
else:
new_dev_sxp = ['vscsi']
cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
new_dev_sxp.append(cur_mode)
try:
cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
new_dev_sxp.append(cur_be)
except IndexError:
pass
for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
if state == xenbusState['Closing']:
if int(cur_mode[1]) == 1:
continue
if sxp.child_value(cur_dev, 'v-dev') in v_devs:
continue
new_dev_sxp.append(cur_dev)
if state == xenbusState['Initialising']:
for new_dev in sxp.children(dev_sxp, 'dev'):
new_dev_sxp.append(new_dev)
dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
self.info.device_update(dev_uuid, new_dev_sxp)
# If there is only 'vscsi' in new_dev_sxp, remove the config.
if len(sxp.children(new_dev_sxp, 'dev')) == 0:
del self.info['devices'][dev_uuid]
xen.xend.XendDomain.instance().managed_config_save(self)
return True
def device_configure(self, dev_sxp, devid = None):
"""Configure an existing device.
@param dev_sxp: device configuration
@type dev_config: SXP object (parsed config)
@param devid: device id
@type devid: int
@return: Returns True if successfully updated device
@rtype: boolean
"""
# convert device sxp to a dict
dev_class = sxp.name(dev_sxp)
dev_config = {}
if dev_class == 'pci':
return self.pci_device_configure(dev_sxp)
if dev_class == 'vscsi':
return self.vscsi_device_configure(dev_sxp)
for opt_val in dev_sxp[1:]:
try:
dev_config[opt_val[0]] = opt_val[1]
except IndexError:
pass
dev_control = self.getDeviceController(dev_class)
if devid is None:
dev = dev_config.get('dev', '')
if not dev:
raise VmError('Block device must have virtual details specified')
if 'ioemu:' in dev:
(_, dev) = dev.split(':', 1)
try:
(dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
except ValueError:
pass
devid = dev_control.convertToDeviceNumber(dev)
dev_info = self._getDeviceInfo_vbd(devid)
if dev_info is None:
raise VmError("Device %s not connected" % devid)
dev_uuid = sxp.child_value(dev_info, 'uuid')
if self.domid is not None:
# use DevController.reconfigureDevice to change device config
dev_control.reconfigureDevice(devid, dev_config)
else:
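# Domain is not running: only allow a no-op reconfiguration of a
# read-only cdrom vbd; refuse anything that would change the device.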
(_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
if (new_f['device-type'] == 'cdrom' and
sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
new_b['mode'] == 'r' and
sxp.child_value(dev_info, 'mode') == 'r'):
pass
else:
raise VmError('Refusing to reconfigure device %s:%d to %s' %
(dev_class, devid, dev_config))
# update XendConfig with new device info
self.info.device_update(dev_uuid, dev_sxp)
xen.xend.XendDomain.instance().managed_config_save(self)
return True
def waitForDevices(self):
"""Wait for this domain's configured devices to connect.
@raise VmError: if any device fails to initialise.
"""
for devclass in XendDevices.valid_devices():
self.getDeviceController(devclass).waitForDevices()
def hvm_destroyPCIDevice(self, pci_dev):
log.debug("hvm_destroyPCIDevice: %s", pci_dev)
if not self.info.is_hvm():
raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
# Check the co-assignment.
# To pci-detach a device D from domN, we should ensure: for each DD in the
# list of D's co-assignment devices, DD is not assigned (to domN).
#
from xen.xend.server.pciif import PciDevice
try:
pci_device = PciDevice(pci_dev)
except Exception, e:
raise VmError("pci: failed to locate device and "+
"parse it's resources - "+str(e))
coassignment_list = pci_device.find_coassigned_devices()
coassignment_list.remove(pci_device.name)
assigned_pci_device_str_list = self._get_assigned_pci_devices()
for pci_str in coassignment_list:
if pci_str in assigned_pci_device_str_list:
raise VmError(("pci: failed to pci-detach %s from domain %s" + \
" because one of its co-assignment device %s is still " + \
" assigned to the domain." \
)% (pci_device.name, self.info['name_label'], pci_str))
bdf_str = pci_dict_to_bdf_str(pci_dev)
log.info("hvm_destroyPCIDevice:%s:%s!", pci_dev, bdf_str)
if self.domid is not None:
self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
return 0
def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
deviceClass, devid)
if rm_cfg:
# Convert devid to device number. A device number is
# needed to remove its configuration.
dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
# Save current sxprs. A device number and a backend
# path are needed to remove its configuration but sxprs
# do not have those after calling destroyDevice.
sxprs = self.getDeviceSxprs(deviceClass)
rc = None
if self.domid is not None:
# The new blktap implementation may need a sysfs write after everything is torn down.
if deviceClass == 'tap2':
dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
backpath = xstransact.Read(frontpath, "backend")
thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))
rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
if not force and rm_cfg:
# The backend path, other than the device itself,
# has to be passed because its accompanied frontend
# path may be void until its removal is actually
# issued. It is probable because destroyDevice is
# issued first.
for dev_num, dev_info in sxprs:
dev_num = int(dev_num)
if dev_num == dev:
for x in dev_info:
if x[0] == 'backend':
backend = x[1]
break
break
self._waitForDevice_destroy(deviceClass, devid, backend)
if rm_cfg:
if deviceClass == 'vif':
if self.domid is not None:
mac = ''
for dev_num, dev_info in sxprs:
dev_num = int(dev_num)
if dev_num == dev:
for x in dev_info:
if x[0] == 'mac':
mac = x[1]
break
break
dev_info = self._getDeviceInfo_vif(mac)
else:
_, dev_info = sxprs[dev]
else: # 'vbd' or 'tap' or 'tap2'
dev_info = self._getDeviceInfo_vbd(dev)
# To remove the UUID of the device from refs,
# deviceClass must be always 'vbd'.
deviceClass = 'vbd'
if dev_info is None:
raise XendError("Device %s is not defined" % devid)
dev_uuid = sxp.child_value(dev_info, 'uuid')
del self.info['devices'][dev_uuid]
self.info['%s_refs' % deviceClass].remove(dev_uuid)
xen.xend.XendDomain.instance().managed_config_save(self)
return rc
def getDeviceSxprs(self, deviceClass):
if deviceClass == 'pci':
dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
if dev_info is None:
return []
dev_uuid = sxp.child_value(dev_info, 'uuid')
pci_devs = self.info['devices'][dev_uuid][1]['devs']
return pci_devs
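# For an active domain, read live device info from the controller;
# otherwise reconstruct the sxprs from the stored configuration.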
if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
return self.getDeviceController(deviceClass).sxprs()
else:
sxprs = []
dev_num = 0
for dev_type, dev_info in self.info.all_devices_sxpr():
if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap', 'tap2']) or \
(deviceClass != 'vbd' and dev_type != deviceClass):
continue
if deviceClass == 'vscsi':
vscsi_devs = ['devs', []]
for vscsi_dev in sxp.children(dev_info, 'dev'):
vscsi_dev.append(['frontstate', None])
vscsi_devs[1].append(vscsi_dev)
dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
elif deviceClass == 'vbd':
dev = sxp.child_value(dev_info, 'dev')
if 'ioemu:' in dev:
(_, dev) = dev.split(':', 1)
try:
(dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
except ValueError:
dev_name = dev
dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
sxprs.append([dev_num, dev_info])
else:
sxprs.append([dev_num, dev_info])
dev_num += 1
return sxprs
def getBlockDeviceClass(self, devid):
# if the domain is running we can get the device class from xenstore.
# This is more accurate, as blktap1 devices show up as blktap2 devices
# in the config.
if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
# All block devices have a vbd frontend, so we know the frontend path
dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
frontendPath = "%s/device/vbd/%s" % (self.dompath, dev)
for devclass in XendDevices.valid_devices():
for dev in xstransact.List("%s/device/%s" % (self.vmpath, devclass)):
devFrontendPath = xstransact.Read("%s/device/%s/%s/frontend" % (self.vmpath, devclass, dev))
if frontendPath == devFrontendPath:
return devclass
else: # the domain is not active so we must get the device class
# from the config
# To get a device number from the devid,
# we temporarily use the device controller of VBD.
dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
dev_info = self._getDeviceInfo_vbd(dev)
if dev_info:
return dev_info[0]
def _getDeviceInfo_vif(self, mac):
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'vif':
continue
if mac == sxp.child_value(dev_info, 'mac'):
return dev_info
def _getDeviceInfo_vbd(self, devid):
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'vbd' and dev_type != 'tap' and dev_type != 'tap2':
continue
dev = sxp.child_value(dev_info, 'dev')
dev = dev.split(':')[0]
dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
if devid == dev:
return dev_info
def _getDeviceInfo_pci(self, devid):
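# All PCI devices share a single 'pci' configuration node, so the
# devid argument is effectively ignored and the first entry is returned.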
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'pci':
continue
return dev_info
return None
def _getDeviceInfo_vscsi(self, devid):
devid = int(devid)
for dev_type, dev_info in self.info.all_devices_sxpr():
if dev_type != 'vscsi':
continue
devs = sxp.children(dev_info, 'dev')
if devid == int(sxp.child_value(devs[0], 'devid')):
return dev_info
return None
def _get_assigned_pci_devices(self, devid = 0):
if self.domid is not None:
return get_assigned_pci_devices(self.domid)
dev_info = self._getDeviceInfo_pci(devid)
if dev_info is None:
return []
dev_uuid = sxp.child_value(dev_info, 'uuid')
pci_conf = self.info['devices'][dev_uuid][1]
return map(pci_dict_to_bdf_str, pci_conf['devs'])
def setMemoryTarget(self, target):
"""Set the memory target of this domain.
@param target: In MiB.
"""
log.debug("Setting memory target of domain %s (%s) to %d MiB.",
self.info['name_label'], str(self.domid), target)
MiB = 1024 * 1024
memory_cur = self.get_memory_dynamic_max() / MiB
if self.domid == 0:
dom0_min_mem = xoptions.get_dom0_min_mem()
if target < memory_cur and dom0_min_mem > target:
raise XendError("memory_dynamic_max too small")
self._safe_set_memory('memory_dynamic_min', target * MiB)
self._safe_set_memory('memory_dynamic_max', target * MiB)
if self.domid >= 0:
if target > memory_cur:
balloon.free((target - memory_cur) * 1024, self)
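# The /vm node stores the target in MiB, while xenstore's
# memory/target and xc's set_target_mem expect KiB.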
self.storeVm("memory", target)
self.storeDom("memory/target", target << 10)
xc.domain_set_target_mem(self.domid,
(target * 1024))
xen.xend.XendDomain.instance().managed_config_save(self)
def setMemoryMaximum(self, limit):
"""Set the maximum memory limit of this domain
@param limit: In MiB.
"""
log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
self.info['name_label'], str(self.domid), limit)
maxmem_cur = self.get_memory_static_max()
MiB = 1024 * 1024
self._safe_set_memory('memory_static_max', limit * MiB)
if self.domid >= 0:
maxmem = int(limit) * 1024
try:
return xc.domain_setmaxmem(self.domid, maxmem)
except Exception, ex:
self._safe_set_memory('memory_static_max', maxmem_cur)
raise XendError(str(ex))
xen.xend.XendDomain.instance().managed_config_save(self)
def getVCPUInfo(self):
try:
# We include the domain name and ID, to help xm.
sxpr = ['domain',
['domid', self.domid],
['name', self.info['name_label']],
['vcpu_count', self.info['VCPUs_max']]]
for i in range(0, self.info['VCPUs_max']):
if self.domid is not None:
info = xc.vcpu_getinfo(self.domid, i)
sxpr.append(['vcpu',
['number', i],
['online', info['online']],
['blocked', info['blocked']],
['running', info['running']],
['cpu_time', info['cpu_time'] / 1e9],
['cpu', info['cpu']],
['cpumap', info['cpumap']]])
else:
sxpr.append(['vcpu',
['number', i],
['online', 0],
['blocked', 0],
['running', 0],
['cpu_time', 0.0],
['cpu', -1],
['cpumap', self.info['cpus'][i] and \
self.info['cpus'][i] or range(64)]])
return sxpr
except RuntimeError, exn:
raise XendError(str(exn))
def getDomInfo(self):
return dom_get(self.domid)
#
# internal functions ... TODO: re-categorise
#
def _augmentInfo(self, priv):
"""Augment self.info, as given to us through L{recreate}, with
values taken from the store. This recovers those values known
to xend but not to the hypervisor.
"""
augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
if priv:
augment_entries.remove('memory')
augment_entries.remove('maxmem')
augment_entries.remove('vcpus')
augment_entries.remove('vcpu_avail')
vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
for k in augment_entries])
# make returned lists into a dictionary
vm_config = dict(zip(augment_entries, vm_config))
for arg in augment_entries:
val = vm_config[arg]
if val != None:
if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
self.info[xapiarg] = val
elif arg == "memory":
self.info["static_memory_min"] = val
elif arg == "maxmem":
self.info["static_memory_max"] = val
else:
self.info[arg] = val
# read CPU Affinity
self.info['cpus'] = []
vcpus_info = self.getVCPUInfo()
for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))
# For dom0, we ignore any stored value for the vcpus fields, and
# read the current value from Xen instead. This allows boot-time
# settings to take precedence over any entries in the store.
if priv:
xeninfo = dom_get(self.domid)
self.info['VCPUs_max'] = xeninfo['online_vcpus']
self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1
# read image value
image_sxp = self._readVm('image')
if image_sxp:
self.info.update_with_image_sxp(sxp.from_string(image_sxp))
# read devices
devices = []
for devclass in XendDevices.valid_devices():
devconfig = self.getDeviceController(devclass).configurations()
if devconfig:
devices.extend(devconfig)
if not self.info['devices'] and devices is not None:
for device in devices:
self.info.device_add(device[0], cfg_sxp = device)
self._update_consoles()
def _update_consoles(self, transaction = None):
if self.domid == None or self.domid == 0:
return
# Update VT100 port if it exists
if transaction is None:
self.console_port = self.readDom('console/port')
else:
self.console_port = self.readDomTxn(transaction, 'console/port')
if self.console_port is not None:
serial_consoles = self.info.console_get_all('vt100')
if not serial_consoles:
cfg = self.info.console_add('vt100', self.console_port)
self._createDevice('console', cfg)
else:
console_uuid = serial_consoles[0].get('uuid')
self.info.console_update(console_uuid, 'location',
self.console_port)
# Update VNC port if it exists and write to xenstore
if transaction is None:
vnc_port = self.readDom('console/vnc-port')
else:
vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
if vnc_port is not None:
for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
if dev_type == 'vfb':
old_location = dev_info.get('location')
listen_host = dev_info.get('vnclisten', \
XendOptions.instance().get_vnclisten_address())
new_location = '%s:%s' % (listen_host, str(vnc_port))
if old_location == new_location:
break
dev_info['location'] = new_location
self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
vfb_ctrl = self.getDeviceController('vfb')
vfb_ctrl.reconfigureDevice(0, dev_info)
break
#
# Functions to update xenstore /vm/*
#
def _readVm(self, *args):
return xstransact.Read(self.vmpath, *args)
def _writeVm(self, *args):
return xstransact.Write(self.vmpath, *args)
def _removeVm(self, *args):
return xstransact.Remove(self.vmpath, *args)
def _gatherVm(self, *args):
return xstransact.Gather(self.vmpath, *args)
def _listRecursiveVm(self, *args):
return xstransact.ListRecursive(self.vmpath, *args)
def storeVm(self, *args):
return xstransact.Store(self.vmpath, *args)
def permissionsVm(self, *args):
return xstransact.SetPermissions(self.vmpath, *args)
#
# Functions to update xenstore /dom/*
#
def readDom(self, *args):
return xstransact.Read(self.dompath, *args)
def gatherDom(self, *args):
return xstransact.Gather(self.dompath, *args)
def _writeDom(self, *args):
return xstransact.Write(self.dompath, *args)
def _removeDom(self, *args):
return xstransact.Remove(self.dompath, *args)
def storeDom(self, *args):
return xstransact.Store(self.dompath, *args)
def readDomTxn(self, transaction, *args):
paths = map(lambda x: self.dompath + "/" + x, args)
return transaction.read(*paths)
def gatherDomTxn(self, transaction, *args):
paths = map(lambda x: self.dompath + "/" + x, args)
return transaction.gather(*paths)
def _writeDomTxn(self, transaction, *args):
paths = map(lambda x: self.dompath + "/" + x, args)
return transaction.write(*paths)
def _removeDomTxn(self, transaction, *args):
paths = map(lambda x: self.dompath + "/" + x, args)
return transaction.remove(*paths)
def storeDomTxn(self, transaction, *args):
paths = map(lambda x: self.dompath + "/" + x, args)
return transaction.store(*paths)
def _recreateDom(self):
complete(self.dompath, lambda t: self._recreateDomFunc(t))
def _recreateDomFunc(self, t):
t.remove()
t.mkdir()
t.set_permissions({'dom' : self.domid, 'read' : True})
t.write('vm', self.vmpath)
# NB. Solaris guests use guest/ and hvmpv/ xenstore directories
for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
t.mkdir(i)
t.set_permissions(i, {'dom' : self.domid})
def _storeDomDetails(self):
to_store = {
'domid': str(self.domid),
'vm': self.vmpath,
'name': self.info['name_label'],
'console/limit': str(xoptions.get_console_limit() * 1024),
'memory/target': str(self.info['memory_dynamic_max'] / 1024),
}
def f(n, v):
if v is not None:
if type(v) == bool:
to_store[n] = v and "1" or "0"
else:
to_store[n] = str(v)
# Figure out if we need to tell xenconsoled to ignore this guest's
# console - device model will handle console if it is running
constype = "ioemu"
if 'device_model' not in self.info['platform']:
constype = "xenconsoled"
f('console/port', self.console_port)
f('console/ring-ref', self.console_mfn)
f('console/type', constype)
f('store/port', self.store_port)
f('store/ring-ref', self.store_mfn)
if arch.type == "x86":
f('control/platform-feature-multiprocessor-suspend', True)
# elfnotes
for n, v in self.info.get_notes().iteritems():
n = n.lower().replace('_', '-')
if n == 'features':
for v in v.split('|'):
v = v.replace('_', '-')
if v.startswith('!'):
f('image/%s/%s' % (n, v[1:]), False)
else:
f('image/%s/%s' % (n, v), True)
else:
f('image/%s' % n, v)
if self.info.has_key('security_label'):
f('security_label', self.info['security_label'])
to_store.update(self._vcpuDomDetails())
log.debug("Storing domain details: %s", scrub_password(to_store))
self._writeDom(to_store)
def _vcpuDomDetails(self):
def availability(n):
if self.info['vcpu_avail'] & (1 << n):
return 'online'
else:
return 'offline'
result = {}
for v in range(0, self.info['VCPUs_max']):
result["cpu/%d/availability" % v] = availability(v)
return result
#
# xenstore watches
#
def _registerWatches(self):
"""Register a watch on this VM's entries in the store, and the
domain's control/shutdown node, so that when they are changed
externally, we keep up to date. This should only be called by {@link
#create}, {@link #recreate}, or {@link #restore}, once the domain's
details have been written, but before the new instance is returned."""
self.vmWatch = xswatch(self.vmpath, self._storeChanged)
self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
self._handleShutdownWatch)
def _storeChanged(self, _):
log.trace("XendDomainInfo.storeChanged");
changed = False
# Check whether values in the configuration have
# changed in Xenstore.
cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
'rtc/timeoffset']
vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
for k in cfg_vm])
# convert two lists into a python dictionary
vm_details = dict(zip(cfg_vm, vm_details))
for arg, val in vm_details.items():
if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
if val != None and val != self.info[xapiarg]:
self.info[xapiarg] = val
changed = True
elif arg == "memory":
if val != None and val != self.info["static_memory_min"]:
self.info["static_memory_min"] = val
changed = True
elif arg == "maxmem":
if val != None and val != self.info["static_memory_max"]:
self.info["static_memory_max"] = val
changed = True
# Check whether image definition has been updated
image_sxp = self._readVm('image')
if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
self.info.update_with_image_sxp(sxp.from_string(image_sxp))
changed = True
# Update the rtc_timeoffset to be preserved across reboot.
# NB. No need to update xenstore domain section.
val = int(vm_details.get("rtc/timeoffset", 0))
self.info["platform"]["rtc_timeoffset"] = val
if changed:
# Update the domain section of the store, as this contains some
# parameters derived from the VM configuration.
self.refresh_shutdown_lock.acquire()
try:
state = self._stateGet()
if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
self._storeDomDetails()
finally:
self.refresh_shutdown_lock.release()
return 1
def _handleShutdownWatch(self, _):
log.debug('XendDomainInfo.handleShutdownWatch')
reason = self.readDom('control/shutdown')
if reason and reason != 'suspend':
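# Record when the shutdown request was first seen, so that
# refreshShutdown can flag the guest as unresponsive once
# SHUTDOWN_TIMEOUT has elapsed.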
sst = self.readDom('xend/shutdown_start_time')
now = time.time()
if sst:
self.shutdownStartTime = float(sst)
timeout = float(sst) + SHUTDOWN_TIMEOUT - now
else:
self.shutdownStartTime = now
self.storeDom('xend/shutdown_start_time', now)
timeout = SHUTDOWN_TIMEOUT
log.trace(
"Scheduling refreshShutdown on domain %d in %ds.",
self.domid, timeout)
threading.Timer(timeout, self.refreshShutdown).start()
return True
#
# Public Attributes for the VM
#
def getDomid(self):
return self.domid
def setName(self, name, to_store = True):
self._checkName(name)
self.info['name_label'] = name
if to_store:
self.storeVm("name", name)
def getName(self):
return self.info['name_label']
def getDomainPath(self):
return self.dompath
def getShutdownReason(self):
return self.readDom('control/shutdown')
def getStorePort(self):
"""For use only by image.py and XendCheckpoint.py."""
return self.store_port
def getConsolePort(self):
"""For use only by image.py and XendCheckpoint.py"""
return self.console_port
def getFeatures(self):
"""For use only by image.py."""
return self.info['features']
def getVCpuCount(self):
return self.info['VCPUs_max']
def setVCpuCount(self, vcpus):
def vcpus_valid(n):
if vcpus <= 0:
raise XendError('A VCPU count of zero or less is invalid')
if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
raise XendError('Cannot set vcpus greater than max vcpus on running domain')
vcpus_valid(vcpus)
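# vcpu_avail is a bitmask with one bit set per online VCPU.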
self.info['vcpu_avail'] = (1 << vcpus) - 1
if self.domid >= 0:
self.storeVm('vcpu_avail', self.info['vcpu_avail'])
self._writeDom(self._vcpuDomDetails())
self.info['VCPUs_live'] = vcpus
else:
if self.info['VCPUs_max'] > vcpus:
# decreasing
del self.info['cpus'][vcpus:]
elif self.info['VCPUs_max'] < vcpus:
# increasing
for c in range(self.info['VCPUs_max'], vcpus):
self.info['cpus'].append(list())
self.info['VCPUs_max'] = vcpus
xen.xend.XendDomain.instance().managed_config_save(self)
log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
vcpus)
def getMemoryTarget(self):
"""Get this domain's target memory size, in KB."""
return self.info['memory_dynamic_max'] / 1024
def getMemoryMaximum(self):
"""Get this domain's maximum memory size, in KB."""
# remember, info now stores memory in bytes
return self.info['memory_static_max'] / 1024
def getResume(self):
return str(self._resume)
def setResume(self, isresume):
self._resume = isresume
def getCpus(self):
return self.info['cpus']
def setCpus(self, cpumap):
self.info['cpus'] = cpumap
def getCap(self):
return self.info['vcpus_params']['cap']
def setCap(self, cpu_cap):
self.info['vcpus_params']['cap'] = cpu_cap
def getWeight(self):
return self.info['vcpus_params']['weight']
def setWeight(self, cpu_weight):
self.info['vcpus_params']['weight'] = cpu_weight
def getRestartCount(self):
return self._readVm('xend/restart_count')
def refreshShutdown(self, xeninfo = None):
""" Checks the domain for whether a shutdown is required.
Called from XendDomainInfo and also image.py for HVM images.
"""
# If set at the end of this method, a restart is required, with the
# given reason. This restart has to be done out of the scope of
# refresh_shutdown_lock.
restart_reason = None
self.refresh_shutdown_lock.acquire()
try:
if xeninfo is None:
xeninfo = dom_get(self.domid)
if xeninfo is None:
# The domain no longer exists. This will occur if we have
# scheduled a timer to check for shutdown timeouts and the
# shutdown succeeded. It will also occur if someone
# destroys a domain beneath us. We clean up the domain,
# just in case, but we can't clean up the VM, because that
# VM may have migrated to a different domain on this
# machine.
self.cleanupDomain()
self._stateSet(DOM_STATE_HALTED)
return
if xeninfo['dying']:
# Dying means that a domain has been destroyed, but has not
# yet been cleaned up by Xen. This state could persist
# indefinitely if, for example, another domain has some of its
# pages mapped. We might like to diagnose this problem in the
# future, but for now all we do is make sure that it's not us
# holding the pages, by calling cleanupDomain. We can't
# clean up the VM, as above.
self.cleanupDomain()
self._stateSet(DOM_STATE_SHUTDOWN)
return
elif xeninfo['crashed']:
if self.readDom('xend/shutdown_completed'):
# We've seen this shutdown already, but we are preserving
# the domain for debugging. Leave it alone.
return
log.warn('Domain has crashed: name=%s id=%d.',
self.info['name_label'], self.domid)
self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
restart_reason = 'crash'
self._stateSet(DOM_STATE_HALTED)
elif xeninfo['shutdown']:
self._stateSet(DOM_STATE_SHUTDOWN)
if self.readDom('xend/shutdown_completed'):
# We've seen this shutdown already, but we are preserving
# the domain for debugging. Leave it alone.
return
else:
reason = shutdown_reason(xeninfo['shutdown_reason'])
log.info('Domain has shutdown: name=%s id=%d reason=%s.',
self.info['name_label'], self.domid, reason)
self._writeVm(LAST_SHUTDOWN_REASON, reason)
self._clearRestart()
if reason == 'suspend':
self._stateSet(DOM_STATE_SUSPENDED)
# Don't destroy the domain. XendCheckpoint will do
# this once it has finished. However, stop watching
# the VM path now, otherwise we will end up with one
# watch for the old domain, and one for the new.
self._unwatchVm()
elif reason in ('poweroff', 'reboot'):
restart_reason = reason
else:
self.destroy()
elif self.dompath is None:
# We have yet to manage to call introduceDomain on this
# domain. This can happen if a restore is in progress, or has
# failed. Ignore this domain.
pass
else:
# Domain is alive. If we are shutting it down, log a message
# if it seems unresponsive.
if xeninfo['paused']:
self._stateSet(DOM_STATE_PAUSED)
else:
self._stateSet(DOM_STATE_RUNNING)
if self.shutdownStartTime:
timeout = (SHUTDOWN_TIMEOUT - time.time() +
self.shutdownStartTime)
if (timeout < 0 and not self.readDom('xend/unresponsive')):
log.info(
"Domain shutdown timeout expired: name=%s id=%s",
self.info['name_label'], self.domid)
self.storeDom('xend/unresponsive', 'True')
finally:
self.refresh_shutdown_lock.release()
if restart_reason and not self.restart_in_progress:
self.restart_in_progress = True
threading.Thread(target = self._maybeRestart,
args = (restart_reason,)).start()
#
# Restart functions - handling whether we come back up on shutdown.
#
def _clearRestart(self):
self._removeDom("xend/shutdown_start_time")
def _maybeDumpCore(self, reason):
if reason == 'crash':
if xoptions.get_enable_dump() or self.get_on_crash() \
in ['coredump_and_destroy', 'coredump_and_restart']:
try:
self.dumpCore()
except XendError:
# This error has been logged -- there's nothing more
# we can do in this context.
pass
def _maybeRestart(self, reason):
# Before taking configured action, dump core if configured to do so.
#
self._maybeDumpCore(reason)
# Dispatch to the correct method based upon the configured on_{reason}
# behaviour.
actions = {"destroy" : self.destroy,
"restart" : self._restart,
"preserve" : self._preserve,
"rename-restart" : self._renameRestart,
"coredump-destroy" : self.destroy,
"coredump-restart" : self._restart}
action_conf = {
'poweroff': 'actions_after_shutdown',
'reboot': 'actions_after_reboot',
'crash': 'actions_after_crash',
}
action_target = self.info.get(action_conf.get(reason))
func = actions.get(action_target, None)
if func and callable(func):
func()
else:
self.destroy() # default to destroy
def _renameRestart(self):
self._restart(True)
def _restart(self, rename = False):
"""Restart the domain after it has exited.
@param rename True if the old domain is to be renamed and preserved,
False if it is to be destroyed.
"""
from xen.xend import XendDomain
if self._readVm(RESTART_IN_PROGRESS):
log.error('Xend failed during restart of domain %s. '
'Refusing to restart to avoid loops.',
str(self.domid))
self.destroy()
return
old_domid = self.domid
self._writeVm(RESTART_IN_PROGRESS, 'True')
elapse = time.time() - self.info['start_time']
if elapse < MINIMUM_RESTART_TIME:
log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
'Refusing to restart to avoid loops.',
self.info['name_label'], elapse)
self.destroy()
return
prev_vm_xend = self._listRecursiveVm('xend')
new_dom_info = self.info
try:
if rename:
new_dom_info = self._preserveForRestart()
else:
self._unwatchVm()
self.destroy()
# new_dom's VM will be the same as this domain's VM, except where
# the rename flag has instructed us to call preserveForRestart.
# In that case, it is important that we remove the
# RESTART_IN_PROGRESS node from the new domain, not the old one,
# once the new one is available.
new_dom = None
try:
new_dom = XendDomain.instance().domain_create_from_dict(
new_dom_info)
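# Carry the old VM's xend/ subtree (restart count and friends)
# over to the new VM node.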
for x in prev_vm_xend[0][1]:
new_dom._writeVm('xend/%s' % x[0], x[1])
new_dom.waitForDevices()
new_dom.unpause()
rst_cnt = new_dom._readVm('xend/restart_count')
rst_cnt = int(rst_cnt) + 1
new_dom._writeVm('xend/restart_count', str(rst_cnt))
new_dom._removeVm(RESTART_IN_PROGRESS)
except:
if new_dom:
new_dom._removeVm(RESTART_IN_PROGRESS)
new_dom.destroy()
else:
self._removeVm(RESTART_IN_PROGRESS)
raise
except:
log.exception('Failed to restart domain %s.', str(old_domid))
def _preserveForRestart(self):
"""Preserve a domain that has been shut down, by giving it a new UUID,
cloning the VM details, and giving it a new name. This allows us to
keep this domain for debugging, but restart a new one in its place
preserving the restart semantics (name and UUID preserved).
"""
new_uuid = uuid.createString()
new_name = 'Domain-%s' % new_uuid
log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
self.info['name_label'], self.domid, self.info['uuid'],
new_name, new_uuid)
self._unwatchVm()
self._releaseDevices()
# Remove existing vm node in xenstore
self._removeVm()
new_dom_info = self.info.copy()
new_dom_info['name_label'] = self.info['name_label']
new_dom_info['uuid'] = self.info['uuid']
self.info['name_label'] = new_name
self.info['uuid'] = new_uuid
self.vmpath = XS_VMROOT + new_uuid
# Write out new vm node to xenstore
self._storeVmDetails()
self._preserve()
return new_dom_info
def _preserve(self):
log.info("Preserving dead domain %s (%d).", self.info['name_label'],
self.domid)
self._unwatchVm()
self.storeDom('xend/shutdown_completed', 'True')
self._stateSet(DOM_STATE_HALTED)
#
# Debugging ..
#
def dumpCore(self, corefile = None):
"""Create a core dump for this domain.
@raise: XendError if core dumping failed.
"""
if not corefile:
this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
self.info['name_label'], self.domid)
if os.path.isdir(corefile):
raise XendError("Cannot dump core in a directory: %s" %
corefile)
try:
try:
self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
xc.domain_dumpcore(self.domid, corefile)
except RuntimeError, ex:
corefile_incomp = corefile+'-incomplete'
try:
os.rename(corefile, corefile_incomp)
except:
pass
log.error("core dump failed: id = %s name = %s: %s",
self.domid, self.info['name_label'], str(ex))
raise XendError("Failed to dump core: %s" % str(ex))
finally:
self._removeVm(DUMPCORE_IN_PROGRESS)
#
# Device creation/deletion functions
#
def _createDevice(self, deviceClass, devConfig):
return self.getDeviceController(deviceClass).createDevice(devConfig)
def _waitForDevice(self, deviceClass, devid):
return self.getDeviceController(deviceClass).waitForDevice(devid)
def _waitForDeviceUUID(self, dev_uuid):
deviceClass, config = self.info['devices'].get(dev_uuid)
self._waitForDevice(deviceClass, config['devid'])
def _waitForDevice_destroy(self, deviceClass, devid, backpath):
return self.getDeviceController(deviceClass).waitForDevice_destroy(
devid, backpath)
def _reconfigureDevice(self, deviceClass, devid, devconfig):
return self.getDeviceController(deviceClass).reconfigureDevice(
devid, devconfig)
def _createDevices(self):
"""Create the devices for a vm.
@raise: VmError for invalid devices
"""
if self.image:
self.image.prepareEnvironment()
vscsi_uuidlist = {}
vscsi_devidlist = []
ordered_refs = self.info.ordered_device_refs()
for dev_uuid in ordered_refs:
devclass, config = self.info['devices'][dev_uuid]
if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
dev_uuid = config.get('uuid')
devid = self._createDevice(devclass, config)
# store devid in XendConfig for caching reasons
if dev_uuid in self.info['devices']:
self.info['devices'][dev_uuid][1]['devid'] = devid
elif devclass == 'vscsi':
vscsi_config = config.get('devs', [])[0]
devid = vscsi_config.get('devid', '')
dev_uuid = config.get('uuid')
vscsi_uuidlist[devid] = dev_uuid
vscsi_devidlist.append(devid)
# The devids must be sorted so that /dev/sdxx ordering in the guest is stable.
if len(vscsi_uuidlist) > 0:
vscsi_devidlist.sort()
for vscsiid in vscsi_devidlist:
dev_uuid = vscsi_uuidlist[vscsiid]
devclass, config = self.info['devices'][dev_uuid]
log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
dev_uuid = config.get('uuid')
devid = self._createDevice(devclass, config)
# store devid in XendConfig for caching reasons
if dev_uuid in self.info['devices']:
self.info['devices'][dev_uuid][1]['devid'] = devid
if self.image:
self.image.createDeviceModel()
# If there are pass-through devs, we need the virtual PCI slot info from qemu.
self.pci_device_configure_boot()
def _releaseDevices(self, suspend = False):
"""Release all domain's devices. Nothrow guarantee."""
if self.image:
try:
log.debug("Destroying device model")
self.image.destroyDeviceModel()
except Exception, e:
log.exception("Device model destroy failed %s" % str(e))
else:
log.debug("No device model")
log.debug("Releasing devices")
t = xstransact("%s/device" % self.vmpath)
try:
for devclass in XendDevices.valid_devices():
for dev in t.list(devclass):
try:
log.debug("Removing %s", dev);
self.destroyDevice(devclass, dev, False);
except:
# Log and swallow any exceptions in removal --
# there's nothing more we can do.
log.exception("Device release failed: %s; %s; %s",
self.info['name_label'],
devclass, dev)
finally:
t.abort()
def getDeviceController(self, name):
"""Get the device controller for this domain, and if it
doesn't exist, create it.
@param name: device class name
@type name: string
@rtype: subclass of DevController
"""
if name not in self._deviceControllers:
devController = XendDevices.make_controller(name, self)
if not devController:
raise XendError("Unknown device type: %s" % name)
self._deviceControllers[name] = devController
return self._deviceControllers[name]
#
# Migration functions (public)
#
def testMigrateDevices(self, network, dst):
""" Notify all device about intention of migration
@raise: XendError for a device that cannot be migrated
"""
for (n, c) in self.info.all_devices_sxpr():
rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
if rc != 0:
raise XendError("Device of type '%s' refuses migration." % n)
def migrateDevices(self, network, dst, step, domName=''):
"""Notify the devices about migration
"""
ctr = 0
try:
for (dev_type, dev_conf) in self.info.all_devices_sxpr():
self.migrateDevice(dev_type, dev_conf, network, dst,
step, domName)
ctr = ctr + 1
except:
for dev_type, dev_conf in self.info.all_devices_sxpr():
if ctr == 0:
step = step - 1
ctr = ctr - 1
self._recoverMigrateDevice(dev_type, dev_conf, network,
dst, step, domName)
raise
def migrateDevice(self, deviceClass, deviceConfig, network, dst,
step, domName=''):
return self.getDeviceController(deviceClass).migrate(deviceConfig,
network, dst, step, domName)
def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
dst, step, domName=''):
return self.getDeviceController(deviceClass).recover_migrate(
deviceConfig, network, dst, step, domName)
## private:
def _constructDomain(self):
"""Construct the domain.
@raise: VmError on error
"""
log.debug('XendDomainInfo.constructDomain')
self.shutdownStartTime = None
self.restart_in_progress = False
hap = 0
hvm = self.info.is_hvm()
if hvm:
hap = self.info.is_hap()
info = xc.xeninfo()
if 'hvm' not in info['xen_caps']:
raise VmError("HVM guest support is unavailable: is VT/AMD-V "
"supported by your CPU and enabled in your "
"BIOS?")
# Hack to pre-reserve some memory for initial domain creation.
# There is an implicit memory overhead for any domain creation. This
# overhead is greater for some types of domain than others. For
# example, an x86 HVM domain will have a default shadow-pagetable
# allocation of 1MB. We free up 4MB here to be on the safe side.
# 2MB memory allocation was not enough in some cases, so it's 4MB now
balloon.free(4*1024, self) # 4MB should be plenty
ssidref = 0
if security.on() == xsconstants.XS_POLICY_USE:
ssidref = security.calc_dom_ssidref_from_info(self.info)
if security.has_authorization(ssidref) == False:
raise VmError("VM is not authorized to run.")
s3_integrity = 0
if self.info.has_key('s3_integrity'):
s3_integrity = self.info['s3_integrity']
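# Pack the domain creation flags: bit 0 = HVM, bit 1 = HAP,
# bit 2 = S3 integrity.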
flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)
try:
self.domid = xc.domain_create(
domid = 0,
ssidref = ssidref,
handle = uuid.fromString(self.info['uuid']),
flags = flags,
target = self.info.target())
except Exception, e:
# may get here if due to ACM the operation is not permitted
if security.on() == xsconstants.XS_POLICY_ACM:
raise VmError('Domain in conflict set with running domain?')
if self.domid < 0:
raise VmError('Creating domain failed: name=%s' %
self.info['name_label'])
self.dompath = GetDomainPath(self.domid)
self._recreateDom()
        # Set timer configuration of domain
timer_mode = self.info["platform"].get("timer_mode")
if hvm and timer_mode is not None:
xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
long(timer_mode))
# Set Viridian interface configuration of domain
viridian = self.info["platform"].get("viridian")
if arch.type == "x86" and hvm and viridian is not None:
xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
# Optionally enable virtual HPET
hpet = self.info["platform"].get("hpet")
if hvm and hpet is not None:
xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
long(hpet))
# Optionally enable periodic vpt aligning
vpt_align = self.info["platform"].get("vpt_align")
if hvm and vpt_align is not None:
xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
long(vpt_align))
# Set maximum number of vcpus in domain
xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
# Check for cpu_{cap|weight} validity for credit scheduler
if XendNode.instance().xenschedinfo() == 'credit':
cap = self.getCap()
weight = self.getWeight()
assert type(weight) == int
assert type(cap) == int
if weight < 1 or weight > 65535:
raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")
if cap < 0 or cap > self.getVCpuCount() * 100:
raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
(self.getVCpuCount() * 100))
# Test whether the devices can be assigned with VT-d
self.info.update_platform_pci()
pci = self.info["platform"].get("pci")
pci_str = ''
if pci and len(pci) > 0:
pci = map(lambda x: x[0:4], pci) # strip options
pci_str = str(pci)
if hvm and pci_str:
bdf = xc.test_assign_device(0, pci_str)
if bdf != 0:
if bdf == -1:
raise VmError("failed to assign device: maybe the platform"
" doesn't support VT-d, or VT-d isn't enabled"
" properly?")
bus = (bdf >> 16) & 0xff
devfn = (bdf >> 8) & 0xff
dev = (devfn >> 3) & 0x1f
func = devfn & 0x7
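                # Illustrative decode: bdf = 0x00014b00 gives bus = 0x01,
                # devfn = 0x4b, hence dev = 0x09 and func = 0x3 (PCI 01:09.3).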
raise VmError("fail to assign device(%x:%x.%x): maybe it has"
" already been assigned to other domain, or maybe"
" it doesn't exist." % (bus, dev, func))
# register the domain in the list
from xen.xend import XendDomain
XendDomain.instance().add_domain(self)
def _introduceDomain(self):
assert self.domid is not None
assert self.store_mfn is not None
assert self.store_port is not None
try:
IntroduceDomain(self.domid, self.store_mfn, self.store_port)
except RuntimeError, exn:
raise XendError(str(exn))
def _setTarget(self, target):
assert self.domid is not None
try:
SetTarget(self.domid, target)
self.storeDom('target', target)
except RuntimeError, exn:
raise XendError(str(exn))
def _setCPUAffinity(self):
""" Repin domain vcpus if a restricted cpus list is provided
"""
def has_cpus():
if self.info['cpus'] is not None:
for c in self.info['cpus']:
if c:
return True
return False
if has_cpus():
for v in range(0, self.info['VCPUs_max']):
if self.info['cpus'][v]:
xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
else:
def find_relaxed_node(node_list):
import sys
nr_nodes = info['nr_nodes']
if node_list is None:
node_list = range(0, nr_nodes)
                nodeload = [0] * nr_nodes
from xen.xend import XendDomain
doms = XendDomain.instance().list('all')
for dom in filter (lambda d: d.domid != self.domid, doms):
cpuinfo = dom.getVCPUInfo()
for vcpu in sxp.children(cpuinfo, 'vcpu'):
if sxp.child_value(vcpu, 'online') == 0: continue
cpumap = list(sxp.child_value(vcpu,'cpumap'))
for i in range(0, nr_nodes):
node_cpumask = info['node_to_cpu'][i]
for j in node_cpumask:
if j in cpumap:
nodeload[i] += 1
break
for i in range(0, nr_nodes):
if len(info['node_to_cpu'][i]) > 0 and i in node_list:
nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
else:
nodeload[i] = sys.maxint
index = nodeload.index( min(nodeload) )
return index
info = xc.physinfo()
if info['nr_nodes'] > 1:
node_memory_list = info['node_to_memory']
needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
candidate_node_list = []
for i in range(0, info['nr_nodes']):
if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
candidate_node_list.append(i)
index = find_relaxed_node(candidate_node_list)
cpumask = info['node_to_cpu'][index]
for v in range(0, self.info['VCPUs_max']):
xc.vcpu_setaffinity(self.domid, v, cpumask)
def _setSchedParams(self):
if XendNode.instance().xenschedinfo() == 'credit':
from xen.xend import XendDomain
XendDomain.instance().domain_sched_credit_set(self.getDomid(),
self.getWeight(),
self.getCap())
def _initDomain(self):
log.debug('XendDomainInfo.initDomain: %s %s',
self.domid,
self.info['vcpus_params']['weight'])
self._configureBootloader()
try:
self.image = image.create(self, self.info)
# repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aid in memory
# distribution for NUMA systems.
self._setCPUAffinity()
# Set scheduling parameters.
self._setSchedParams()
# Use architecture- and image-specific calculations to determine
# the various headrooms necessary, given the raw configured
# values. maxmem, memory, and shadow are all in KiB.
# but memory_static_max etc are all stored in bytes now.
memory = self.image.getRequiredAvailableMemory(
self.info['memory_dynamic_max'] / 1024)
maxmem = self.image.getRequiredAvailableMemory(
self.info['memory_static_max'] / 1024)
shadow = self.image.getRequiredShadowMemory(
self.info['shadow_memory'] * 1024,
self.info['memory_static_max'] / 1024)
log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
# Round shadow up to a multiple of a MiB, as shadow_mem_control
# takes MiB and we must not round down and end up under-providing.
shadow = ((shadow + 1023) / 1024) * 1024
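            # Illustrative: shadow = 1500 KiB rounds up to
            # ((1500 + 1023) / 1024) * 1024 = 2048 KiB under integer division.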
# set memory limit
xc.domain_setmaxmem(self.domid, maxmem)
# Reserve 1 page per MiB of RAM for separate VT-d page table.
vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
# Round vtd_mem up to a multiple of a MiB.
vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
# Make sure there's enough RAM available for the domain
balloon.free(memory + shadow + vtd_mem, self)
# Set up the shadow memory
shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
self.info['shadow_memory'] = shadow_cur
# machine address size
if self.info.has_key('machine_address_size'):
log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])
if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
log.debug("_initDomain: suppressing spurious page faults")
xc.domain_suppress_spurious_page_faults(self.domid)
self._createChannels()
channel_details = self.image.createImage()
self.store_mfn = channel_details['store_mfn']
if 'console_mfn' in channel_details:
self.console_mfn = channel_details['console_mfn']
if 'notes' in channel_details:
self.info.set_notes(channel_details['notes'])
if 'native_protocol' in channel_details:
self.native_protocol = channel_details['native_protocol'];
self._introduceDomain()
if self.info.target():
self._setTarget(self.info.target())
self._createDevices()
self.image.cleanupTmpImages()
self.info['start_time'] = time.time()
self._stateSet(DOM_STATE_RUNNING)
except VmError, exn:
log.exception("XendDomainInfo.initDomain: exception occurred")
if self.image:
self.image.cleanupTmpImages()
raise exn
except RuntimeError, exn:
log.exception("XendDomainInfo.initDomain: exception occurred")
if self.image:
self.image.cleanupTmpImages()
raise VmError(str(exn))
def cleanupDomain(self):
"""Cleanup domain resources; release devices. Idempotent. Nothrow
guarantee."""
self.refresh_shutdown_lock.acquire()
try:
self.unwatchShutdown()
self._releaseDevices()
bootloader_tidy(self)
if self.image:
self.image = None
try:
self._removeDom()
except:
log.exception("Removing domain path failed.")
self._stateSet(DOM_STATE_HALTED)
self.domid = None # Do not push into _stateSet()!
finally:
self.refresh_shutdown_lock.release()
def unwatchShutdown(self):
"""Remove the watch on the domain's control/shutdown node, if any.
Idempotent. Nothrow guarantee. Expects to be protected by the
refresh_shutdown_lock."""
try:
try:
if self.shutdownWatch:
self.shutdownWatch.unwatch()
finally:
self.shutdownWatch = None
except:
log.exception("Unwatching control/shutdown failed.")
def waitForShutdown(self):
self.state_updated.acquire()
try:
while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
self.state_updated.wait(timeout=1.0)
finally:
self.state_updated.release()
def waitForSuspend(self):
"""Wait for the guest to respond to a suspend request by
shutting down. If the guest hasn't re-written control/shutdown
after a certain amount of time, it's obviously not listening and
won't suspend, so we give up. HVM guests with no PV drivers
should already be shutdown.
"""
state = "suspend"
nr_tries = 60
self.state_updated.acquire()
try:
while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
self.state_updated.wait(1.0)
if state == "suspend":
if nr_tries == 0:
msg = ('Timeout waiting for domain %s to suspend'
% self.domid)
self._writeDom('control/shutdown', '')
raise XendError(msg)
state = self.readDom('control/shutdown')
nr_tries -= 1
finally:
self.state_updated.release()
#
# TODO: recategorise - called from XendCheckpoint
#
def completeRestore(self, store_mfn, console_mfn):
log.debug("XendDomainInfo.completeRestore")
self.store_mfn = store_mfn
self.console_mfn = console_mfn
self._introduceDomain()
self.image = image.create(self, self.info)
if self.image:
self.image.createDeviceModel(True)
self._storeDomDetails()
self._registerWatches()
self.refreshShutdown()
log.debug("XendDomainInfo.completeRestore done")
def _endRestore(self):
self.setResume(False)
#
# VM Destroy
#
def _prepare_phantom_paths(self):
# get associated devices to destroy
# build list of phantom devices to be removed after normal devices
plist = []
if self.domid is not None:
t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
try:
for dev in t.list():
backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
% (self.dompath, dev))
if backend_phantom_vbd is not None:
frontend_phantom_vbd = xstransact.Read("%s/frontend" \
% backend_phantom_vbd)
plist.append(backend_phantom_vbd)
plist.append(frontend_phantom_vbd)
finally:
t.abort()
return plist
def _cleanup_phantom_devs(self, plist):
# remove phantom devices
if not plist == []:
time.sleep(2)
for paths in plist:
if paths.find('backend') != -1:
# Modify online status /before/ updating state (latter is watched by
# drivers, so this ordering avoids a race).
xstransact.Write(paths, 'online', "0")
xstransact.Write(paths, 'state', str(xenbusState['Closing']))
# force
xstransact.Remove(paths)
def destroy(self):
"""Cleanup VM and destroy domain. Nothrow guarantee."""
if self.domid is None:
return
from xen.xend import XendDomain
log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
paths = self._prepare_phantom_paths()
if self.dompath is not None:
try:
xc.domain_destroy_hook(self.domid)
xc.domain_pause(self.domid)
do_FLR(self.domid)
xc.domain_destroy(self.domid)
for state in DOM_STATES_OLD:
self.info[state] = 0
self._stateSet(DOM_STATE_HALTED)
except:
log.exception("XendDomainInfo.destroy: domain destruction failed.")
XendDomain.instance().remove_domain(self)
self.cleanupDomain()
self._cleanup_phantom_devs(paths)
self._cleanupVm()
if "transient" in self.info["other_config"] \
and bool(self.info["other_config"]["transient"]):
XendDomain.instance().domain_delete_by_dominfo(self)
def resetDomain(self):
log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
old_domid = self.domid
prev_vm_xend = self._listRecursiveVm('xend')
new_dom_info = self.info
try:
self._unwatchVm()
self.destroy()
new_dom = None
try:
from xen.xend import XendDomain
new_dom_info['domid'] = None
new_dom = XendDomain.instance().domain_create_from_dict(
new_dom_info)
for x in prev_vm_xend[0][1]:
new_dom._writeVm('xend/%s' % x[0], x[1])
new_dom.waitForDevices()
new_dom.unpause()
except:
if new_dom:
new_dom.destroy()
raise
except:
log.exception('Failed to reset domain %s.', str(old_domid))
def resumeDomain(self):
log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
# resume a suspended domain (e.g. after live checkpoint, or after
        # a later error during save or migrate); checks that the domain
# is currently suspended first so safe to call from anywhere
xeninfo = dom_get(self.domid)
if xeninfo is None:
return
if not xeninfo['shutdown']:
return
reason = shutdown_reason(xeninfo['shutdown_reason'])
if reason != 'suspend':
return
try:
# could also fetch a parsed note from xenstore
fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
if not fast:
self._releaseDevices()
self.testDeviceComplete()
self.testvifsComplete()
log.debug("XendDomainInfo.resumeDomain: devices released")
self._resetChannels()
self._removeDom('control/shutdown')
self._removeDom('device-misc/vif/nextDeviceID')
self._createChannels()
self._introduceDomain()
self._storeDomDetails()
self._createDevices()
log.debug("XendDomainInfo.resumeDomain: devices created")
xc.domain_resume(self.domid, fast)
ResumeDomain(self.domid)
except:
log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
self.image.resumeDeviceModel()
log.debug("XendDomainInfo.resumeDomain: completed")
#
# Channels for xenstore and console
#
def _createChannels(self):
"""Create the channels to the domain.
"""
self.store_port = self._createChannel()
self.console_port = self._createChannel()
def _createChannel(self):
"""Create an event channel to the domain.
"""
try:
if self.domid != None:
return xc.evtchn_alloc_unbound(domid = self.domid,
remote_dom = 0)
except:
log.exception("Exception in alloc_unbound(%s)", str(self.domid))
raise
def _resetChannels(self):
"""Reset all event channels in the domain.
"""
try:
if self.domid != None:
return xc.evtchn_reset(dom = self.domid)
except:
log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
raise
#
# Bootloader configuration
#
def _configureBootloader(self):
"""Run the bootloader if we're configured to do so."""
blexec = self.info['PV_bootloader']
bootloader_args = self.info['PV_bootloader_args']
kernel = self.info['PV_kernel']
ramdisk = self.info['PV_ramdisk']
args = self.info['PV_args']
boot = self.info['HVM_boot_policy']
if boot:
# HVM booting.
pass
elif not blexec and kernel:
# Boot from dom0. Nothing left to do -- the kernel and ramdisk
# will be picked up by image.py.
pass
else:
# Boot using bootloader
if not blexec or blexec == 'pygrub':
blexec = auxbin.pathTo('pygrub')
blcfg = None
disks = [x for x in self.info['vbd_refs']
if self.info['devices'][x][1]['bootable']]
if not disks:
msg = "Had a bootloader specified, but no disks are bootable"
log.error(msg)
raise VmError(msg)
devinfo = self.info['devices'][disks[0]]
devtype = devinfo[0]
disk = devinfo[1]['uname']
fn = blkdev_uname_to_file(disk)
taptype = blkdev_uname_to_taptype(disk)
mounted = devtype in ['tap', 'tap2'] and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
if mounted:
# This is a file, not a device. pygrub can cope with a
# file if it's raw, but if it's QCOW or other such formats
# used through blktap, then we need to mount it first.
log.info("Mounting %s on %s." %
(fn, BOOTLOADER_LOOPBACK_DEVICE))
vbd = {
'mode': 'RO',
'device': BOOTLOADER_LOOPBACK_DEVICE,
}
from xen.xend import XendDomain
dom0 = XendDomain.instance().privilegedDomain()
dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
fn = BOOTLOADER_LOOPBACK_DEVICE
try:
blcfg = bootloader(blexec, fn, self, False,
bootloader_args, kernel, ramdisk, args)
finally:
if mounted:
log.info("Unmounting %s from %s." %
(fn, BOOTLOADER_LOOPBACK_DEVICE))
dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
if blcfg is None:
msg = "Had a bootloader specified, but can't find disk"
log.error(msg)
raise VmError(msg)
self.info.update_with_image_sxp(blcfg, True)
#
# VM Functions
#
def _readVMDetails(self, params):
"""Read the specified parameters from the store.
"""
try:
return self._gatherVm(*params)
except ValueError:
# One of the int/float entries in params has a corresponding store
# entry that is invalid. We recover, because older versions of
# Xend may have put the entry there (memory/target, for example),
# but this is in general a bad situation to have reached.
log.exception(
"Store corrupted at %s! Domain %d's configuration may be "
"affected.", self.vmpath, self.domid)
return []
def _cleanupVm(self):
"""Cleanup VM resources. Idempotent. Nothrow guarantee."""
self._unwatchVm()
try:
self._removeVm()
except:
log.exception("Removing VM path failed.")
def checkLiveMigrateMemory(self):
""" Make sure there's enough memory to migrate this domain """
overhead_kb = 0
if arch.type == "x86":
            # 1MB per vcpu plus 4KiB per MiB of RAM. This is higher than
# the minimum that Xen would allocate if no value were given.
overhead_kb = self.info['VCPUs_max'] * 1024 + \
(self.info['memory_static_max'] / 1024 / 1024) * 4
overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
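            # Illustrative: 2 vcpus and 2 GiB of static-max RAM give
            # 2*1024 + 2048*4 = 10240 KiB, already a multiple of 1024 KiB.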
# The domain might already have some shadow memory
overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
if overhead_kb > 0:
balloon.free(overhead_kb, self)
def _unwatchVm(self):
"""Remove the watch on the VM path, if any. Idempotent. Nothrow
guarantee."""
try:
try:
if self.vmWatch:
self.vmWatch.unwatch()
finally:
self.vmWatch = None
except:
log.exception("Unwatching VM path failed.")
def testDeviceComplete(self):
""" For Block IO migration safety we must ensure that
        the device has shut down correctly, i.e. all blocks are
flushed to disk
"""
start = time.time()
while True:
test = 0
diff = time.time() - start
vbds = self.getDeviceController('vbd').deviceIDs()
taps = self.getDeviceController('tap').deviceIDs()
tap2s = self.getDeviceController('tap2').deviceIDs()
for i in vbds + taps + tap2s:
test = 1
log.info("Dev %s still active, looping...", i)
time.sleep(0.1)
if test == 0:
break
if diff >= MIGRATE_TIMEOUT:
log.info("Dev still active but hit max loop timeout")
break
def testvifsComplete(self):
""" In case vifs are released and then created for the same
        domain, we need to wait for the devices to shut down.
"""
start = time.time()
while True:
test = 0
diff = time.time() - start
for i in self.getDeviceController('vif').deviceIDs():
test = 1
log.info("Dev %s still active, looping...", i)
time.sleep(0.1)
if test == 0:
break
if diff >= MIGRATE_TIMEOUT:
log.info("Dev still active but hit max loop timeout")
break
def _storeVmDetails(self):
to_store = {}
for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
if self._infoIsSet(info_key):
to_store[key] = str(self.info[info_key])
if self._infoIsSet("static_memory_min"):
to_store["memory"] = str(self.info["static_memory_min"])
if self._infoIsSet("static_memory_max"):
to_store["maxmem"] = str(self.info["static_memory_max"])
image_sxpr = self.info.image_sxpr()
if image_sxpr:
to_store['image'] = sxp.to_string(image_sxpr)
if not self._readVm('xend/restart_count'):
to_store['xend/restart_count'] = str(0)
log.debug("Storing VM details: %s", scrub_password(to_store))
self._writeVm(to_store)
self._setVmPermissions()
def _setVmPermissions(self):
"""Allow the guest domain to read its UUID. We don't allow it to
access any other entry, for security."""
xstransact.SetPermissions('%s/uuid' % self.vmpath,
{ 'dom' : self.domid,
'read' : True,
'write' : False })
#
# Utility functions
#
def __getattr__(self, name):
if name == "state":
log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
log.warn("".join(traceback.format_stack()))
return self._stateGet()
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name == "state":
log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
log.warn("".join(traceback.format_stack()))
self._stateSet(value)
else:
self.__dict__[name] = value
def _stateSet(self, state):
self.state_updated.acquire()
try:
# TODO Not sure this is correct...
# _stateGet is live now. Why not fire event
# even when it hasn't changed?
if self._stateGet() != state:
self.state_updated.notifyAll()
import XendAPI
XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
'power_state')
finally:
self.state_updated.release()
def _stateGet(self):
        # Let's try to reconstitute the state from xc.
        # First, get the domain info from xc - this will
        # tell us if the domain exists.
info = dom_get(self.getDomid())
if info is None or info['shutdown']:
# We are either HALTED or SUSPENDED
# check saved image exists
from xen.xend import XendDomain
managed_config_path = \
XendDomain.instance()._managed_check_point_path( \
self.get_uuid())
if os.path.exists(managed_config_path):
return XEN_API_VM_POWER_STATE_SUSPENDED
else:
return XEN_API_VM_POWER_STATE_HALTED
elif info['crashed']:
# Crashed
return XEN_API_VM_POWER_STATE_CRASHED
else:
# We are either RUNNING or PAUSED
if info['paused']:
return XEN_API_VM_POWER_STATE_PAUSED
else:
return XEN_API_VM_POWER_STATE_RUNNING
def _infoIsSet(self, name):
return name in self.info and self.info[name] is not None
def _checkName(self, name):
"""Check if a vm name is valid. Valid names contain alphabetic
characters, digits, or characters in '_-.:/+'.
The same name cannot be used for more than one vm at the same time.
@param name: name
@raise: VmError if invalid
"""
from xen.xend import XendDomain
if name is None or name == '':
raise VmError('Missing VM Name')
if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
raise VmError('Invalid VM Name')
dom = XendDomain.instance().domain_lookup_nr(name)
if dom and dom.info['uuid'] != self.info['uuid']:
raise VmError("VM name '%s' already exists%s" %
(name,
dom.domid is not None and
(" as domain %s" % str(dom.domid)) or ""))
def update(self, info = None, refresh = True, transaction = None):
"""Update with info from xc.domain_getinfo().
"""
log.trace("XendDomainInfo.update(%s) on domain %s", info,
str(self.domid))
if not info:
info = dom_get(self.domid)
if not info:
return
if info["maxmem_kb"] < 0:
info["maxmem_kb"] = XendNode.instance() \
.physinfo_dict()['total_memory'] * 1024
# make sure state is reset for info
# TODO: we should eventually get rid of old_dom_states
self.info.update_config(info)
self._update_consoles(transaction)
if refresh:
self.refreshShutdown(info)
log.trace("XendDomainInfo.update done on domain %s: %s",
str(self.domid), self.info)
def sxpr(self, ignore_store = False, legacy_only = True):
result = self.info.to_sxp(domain = self,
ignore_devices = ignore_store,
legacy_only = legacy_only)
return result
# Xen API
# ----------------------------------------------------------------
def get_uuid(self):
dom_uuid = self.info.get('uuid')
if not dom_uuid: # if it doesn't exist, make one up
dom_uuid = uuid.createString()
self.info['uuid'] = dom_uuid
return dom_uuid
def get_memory_static_max(self):
return self.info.get('memory_static_max', 0)
def get_memory_static_min(self):
return self.info.get('memory_static_min', 0)
def get_memory_dynamic_max(self):
return self.info.get('memory_dynamic_max', 0)
def get_memory_dynamic_min(self):
return self.info.get('memory_dynamic_min', 0)
# only update memory-related config values if they maintain sanity
def _safe_set_memory(self, key, newval):
oldval = self.info.get(key, 0)
try:
self.info[key] = newval
self.info._memory_sanity_check()
except Exception, ex:
self.info[key] = oldval
raise
def set_memory_static_max(self, val):
self._safe_set_memory('memory_static_max', val)
def set_memory_static_min(self, val):
self._safe_set_memory('memory_static_min', val)
def set_memory_dynamic_max(self, val):
self._safe_set_memory('memory_dynamic_max', val)
def set_memory_dynamic_min(self, val):
self._safe_set_memory('memory_dynamic_min', val)
def get_vcpus_params(self):
if self.getDomid() is None:
return self.info['vcpus_params']
retval = xc.sched_credit_domain_get(self.getDomid())
return retval
def get_power_state(self):
return XEN_API_VM_POWER_STATE[self._stateGet()]
def get_platform(self):
return self.info.get('platform', {})
def get_pci_bus(self):
return self.info.get('pci_bus', '')
def get_tools_version(self):
return self.info.get('tools_version', {})
def get_metrics(self):
        return self.metrics.get_uuid()
def get_security_label(self, xspol=None):
import xen.util.xsm.xsm as security
label = security.get_security_label(self, xspol)
return label
def set_security_label(self, seclab, old_seclab, xspol=None,
xspol_old=None):
"""
Set the security label of a domain from its old to
a new value.
@param seclab New security label formatted in the form
<policy type>:<policy name>:<vm label>
@param old_seclab The current security label that the
VM must have.
@param xspol An optional policy under which this
update should be done. If not given,
then the current active policy is used.
@param xspol_old The old policy; only to be passed during
the updating of a policy
@return Returns return code, a string with errors from
the hypervisor's operation, old label of the
domain
"""
rc = 0
errors = ""
old_label = ""
new_ssidref = 0
domid = self.getDomid()
res_labels = None
is_policy_update = (xspol_old != None)
from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
state = self._stateGet()
# Relabel only HALTED or RUNNING or PAUSED domains
if domid != 0 and \
state not in \
[ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
DOM_STATE_SUSPENDED ]:
log.warn("Relabeling domain not possible in state '%s'" %
DOM_STATES[state])
return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
# Remove security label. Works only for halted or suspended domains
if not seclab or seclab == "":
if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
if self.info.has_key('security_label'):
old_label = self.info['security_label']
# Check label against expected one.
if old_label != old_seclab:
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
del self.info['security_label']
xen.xend.XendDomain.instance().managed_config_save(self)
return (xsconstants.XSERR_SUCCESS, "", "", 0)
tmp = seclab.split(":")
if len(tmp) != 3:
return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
typ, policy, label = tmp
poladmin = XSPolicyAdminInstance()
if not xspol:
xspol = poladmin.get_policy_by_name(policy)
try:
xen.xend.XendDomain.instance().policy_lock.acquire_writer()
if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
#if domain is running or paused try to relabel in hypervisor
if not xspol:
return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
if typ != xspol.get_type_name() or \
policy != xspol.get_name():
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
if typ == xsconstants.ACM_POLICY_ID:
new_ssidref = xspol.vmlabel_to_ssidref(label)
if new_ssidref == xsconstants.INVALID_SSIDREF:
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
# Check that all used resources are accessible under the
# new label
if not is_policy_update and \
not security.resources_compatible_with_vmlabel(xspol,
self, label):
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
#Check label against expected one. Can only do this
# if the policy hasn't changed underneath in the meantime
if xspol_old == None:
old_label = self.get_security_label()
if old_label != old_seclab:
log.info("old_label != old_seclab: %s != %s" %
(old_label, old_seclab))
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
# relabel domain in the hypervisor
rc, errors = security.relabel_domains([[domid, new_ssidref]])
log.info("rc from relabeling in HV: %d" % rc)
else:
return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
if rc == 0:
# HALTED, RUNNING or PAUSED
if domid == 0:
if xspol:
self.info['security_label'] = seclab
ssidref = poladmin.set_domain0_bootlabel(xspol, label)
else:
return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
else:
if self.info.has_key('security_label'):
old_label = self.info['security_label']
# Check label against expected one, unless wildcard
if old_label != old_seclab:
return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
self.info['security_label'] = seclab
try:
xen.xend.XendDomain.instance().managed_config_save(self)
except:
pass
return (rc, errors, old_label, new_ssidref)
finally:
xen.xend.XendDomain.instance().policy_lock.release()
def get_on_shutdown(self):
after_shutdown = self.info.get('actions_after_shutdown')
if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
return XEN_API_ON_NORMAL_EXIT[-1]
return after_shutdown
def get_on_reboot(self):
after_reboot = self.info.get('actions_after_reboot')
if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
return XEN_API_ON_NORMAL_EXIT[-1]
return after_reboot
def get_on_suspend(self):
# TODO: not supported
after_suspend = self.info.get('actions_after_suspend')
if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
return XEN_API_ON_NORMAL_EXIT[-1]
return after_suspend
def get_on_crash(self):
after_crash = self.info.get('actions_after_crash')
if not after_crash or after_crash not in \
XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
return XEN_API_ON_CRASH_BEHAVIOUR[0]
return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
def get_dev_config_by_uuid(self, dev_class, dev_uuid):
""" Get's a device configuration either from XendConfig or
from the DevController.
@param dev_class: device class, either, 'vbd' or 'vif'
@param dev_uuid: device UUID
@rtype: dictionary
"""
dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
# shortcut if the domain isn't started because
# the devcontrollers will have no better information
# than XendConfig.
if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
XEN_API_VM_POWER_STATE_SUSPENDED):
if dev_config:
return copy.deepcopy(dev_config)
return None
# instead of using dev_class, we use the dev_type
# that is from XendConfig.
controller = self.getDeviceController(dev_type)
if not controller:
return None
all_configs = controller.getAllDeviceConfigurations()
if not all_configs:
return None
updated_dev_config = copy.deepcopy(dev_config)
for _devid, _devcfg in all_configs.items():
if _devcfg.get('uuid') == dev_uuid:
updated_dev_config.update(_devcfg)
updated_dev_config['id'] = _devid
return updated_dev_config
return updated_dev_config
def get_dev_xenapi_config(self, dev_class, dev_uuid):
config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
if not config:
return {}
config['VM'] = self.get_uuid()
if dev_class == 'vif':
if not config.has_key('name'):
config['name'] = config.get('vifname', '')
if not config.has_key('MAC'):
config['MAC'] = config.get('mac', '')
if not config.has_key('type'):
config['type'] = 'paravirtualised'
            # devid is needed again below for the utilisation statistics,
            # so compute it before the 'device' check.
            devid = config.get('id')
            if not config.has_key('device'):
                if devid != None:
                    config['device'] = 'eth%s' % devid
                else:
                    config['device'] = ''
if not config.has_key('network'):
try:
bridge = config.get('bridge', None)
if bridge is None:
from xen.util import Brctl
if_to_br = dict([(i,b)
for (b,ifs) in Brctl.get_state().items()
for i in ifs])
vifname = "vif%s.%s" % (self.getDomid(),
config.get('id'))
bridge = if_to_br.get(vifname, None)
                    config['network'] = \
                        XendNode.instance().bridge_to_network(
                            bridge).get_uuid()
except Exception:
log.exception('bridge_to_network')
# Ignore this for now -- it may happen if the device
# has been specified using the legacy methods, but at
# some point we're going to have to figure out how to
# handle that properly.
config['MTU'] = 1500 # TODO
if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
xennode = XendNode.instance()
rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
config['io_read_kbs'] = rx_bps/1024
config['io_write_kbs'] = tx_bps/1024
rx, tx = xennode.get_vif_stat(self.domid, devid)
config['io_total_read_kbs'] = rx/1024
config['io_total_write_kbs'] = tx/1024
else:
config['io_read_kbs'] = 0.0
config['io_write_kbs'] = 0.0
config['io_total_read_kbs'] = 0.0
config['io_total_write_kbs'] = 0.0
config['security_label'] = config.get('security_label', '')
if dev_class == 'vbd':
if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
controller = self.getDeviceController(dev_class)
devid, _1, _2 = controller.getDeviceDetails(config)
xennode = XendNode.instance()
rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
config['io_read_kbs'] = rd_blkps
config['io_write_kbs'] = wr_blkps
else:
config['io_read_kbs'] = 0.0
config['io_write_kbs'] = 0.0
config['VDI'] = config.get('VDI', '')
config['device'] = config.get('dev', '')
if ':' in config['device']:
vbd_name, vbd_type = config['device'].split(':', 1)
config['device'] = vbd_name
if vbd_type == 'cdrom':
config['type'] = XEN_API_VBD_TYPE[0]
else:
config['type'] = XEN_API_VBD_TYPE[1]
config['driver'] = 'paravirtualised' # TODO
config['image'] = config.get('uname', '')
if config.get('mode', 'r') == 'r':
config['mode'] = 'RO'
else:
config['mode'] = 'RW'
if dev_class == 'vtpm':
if not config.has_key('type'):
config['type'] = 'paravirtualised' # TODO
if not config.has_key('backend'):
config['backend'] = "00000000-0000-0000-0000-000000000000"
return config
def get_dev_property(self, dev_class, dev_uuid, field):
config = self.get_dev_xenapi_config(dev_class, dev_uuid)
try:
return config[field]
except KeyError:
raise XendError('Invalid property for device: %s' % field)
def set_dev_property(self, dev_class, dev_uuid, field, value):
self.info['devices'][dev_uuid][1][field] = value
def get_vcpus_util(self):
vcpu_util = {}
xennode = XendNode.instance()
if 'VCPUs_max' in self.info and self.domid != None:
for i in range(0, self.info['VCPUs_max']):
util = xennode.get_vcpu_util(self.domid, i)
vcpu_util[str(i)] = util
return vcpu_util
def get_consoles(self):
return self.info.get('console_refs', [])
def get_vifs(self):
return self.info.get('vif_refs', [])
def get_vbds(self):
return self.info.get('vbd_refs', [])
def get_vtpms(self):
return self.info.get('vtpm_refs', [])
def get_dpcis(self):
return XendDPCI.get_by_VM(self.info.get('uuid'))
def get_dscsis(self):
return XendDSCSI.get_by_VM(self.info.get('uuid'))
def create_vbd(self, xenapi_vbd, vdi_image_path):
"""Create a VBD using a VDI from XendStorageRepository.
@param xenapi_vbd: vbd struct from the Xen API
@param vdi_image_path: VDI UUID
@rtype: string
@return: uuid of the device
"""
xenapi_vbd['image'] = vdi_image_path
if vdi_image_path.startswith('tap'):
dev_uuid = self.info.device_add('tap2', cfg_xenapi = xenapi_vbd)
else:
dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
if not dev_uuid:
raise XendError('Failed to create device')
if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
XEN_API_VM_POWER_STATE_PAUSED):
_, config = self.info['devices'][dev_uuid]
if vdi_image_path.startswith('tap'):
dev_control = self.getDeviceController('tap2')
else:
dev_control = self.getDeviceController('vbd')
try:
devid = dev_control.createDevice(config)
dev_control.waitForDevice(devid)
self.info.device_update(dev_uuid,
cfg_xenapi = {'devid': devid})
except Exception, exn:
log.exception(exn)
del self.info['devices'][dev_uuid]
self.info['vbd_refs'].remove(dev_uuid)
raise
return dev_uuid
def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
"""Create a VBD using a VDI from XendStorageRepository.
@param xenapi_vbd: vbd struct from the Xen API
@param vdi_image_path: VDI UUID
@rtype: string
@return: uuid of the device
"""
xenapi_vbd['image'] = vdi_image_path
dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
if not dev_uuid:
raise XendError('Failed to create device')
if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
_, config = self.info['devices'][dev_uuid]
config['devid'] = self.getDeviceController('tap').createDevice(config)
return config['devid']
def create_vif(self, xenapi_vif):
"""Create VIF device from the passed struct in Xen API format.
@param xenapi_vif: Xen API VIF Struct.
@rtype: string
@return: UUID
"""
dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
if not dev_uuid:
raise XendError('Failed to create device')
if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
XEN_API_VM_POWER_STATE_PAUSED):
_, config = self.info['devices'][dev_uuid]
dev_control = self.getDeviceController('vif')
try:
devid = dev_control.createDevice(config)
dev_control.waitForDevice(devid)
self.info.device_update(dev_uuid,
cfg_xenapi = {'devid': devid})
except Exception, exn:
log.exception(exn)
del self.info['devices'][dev_uuid]
self.info['vif_refs'].remove(dev_uuid)
raise
return dev_uuid
def create_vtpm(self, xenapi_vtpm):
"""Create a VTPM device from the passed struct in Xen API format.
@return: uuid of the device
@rtype: string
"""
if self._stateGet() not in (DOM_STATE_HALTED,):
raise VmError("Can only add vTPM to a halted domain.")
if self.get_vtpms() != []:
raise VmError('Domain already has a vTPM.')
dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
if not dev_uuid:
raise XendError('Failed to create device')
return dev_uuid
def create_console(self, xenapi_console):
""" Create a console device from a Xen API struct.
@return: uuid of device
@rtype: string
"""
if self._stateGet() not in (DOM_STATE_HALTED,):
raise VmError("Can only add console to a halted domain.")
dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
if not dev_uuid:
raise XendError('Failed to create device')
return dev_uuid
def set_console_other_config(self, console_uuid, other_config):
self.info.console_update(console_uuid, 'other_config', other_config)
def create_dpci(self, xenapi_pci):
"""Create pci device from the passed struct in Xen API format.
@param xenapi_pci: DPCI struct from Xen API
@rtype: bool
#@rtype: string
@return: True if successfully created device
#@return: UUID
"""
dpci_uuid = uuid.createString()
dpci_opts = []
opts_dict = xenapi_pci.get('options')
for k in opts_dict.keys():
dpci_opts.append([k, opts_dict[k]])
opts_sxp = pci_opts_list_to_sxp(dpci_opts)
# Convert xenapi to sxp
ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
dev_sxp = ['dev',
['domain', '0x%02x' % ppci.get_domain()],
['bus', '0x%02x' % ppci.get_bus()],
['slot', '0x%02x' % ppci.get_slot()],
['func', '0x%1x' % ppci.get_func()],
['vdevfn', '0x%02x' % xenapi_pci.get('hotplug_slot')],
['key', xenapi_pci['key']],
['uuid', dpci_uuid]]
dev_sxp = sxp.merge(dev_sxp, opts_sxp)
target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising'] ]
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
old_pci_sxp = self._getDeviceInfo_pci(0)
if old_pci_sxp is None:
dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
if not dev_uuid:
raise XendError('Failed to create device')
else:
new_pci_sxp = ['pci']
for existing_dev in sxp.children(old_pci_sxp, 'dev'):
new_pci_sxp.append(existing_dev)
new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
self.info.device_update(dev_uuid, new_pci_sxp)
xen.xend.XendDomain.instance().managed_config_save(self)
else:
try:
self.device_configure(target_pci_sxp)
except Exception, exn:
raise XendError('Failed to create device')
return dpci_uuid
def create_dscsi(self, xenapi_dscsi):
"""Create scsi device from the passed struct in Xen API format.
@param xenapi_dscsi: DSCSI struct from Xen API
@rtype: string
@return: UUID
"""
dscsi_uuid = uuid.createString()
# Convert xenapi to sxp
pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
target_vscsi_sxp = \
['vscsi',
['dev',
['devid', devid],
['p-devname', pscsi.get_dev_name()],
['p-dev', pscsi.get_physical_HCTL()],
['v-dev', xenapi_dscsi.get('virtual_HCTL')],
['state', xenbusState['Initialising']],
['uuid', dscsi_uuid]
],
['feature-host', 0]
]
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
if cur_vscsi_sxp is None:
dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
if not dev_uuid:
raise XendError('Failed to create device')
else:
new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
new_vscsi_sxp.append(existing_dev)
new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
self.info.device_update(dev_uuid, new_vscsi_sxp)
xen.xend.XendDomain.instance().managed_config_save(self)
else:
try:
self.device_configure(target_vscsi_sxp)
except Exception, exn:
raise XendError('Failed to create device')
return dscsi_uuid
def destroy_device_by_uuid(self, dev_type, dev_uuid):
if dev_uuid not in self.info['devices']:
raise XendError('Device does not exist')
try:
if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
XEN_API_VM_POWER_STATE_PAUSED):
_, config = self.info['devices'][dev_uuid]
devid = config.get('devid')
if devid != None:
self.getDeviceController(dev_type).destroyDevice(devid, force = False)
else:
raise XendError('Unable to get devid for device: %s:%s' %
(dev_type, dev_uuid))
finally:
del self.info['devices'][dev_uuid]
self.info['%s_refs' % dev_type].remove(dev_uuid)
def destroy_vbd(self, dev_uuid):
self.destroy_device_by_uuid('vbd', dev_uuid)
def destroy_vif(self, dev_uuid):
self.destroy_device_by_uuid('vif', dev_uuid)
def destroy_vtpm(self, dev_uuid):
self.destroy_device_by_uuid('vtpm', dev_uuid)
def destroy_dpci(self, dev_uuid):
dpci = XendAPIStore.get(dev_uuid, 'DPCI')
ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
old_pci_sxp = self._getDeviceInfo_pci(0)
dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
target_dev = None
new_pci_sxp = ['pci']
for dev in sxp.children(old_pci_sxp, 'dev'):
pci_dev = {}
pci_dev['domain'] = sxp.child_value(dev, 'domain')
pci_dev['bus'] = sxp.child_value(dev, 'bus')
pci_dev['slot'] = sxp.child_value(dev, 'slot')
pci_dev['func'] = sxp.child_value(dev, 'func')
if ppci.get_name() == pci_dict_to_bdf_str(pci_dev):
target_dev = dev
else:
new_pci_sxp.append(dev)
if target_dev is None:
raise XendError('Failed to destroy device')
target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
self.info.device_update(dev_uuid, new_pci_sxp)
if len(sxp.children(new_pci_sxp, 'dev')) == 0:
del self.info['devices'][dev_uuid]
xen.xend.XendDomain.instance().managed_config_save(self)
else:
try:
self.device_configure(target_pci_sxp)
except Exception, exn:
raise XendError('Failed to destroy device')
def destroy_dscsi(self, dev_uuid):
dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
devid = dscsi.get_virtual_host()
vHCTL = dscsi.get_virtual_HCTL()
cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
target_dev = None
new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
for dev in sxp.children(cur_vscsi_sxp, 'dev'):
if vHCTL == sxp.child_value(dev, 'v-dev'):
target_dev = dev
else:
new_vscsi_sxp.append(dev)
if target_dev is None:
raise XendError('Failed to destroy device')
target_dev.append(['state', xenbusState['Closing']])
target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]
if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
self.info.device_update(dev_uuid, new_vscsi_sxp)
if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
del self.info['devices'][dev_uuid]
xen.xend.XendDomain.instance().managed_config_save(self)
else:
try:
self.device_configure(target_vscsi_sxp)
except Exception, exn:
raise XendError('Failed to destroy device')
def destroy_xapi_instances(self):
"""Destroy Xen-API instances stored in XendAPIStore.
"""
# Xen-API classes based on XendBase have their instances stored
# in XendAPIStore. Cleanup these instances here, if they are supposed
# to be destroyed when the parent domain is dead.
#
# Most of the virtual devices (vif, vbd, vfb, etc) are not based on
# XendBase and there's no need to remove them from XendAPIStore.
from xen.xend import XendDomain
if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
# domain still exists.
return
# Destroy the VMMetrics instance.
if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
is not None:
self.metrics.destroy()
# Destroy DPCI instances.
for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
XendAPIStore.deregister(dpci_uuid, "DPCI")
# Destroy DSCSI instances.
for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
XendAPIStore.deregister(dscsi_uuid, "DSCSI")
def has_device(self, dev_class, dev_uuid):
return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
def __str__(self):
return '<domain id=%s name=%s memory=%s state=%s>' % \
(str(self.domid), self.info['name_label'],
str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
__repr__ = __str__
|
david-zwicker/convert-formula
|
refs/heads/master
|
src/parser_line.py
|
1
|
""" Defines classes for parsing mathematical formulas according to various
language definitions.
This parser was built based on the code `fourFn.py` given in the example section
of the pyparsing webpage. The code was written by `Paul McGuire`.
"""
import copy
from pyparsing import Optional, ZeroOrMore, Forward, downcaseTokens
from .language import LanguageBase
def _show_token(strg, loc, toks):
""" Auxilariy function for printing the current token """
print("Strg: %r" % strg)
print("Loc: %r" % loc)
print("Toks: %r" % toks)
class ParserLine(object):
"""Base class describing a generic parser handling input in a 'common'
style"""
def __init__(self, language):
""" Initializes the parser """
if isinstance(language, LanguageBase):
self.language = language
else:
raise ValueError("`language` is not of type LanguageBase")
        self.assignment = None  # set by set_assignment when parsing an assignment
        self.result_parse = []
self.result_stack = []
self.result_nested = None
self.parser = self.init_parser()
def set_assignment(self, strg, loc, toks):
"""Helper function used to remember the variable the value is assigned
to"""
if len(toks) > 0:
self.assignment = toks[0]
def push_first(self, strg, loc, toks):
""" Helper function for creating the expression stack """
if len(toks) > 0:
self.result_stack.append(toks[0])
# if toks and len(toks)>1 and toks[0].isalpha():
# self.result_stack.append('FCALL')
def push_unary_minus(self, strg, loc, toks):
"""Helper function for pushing the unary minus onto the expression
stack"""
if toks and toks[0] == "-":
self.result_stack.append("UNARY-")
# ~ self.exprStack.append('-1')
# ~ self.exprStack.append('*')
def _push2stack(self, value):
"""returns a function which pushes `value` to the expression stack.
This function migth be used as a ParseAction"""
return lambda s, l, t: self.result_stack.append(value)
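    # For example, init_parser below uses
    # atoms["func_lpar"].setParseAction(self._push2stack("(")) to record the
    # opening parenthesis of a function call on the expression stack.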
def init_parser(self):
"""
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: real | Word(alphas) | array | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
"""
atoms = self.language.get_parser_atoms()
variable = atoms["variable"] # .setParseAction(downcaseTokens)
func_lpar = atoms["func_lpar"].setParseAction(self._push2stack("("))
func_delim = atoms["func_delim"]
func_rpar = atoms["func_rpar"].setParseAction(self._push2stack(")"))
array_lpar = atoms["array_lpar"].setParseAction(self._push2stack("["))
array_delim = atoms["array_delim"]
array_rpar = atoms["array_rpar"].setParseAction(self._push2stack("]"))
lpar = atoms["lpar"].suppress()
rpar = atoms["rpar"].suppress()
expop = atoms["exp"]
addop = atoms["plus"] | atoms["minus"]
multop = atoms["mult"] | atoms["div"]
cmpop = atoms["equal"]
expr = Forward() # forward declaration of an entire expression
# this is necessary for defining the recursive grammar
# smallest entity of a mathematical expression:
array = (
variable
+ array_lpar
+ atoms["int"].addParseAction(self.push_first)
+ ZeroOrMore(array_delim + atoms["int"])
+ array_rpar
)
func_call = (
atoms["function"].setParseAction(downcaseTokens)
+ func_lpar
+ expr
+ ZeroOrMore(func_delim + expr)
+ func_rpar
)
obj = atoms["consts"] | atoms["float"] | array | func_call | variable
atom = (
Optional("-")
+ ( # optional unary minus
obj.addParseAction(self.push_first)
| (lpar + expr.suppress() + rpar) # subexpression
)
).setParseAction(self.push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of
# "atom [ ^ atom ]...", we get right-to-left exponents, instead of
# left-to-right. That is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor << atom + ZeroOrMore((expop + factor).setParseAction(self.push_first))
# sequence of multiplications
term = factor + ZeroOrMore((multop + factor).setParseAction(self.push_first))
# sequence of summations
expr << term + ZeroOrMore((addop + term).setParseAction(self.push_first))
# comparison operators
equation = expr + Optional(cmpop + expr).setParseAction(self.push_first)
# assignment operator
self.parser = (
(variable ^ array).setParseAction(self.push_first)
+ atoms["assign"]
+ equation
).setParseAction(self._push2stack("=")) | equation
return self.parser
def _get_nested_structure_rec(self, s, array_list=False, func_list=False):
""" Calculates the nested structure from the expression """
# initialize values
op = s.pop()
array_end = False
func_end = False
# check whether we are currently in a function list
if func_list and op == "(":
res = None
func_end = True
elif array_list and op == "[":
res = None
array_end = True
elif op == "UNARY-":
res = dict(
op="UNARY-", pos="prefix", args=[self._get_nested_structure_rec(s)[0]]
)
# elif op == '=':
# res = dict(op='=', args=[self._get_nested_structure_rec(s)])
elif op in "+-*/^=" or op == "==": # operators using two values
arg2 = self._get_nested_structure_rec(s)[0]
arg1 = self._get_nested_structure_rec(s)[0]
if op == "^" and arg1 == "E": # optimization
res = dict(op="exp", pos="prefix", args=[arg2])
else:
res = dict(op=op, pos="infix", args=[arg1, arg2])
elif len(s) > 1 and s[-1] == "]": # array selector has started
s.pop() # remove the bracket
# iterate through arguments of the function
args = []
while True:
val, array_finished, func_finished = self._get_nested_structure_rec(
s, True, func_list
)
if array_finished:
break
args.append(val)
args.reverse()
res = dict(op=op, pos="array", args=args)
elif len(s) > 1 and s[-1] == ")": # function has started
s.pop() # remove the bracket
# iterate through arguments of the function
args = []
while True:
val, array_finished, func_finished = self._get_nested_structure_rec(
s, array_list, True
)
if func_finished:
break
args.append(val)
args.reverse()
res = dict(op=op, pos="function", args=args)
# constants and variables
else:
res = op
return res, array_end, func_end
def get_nested_structure(self):
""" Calculates the nested structure from the expression """
if self.result_stack == []:
raise ValueError("Nothing has been parsed, yet.")
        s = copy.copy(self.result_stack)  # otherwise we would lose self.result_stack
res = self._get_nested_structure_rec(s)[0]
# if self.assignment is not None:
# res = { 'op':'=', 'pos':'infix', 'args':[self.assignment, res] }
return res
def parse_string(self, s):
""" Parses a formula given as a string """
# reset cache
self.result_stack = []
# process the input string
s = self.language.pre_process(s)
if s.strip() == "":
self.result_parse = []
self.result_nested = ""
else:
self.result_parse = self.parser.parseString(s)
self.result_nested = self.get_nested_structure()
return self.result_nested
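# Minimal usage sketch (hypothetical: SomeLanguage stands for any concrete
# LanguageBase subclass, which this module does not define):
#
#     parser = ParserLine(SomeLanguage())
#     tree = parser.parse_string("y = 2*x + 1")
#     # `tree` is a nested dict such as
#     #     {'op': '=', 'pos': 'infix', 'args': ['y', {...}]}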
|
renatopp/batma
|
refs/heads/master
|
batma/maths/__init__.py
|
12133432
| |
hryamzik/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/profitbricks/__init__.py
|
12133432
| |
hanlind/nova
|
refs/heads/master
|
nova/virt/xenapi/image/__init__.py
|
12133432
| |
joebos/django-allauth
|
refs/heads/master
|
allauth/account/auth_backends.py
|
6
|
from django.contrib.auth.backends import ModelBackend
from django.db.models import Q
from ..utils import get_user_model
from .app_settings import AuthenticationMethod
from . import app_settings
User = get_user_model()
class AuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
ret = None
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
ret = self._authenticate_by_email(**credentials)
elif app_settings.AUTHENTICATION_METHOD \
== AuthenticationMethod.USERNAME_EMAIL:
ret = self._authenticate_by_email(**credentials)
if not ret:
ret = self._authenticate_by_username(**credentials)
else:
ret = self._authenticate_by_username(**credentials)
return ret
def _authenticate_by_username(self, **credentials):
username_field = app_settings.USER_MODEL_USERNAME_FIELD
if not username_field:
return None
try:
# Username query is case insensitive
query = {username_field+'__iexact': credentials["username"]}
user = User.objects.get(**query)
if user.check_password(credentials["password"]):
return user
except User.DoesNotExist:
return None
def _authenticate_by_email(self, **credentials):
# Even though allauth will pass along `email`, other apps may
# not respect this setting. For example, when using
# django-tastypie basic authentication, the login is always
        # passed as `username`. So let's play nice with other apps
# and use username as fallback
email = credentials.get('email', credentials.get('username'))
if email:
users = User.objects.filter(Q(email__iexact=email)
| Q(emailaddress__email__iexact=email))
for user in users:
if user.check_password(credentials["password"]):
return user
return None
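# Minimal usage sketch (illustrative; assumes this backend is listed in
# settings.AUTHENTICATION_BACKENDS):
#
#     from django.contrib.auth import authenticate
#     user = authenticate(username="alice@example.com", password="secret")
#     # Depending on app_settings.AUTHENTICATION_METHOD, the credential is
#     # resolved by email, by username, or by both.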
|
jenskutilek/jkRFoTools
|
refs/heads/master
|
Lib/jkRFoTools/FontChooserDraggable.py
|
1
|
from Cocoa import NSStringPboardType, NSDragOperationMove, NSArray
import vanilla
from defconAppKit.windows.baseWindow import BaseWindowController
from defconAppKit.windows.progressWindow import ProgressWindow
from lib.scripting.codeEditor import CodeEditor
from robofab.world import AllFonts
class ProcessFonts(BaseWindowController):
def __init__(self, message="Choose Fonts", function=None, show_results=True, width=400, height=300):
"""Open a window containing a list of all open fonts, to select some fonts and process them with a supplied function.
message: The title to display in the title bar, e. g. the name of your script
function: A function that will be called for each selected font with the RFont as its argument
show_results: Boolean to indicate if your function returns a result which should be displayed in the result box
width: The initial width of the window (optional)
height: The initial height of the window (optional)
Select and double-click rows in the result list to copy them to the pasteboard."""
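        # Usage sketch (illustrative):
        #
        #     def count_glyphs(font):
        #         return "%d glyphs" % len(font)
        #
        #     ProcessFonts("Glyph count", count_glyphs)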
self.w = vanilla.Window(
(width, height),
message,
(400, 300),
)
self.function = function
column_descriptions = [
{
"title": "Font",
"typingSensitive": True,
"editable": False,
},
{
"title": "Result",
},
]
self.w.message = vanilla.TextBox(
(10, 10, -10, 30),
"Select fonts to process:",
)
self.w.font_list = vanilla.List(
(10, 40, -10, 100),
[],
columnDescriptions = column_descriptions,
drawFocusRing = True,
allowsMultipleSelection = True,
doubleClickCallback = self.copy_result,
selectionCallback = self.show_result,
selfDropSettings = {
"type": NSStringPboardType,
"operation": NSDragOperationMove,
"callback": self.list_drop,
},
dragSettings = {
"type": NSStringPboardType,
"dropDataFormat": None,
"callback": self.list_drag,
},
)
self.w.result_box = CodeEditor((10, 150, -10, -42), "", lexer="text")
self.w.copy_button = vanilla.Button(
(10, -32, 110, -10),
"Copy results",
callback = self.copy_result,
)
self.w.cancel_button = vanilla.Button(
(-180, -32, -100, -10),
"Cancel",
callback = self.cancel,
)
self.w.ok_button = vanilla.Button(
(-90, -32, -10, -10),
"Process",
callback = self.ok,
)
self.setUpBaseWindowBehavior()
self._drag_src_rows = []
self.update_font_list()
self.w.open()
def cancel(self, sender=None):
self.w.close()
def ok(self, sender=None):
self.w.cancel_button.enable(False)
self.w.ok_button.enable(False)
self.w.copy_button.enable(False)
fonts = self.w.font_list.getSelection()
progress = ProgressWindow(
"",
tickCount = len(fonts),
parentWindow = self.w,
)
results = []
        all_fonts = AllFonts()
        for i in range(len(all_fonts)):
            font = all_fonts[i]
if i in fonts:
progress.update("Processing %s %s ..." % (font.info.familyName, font.info.styleName))
result = self.function(font)
if result is None:
result = "Unknown"
results.append(
{
"Font": "%s %s" % (font.info.familyName, font.info.styleName),
"Result": result,
}
)
else:
results.append(
{
"Font": "%s %s" % (font.info.familyName, font.info.styleName),
"Result": self.w.font_list.get()[i]["Result"],
}
)
progress.close()
self.w.font_list.set(results)
self.w.font_list.setSelection(fonts)
self.w.cancel_button.setTitle("Close")
self.w.cancel_button.enable(True)
self.w.ok_button.enable(True)
self.w.copy_button.enable(True)
def copy_result(self, sender):
        from AppKit import NSPasteboard, NSArray
s = u""
results = self.w.font_list.getSelection()
for i in results:
s += self.w.font_list.get()[i]["Font"] + "\n\n"
s += self.w.font_list.get()[i]["Result"] + "\n\n\n"
pb = NSPasteboard.generalPasteboard()
pb.clearContents()
a = NSArray.arrayWithObject_(s.strip("\n"))
pb.writeObjects_(a)
def show_result(self, sender=None):
results = self.w.font_list.getSelection()
if len(results) > 0:
self.w.result_box.set(self.w.font_list.get()[results[0]]["Result"])
selection_empty = False
else:
self.w.result_box.set("")
selection_empty = True
if selection_empty:
self.w.ok_button.enable(False)
self.w.copy_button.enable(False)
else:
self.w.ok_button.enable(True)
self.w.copy_button.enable(True)
    def update_font_list(self):
        all_fonts = AllFonts()
        self.w.font_list.set(
            [
                {
                    "Font": "%s %s" % (
                        font.info.familyName,
                        font.info.styleName
                    ),
                    "Result": ""
                } for font in all_fonts
            ]
        )
def list_drag(self, sender=None, drop_info=None):
self._drag_src_rows = drop_info
print "Drag Source Rows:", drop_info
data = [self.w.font_list[i] for i in drop_info]
return data
def list_drop(self, sender=None, drop_info=None):
if drop_info is not None:
if drop_info["isProposal"]:
# TODO: check if drop is acceptable
return True
else:
print "DEBUG: dropped item in position %i" % drop_info["rowIndex"]
print " Data: %s" % drop_info["data"]
print drop_info
# TODO: accept the drop (actually do something)
self.insert_data(drop_info["data"], drop_info["rowIndex"])
return True
    def delete_rows(self, row_index_list):
        # Delete from the highest index down so earlier deletions don't
        # shift the remaining row indexes.
        for row_index in sorted(row_index_list, reverse=True):
            del self.w.font_list[row_index]
def insert_data(self, data, row_index):
print "insert_data"
#print type(data), data
for i in range(len(data)-1, -1, -1):
print "Insert:", row_index, i, data[i]
self.w.font_list.insert(row_index, data[i])
def windowCloseCallback(self, sender):
super(ProcessFonts, self).windowCloseCallback(sender)
if __name__ == "__main__":
ProcessFonts()
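# A minimal usage sketch (hedged; assumes RoboFont with open fonts and this
# module importable). The callback receives each selected RFont; its return
# value is shown in the "Result" column:
#
#     def count_glyphs(font):
#         return "%d glyphs" % len(font)
#
#     ProcessFonts("Count Glyphs", count_glyphs)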
|
chouseknecht/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/aws_batch_job_queue.py
|
5
|
#!/usr/bin/python
# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_batch_job_queue
short_description: Manage AWS Batch Job Queues
description:
- This module allows the management of AWS Batch Job Queues.
    It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
    environment, M(aws_batch_job_queue) to manage job queues, and M(aws_batch_job_definition) to manage job definitions.
version_added: "2.5"
author: Jon Meran (@jonmer85)
options:
job_queue_name:
description:
- The name for the job queue
required: true
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
job_queue_state:
description:
      - The state of the job queue. If the job queue state is ENABLED, it can accept jobs.
default: "ENABLED"
choices: ["ENABLED", "DISABLED"]
priority:
description:
      - The priority of the job queue. Job queues with a higher priority (or a lower integer value for the priority
        parameter) are evaluated first when associated with the same compute environment. Priority is determined in
        ascending order; for example, a job queue with a priority value of 1 is given scheduling preference over a job
        queue with a priority value of 10.
required: true
compute_environment_order:
description:
- The set of compute environments mapped to a job queue and their order relative to each other. The job
scheduler uses this parameter to determine which compute environment should execute a given job. Compute
environments must be in the VALID state before you can associate them with a job queue. You can associate up to
3 compute environments with a job queue.
required: true
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: My Batch Job Queue
batch_job_queue:
job_queue_name: jobQueueName
state: present
region: us-east-1
job_queue_state: ENABLED
priority: 1
compute_environment_order:
- order: 1
compute_environment: my_compute_env1
- order: 2
compute_environment: my_compute_env2
- name: show results
debug: var=batch_job_queue_action
'''
RETURN = '''
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
returned: always
sample:
batch_job_queue_action: updated
changed: false
response:
job_queue_arn: "arn:aws:batch:...."
job_queue_name: <name>
priority: 1
state: DISABLED
status: UPDATING
status_reason: "JobQueue Healthy"
type: dict
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.batch import AWSConnection, cc, set_api_params
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
except ImportError:
pass # Handled by HAS_BOTO3
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module:
:param aws:
:return:
"""
return
# ---------------------------------------------------------------------------------------------------
#
# Batch Job Queue functions
#
# ---------------------------------------------------------------------------------------------------
def get_current_job_queue(module, connection):
try:
environments = connection.client().describe_job_queues(
jobQueues=[module.params['job_queue_name']]
)
return environments['jobQueues'][0] if len(environments['jobQueues']) > 0 else None
except ClientError:
return None
def create_job_queue(module, aws):
"""
Adds a Batch job queue
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
params = ('job_queue_name', 'priority')
api_params = set_api_params(module, params)
if module.params['job_queue_state'] is not None:
api_params['state'] = module.params['job_queue_state']
api_params['computeEnvironmentOrder'] = get_compute_environment_order_list(module)
try:
if not module.check_mode:
client.create_job_queue(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
        module.fail_json(msg='Error creating job queue: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def get_compute_environment_order_list(module):
compute_environment_order_list = []
for ceo in module.params['compute_environment_order']:
compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment']))
return compute_environment_order_list
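# For reference (hedged illustration): a compute_environment_order of
#   [{'order': 1, 'compute_environment': 'my_compute_env1'}]
# is translated by the helper above into the camelCase shape the Batch API
# expects:
#   [{'order': 1, 'computeEnvironment': 'my_compute_env1'}]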
def remove_job_queue(module, aws):
"""
Remove a Batch job queue
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
api_params = {'jobQueue': module.params['job_queue_name']}
try:
if not module.check_mode:
client.delete_job_queue(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing job queue: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def manage_state(module, aws):
changed = False
current_state = 'absent'
state = module.params['state']
job_queue_state = module.params['job_queue_state']
job_queue_name = module.params['job_queue_name']
priority = module.params['priority']
action_taken = 'none'
response = None
check_mode = module.check_mode
# check if the job queue exists
current_job_queue = get_current_job_queue(module, aws)
if current_job_queue:
current_state = 'present'
if state == 'present':
if current_state == 'present':
updates = False
# Update Batch Job Queue configuration
job_kwargs = {'jobQueue': job_queue_name}
# Update configuration if needed
if job_queue_state and current_job_queue['state'] != job_queue_state:
job_kwargs.update({'state': job_queue_state})
updates = True
if priority is not None and current_job_queue['priority'] != priority:
job_kwargs.update({'priority': priority})
updates = True
new_compute_environment_order_list = get_compute_environment_order_list(module)
if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']:
job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list
updates = True
if updates:
try:
if not check_mode:
aws.client().update_job_queue(**job_kwargs)
changed = True
action_taken = "updated"
except (ParamValidationError, ClientError) as e:
module.fail_json(msg="Unable to update job queue: {0}".format(to_native(e)),
exception=traceback.format_exc())
else:
# Create Job Queue
changed = create_job_queue(module, aws)
action_taken = 'added'
# Describe job queue
response = get_current_job_queue(module, aws)
if not response:
module.fail_json(msg='Unable to get job queue information after creating/updating')
else:
if current_state == 'present':
# remove the Job Queue
changed = remove_job_queue(module, aws)
action_taken = 'deleted'
return dict(changed=changed, batch_job_queue_action=action_taken, response=response)
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
"""
Main entry point.
:return dict: changed, batch_job_queue_action, response
"""
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
state=dict(required=False, default='present', choices=['present', 'absent']),
job_queue_name=dict(required=True),
job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
priority=dict(type='int', required=True),
compute_environment_order=dict(type='list', required=True),
region=dict(aliases=['aws_region', 'ec2_region'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
# validate dependencies
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module.')
aws = AWSConnection(module, ['batch'])
validate_params(module, aws)
results = manage_state(module, aws)
module.exit_json(**camel_dict_to_snake_dict(results))
if __name__ == '__main__':
main()
|
ShineFan/odoo
|
refs/heads/8.0
|
addons/auth_openid/controllers/main.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
import getpass
import werkzeug.urls
import werkzeug.exceptions
from openid import oidutil
from openid.store import filestore
from openid.consumer import consumer
from openid.cryptutil import randomString
from openid.extensions import ax, sreg
import openerp
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.addons.web.controllers.main import login_and_redirect, set_cookie_and_redirect
import openerp.http as http
from openerp.http import request
from .. import utils
_logger = logging.getLogger(__name__)
oidutil.log = _logger.debug
def get_system_user():
"""Return system user info string, such as USERNAME-EUID"""
try:
info = getpass.getuser()
except ImportError:
if os.name == 'nt':
            # When there is no 'USERNAME' in the environment, getpass.getuser()
            # fails while trying to import the 'pwd' module, which is unix only.
            # In that case we have to fall back to the real win32 API.
import win32api
info = win32api.GetUserName()
else:
raise
    euid = getattr(os, 'geteuid', None)  # Not available on some platforms
if euid is not None:
info = '%s-%d' % (info, euid())
return info
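# Hedged illustration: on a unix host this typically yields something like
# 'alice-1000' (user name plus effective uid); on platforms without
# os.geteuid (e.g. Windows) it is just the user name, e.g. 'alice'.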
_storedir = os.path.join(tempfile.gettempdir(),
'openerp-auth_openid-%s-store' % get_system_user())
class GoogleAppsAwareConsumer(consumer.GenericConsumer):
def complete(self, message, endpoint, return_to):
if message.getOpenIDNamespace() == consumer.OPENID2_NS:
server_url = message.getArg(consumer.OPENID2_NS, 'op_endpoint', '')
if server_url.startswith('https://www.google.com/a/'):
assoc_handle = message.getArg(consumer.OPENID_NS, 'assoc_handle')
assoc = self.store.getAssociation(server_url, assoc_handle)
if assoc:
# update fields
for attr in ['claimed_id', 'identity']:
value = message.getArg(consumer.OPENID2_NS, attr, '')
value = 'https://www.google.com/accounts/o8/user-xrds?uri=%s' % werkzeug.url_quote_plus(value)
message.setArg(consumer.OPENID2_NS, attr, value)
# now, resign the message
message.delArg(consumer.OPENID2_NS, 'sig')
message.delArg(consumer.OPENID2_NS, 'signed')
message = assoc.signMessage(message)
return super(GoogleAppsAwareConsumer, self).complete(message, endpoint, return_to)
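# Hedged illustration of the rewrite above: for a Google Apps endpoint, a
# claimed_id such as
#   http://example.com/openid?id=1234
# becomes
#   https://www.google.com/accounts/o8/user-xrds?uri=http%3A%2F%2Fexample.com%2Fopenid%3Fid%3D1234
# and the message is re-signed with the stored association so that the
# standard verification in the parent class still succeeds.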
class OpenIDController(http.Controller):
_store = filestore.FileOpenIDStore(_storedir)
_REQUIRED_ATTRIBUTES = ['email']
_OPTIONAL_ATTRIBUTES = 'nickname fullname postcode country language timezone'.split()
def _add_extensions(self, oidrequest):
"""Add extensions to the oidrequest"""
sreg_request = sreg.SRegRequest(required=self._REQUIRED_ATTRIBUTES,
optional=self._OPTIONAL_ATTRIBUTES)
oidrequest.addExtension(sreg_request)
ax_request = ax.FetchRequest()
for alias in self._REQUIRED_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=True, alias=alias))
for alias in self._OPTIONAL_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=False, alias=alias))
oidrequest.addExtension(ax_request)
def _get_attributes_from_success_response(self, success_response):
attrs = {}
all_attrs = self._REQUIRED_ATTRIBUTES + self._OPTIONAL_ATTRIBUTES
sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_response)
if sreg_resp:
for attr in all_attrs:
value = sreg_resp.get(attr)
if value is not None:
attrs[attr] = value
ax_resp = ax.FetchResponse.fromSuccessResponse(success_response)
if ax_resp:
for attr in all_attrs:
value = ax_resp.getSingle(utils.SREG2AX[attr])
if value is not None:
attrs[attr] = value
return attrs
def _get_realm(self):
return request.httprequest.host_url
@http.route('/auth_openid/login/verify_direct', type='http', auth='none')
def verify_direct(self, db, url):
result = self._verify(db, url)
if 'error' in result:
return werkzeug.exceptions.BadRequest(result['error'])
if result['action'] == 'redirect':
return werkzeug.utils.redirect(result['value'])
return result['value']
@http.route('/auth_openid/login/verify', type='json', auth='none')
def verify(self, db, url):
return self._verify(db, url)
def _verify(self, db, url):
redirect_to = werkzeug.urls.Href(request.httprequest.host_url + 'auth_openid/login/process')(session_id=request.session_id)
realm = self._get_realm()
session = dict(dbname=db, openid_url=url) # TODO add origin page ?
oidconsumer = consumer.Consumer(session, self._store)
try:
oidrequest = oidconsumer.begin(url)
except consumer.DiscoveryFailure, exc:
fetch_error_string = 'Error in discovery: %s' % (str(exc[0]),)
return {'error': fetch_error_string, 'title': 'OpenID Error'}
if oidrequest is None:
return {'error': 'No OpenID services found', 'title': 'OpenID Error'}
request.session.openid_session = session
self._add_extensions(oidrequest)
if oidrequest.shouldSendRedirect():
redirect_url = oidrequest.redirectURL(realm, redirect_to)
return {'action': 'redirect', 'value': redirect_url, 'session_id': request.session_id}
else:
form_html = oidrequest.htmlMarkup(realm, redirect_to)
return {'action': 'post', 'value': form_html, 'session_id': request.session_id}
@http.route('/auth_openid/login/process', type='http', auth='none')
def process(self, **kw):
session = getattr(request.session, 'openid_session', None)
if not session:
return set_cookie_and_redirect('/')
oidconsumer = consumer.Consumer(session, self._store, consumer_class=GoogleAppsAwareConsumer)
query = request.httprequest.args
info = oidconsumer.complete(query, request.httprequest.base_url)
display_identifier = info.getDisplayIdentifier()
session['status'] = info.status
if info.status == consumer.SUCCESS:
dbname = session['dbname']
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
Modules = registry.get('ir.module.module')
installed = Modules.search_count(cr, SUPERUSER_ID, ['&', ('name', '=', 'auth_openid'), ('state', '=', 'installed')]) == 1
if installed:
Users = registry.get('res.users')
#openid_url = info.endpoint.canonicalID or display_identifier
openid_url = session['openid_url']
attrs = self._get_attributes_from_success_response(info)
attrs['openid_url'] = openid_url
session['attributes'] = attrs
openid_email = attrs.get('email', False)
domain = []
if openid_email:
domain += ['|', ('openid_email', '=', False)]
domain += [('openid_email', '=', openid_email)]
domain += [('openid_url', '=', openid_url), ('active', '=', True)]
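                    # Hedged reading of the resulting search domain: an active
                    # user whose openid_url matches and whose openid_email
                    # either matches or is unset, i.e.
                    #   (openid_email is False OR openid_email == email)
                    #   AND openid_url == url AND active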
ids = Users.search(cr, SUPERUSER_ID, domain)
assert len(ids) < 2
if ids:
user_id = ids[0]
login = Users.browse(cr, SUPERUSER_ID, user_id).login
key = randomString(utils.KEY_LENGTH, '0123456789abcdef')
Users.write(cr, SUPERUSER_ID, [user_id], {'openid_key': key})
# TODO fill empty fields with the ones from sreg/ax
cr.commit()
return login_and_redirect(dbname, login, key)
            session['message'] = 'This OpenID identifier is not associated with any active user'
elif info.status == consumer.SETUP_NEEDED:
session['message'] = info.setup_url
elif info.status == consumer.FAILURE and display_identifier:
fmt = "Verification of %s failed: %s"
session['message'] = fmt % (display_identifier, info.message)
else: # FAILURE
# Either we don't understand the code or there is no
# openid_url included with the error. Give a generic
# failure message. The library should supply debug
# information in a log.
session['message'] = 'Verification failed.'
return set_cookie_and_redirect('/#action=login&loginerror=1')
@http.route('/auth_openid/login/status', type='json', auth='none')
def status(self):
session = getattr(request.session, 'openid_session', {})
return {'status': session.get('status'), 'message': session.get('message')}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ss300/p2pool
|
refs/heads/master
|
wstools/Utility.py
|
292
|
# Copyright (c) 2003, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of
# any required approvals from the U.S. Dept. of Energy). All rights
# reserved.
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id$"
import sys, types, httplib, urllib, socket, weakref
from os.path import isfile
from string import join, strip, split
from UserDict import UserDict
from cStringIO import StringIO
from TimeoutSocket import TimeoutSocket, TimeoutError
from urlparse import urlparse
from httplib import HTTPConnection, HTTPSConnection
from exceptions import Exception
try:
from ZSI import _get_idstr
except ImportError:
def _get_idstr(pyobj):
'''Python 2.3.x generates a FutureWarning for negative IDs, so
we use a different prefix character to ensure uniqueness, and
call abs() to avoid the warning.'''
x = id(pyobj)
if x < 0:
return 'x%x' % abs(x)
return 'o%x' % x
import xml.dom.minidom
from xml.dom import Node
import logging
from c14n import Canonicalize
from Namespaces import SCHEMA, SOAP, XMLNS, ZSI_SCHEMA_URI
try:
from xml.dom.ext import SplitQName
except ImportError:
def SplitQName(qname):
'''SplitQName(qname) -> (string, string)
Split Qualified Name into a tuple of len 2, consisting
of the prefix and the local name.
(prefix, localName)
Special Cases:
xmlns -- (localName, 'xmlns')
None -- (None, localName)
'''
l = qname.split(':')
if len(l) == 1:
l.insert(0, None)
elif len(l) == 2:
if l[0] == 'xmlns':
l.reverse()
else:
return
return tuple(l)
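    # Hedged examples of the expected mapping:
    #   SplitQName('soap:Envelope') -> ('soap', 'Envelope')
    #   SplitQName('Envelope')      -> (None, 'Envelope')
    #   SplitQName('xmlns:soap')    -> ('soap', 'xmlns')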
#
# python2.3 urllib.basejoin does not remove current directory ./
# from path and this causes problems on subsequent basejoins.
#
basejoin = urllib.basejoin
if sys.version_info[:2] < (2, 4):
    #basejoin = lambda base,url: urllib.basejoin(base,url.lstrip('./'))
    token = './'
    def basejoin(base, url):
        if url.startswith(token):
            return urllib.basejoin(base, url[2:])
        return urllib.basejoin(base, url)
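# Hedged example of the difference this patch papers over: on Python < 2.4,
#   basejoin('http://example.org/a/b.wsdl', './schema.xsd')
# behaves like
#   urllib.basejoin('http://example.org/a/b.wsdl', 'schema.xsd')
# i.e. the './' no longer accumulates across successive joins.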
class NamespaceError(Exception):
"""Used to indicate a Namespace Error."""
class RecursionError(Exception):
"""Used to indicate a HTTP redirect recursion."""
class ParseError(Exception):
"""Used to indicate a XML parsing error."""
class DOMException(Exception):
"""Used to indicate a problem processing DOM."""
class Base:
"""Base class for instance level Logging"""
def __init__(self, module=__name__):
self.logger = logging.getLogger('%s-%s(%s)' %(module, self.__class__, _get_idstr(self)))
class HTTPResponse:
"""Captures the information in an HTTP response message."""
def __init__(self, response):
self.status = response.status
self.reason = response.reason
self.headers = response.msg
self.body = response.read() or None
response.close()
class TimeoutHTTP(HTTPConnection):
"""A custom http connection object that supports socket timeout."""
def __init__(self, host, port=None, timeout=20):
HTTPConnection.__init__(self, host, port)
self.timeout = timeout
def connect(self):
self.sock = TimeoutSocket(self.timeout)
self.sock.connect((self.host, self.port))
class TimeoutHTTPS(HTTPSConnection):
"""A custom https object that supports socket timeout. Note that this
is not really complete. The builtin SSL support in the Python socket
module requires a real socket (type) to be passed in to be hooked to
SSL. That means our fake socket won't work and our timeout hacks are
bypassed for send and recv calls. Since our hack _is_ in place at
connect() time, it should at least provide some timeout protection."""
def __init__(self, host, port=None, timeout=20, **kwargs):
HTTPSConnection.__init__(self, str(host), port, **kwargs)
self.timeout = timeout
def connect(self):
sock = TimeoutSocket(self.timeout)
sock.connect((self.host, self.port))
realsock = getattr(sock.sock, '_sock', sock.sock)
ssl = socket.ssl(realsock, self.key_file, self.cert_file)
self.sock = httplib.FakeSocket(sock, ssl)
def urlopen(url, timeout=20, redirects=None):
"""A minimal urlopen replacement hack that supports timeouts for http.
Note that this supports GET only."""
scheme, host, path, params, query, frag = urlparse(url)
if not scheme in ('http', 'https'):
return urllib.urlopen(url)
if params: path = '%s;%s' % (path, params)
if query: path = '%s?%s' % (path, query)
if frag: path = '%s#%s' % (path, frag)
if scheme == 'https':
# If ssl is not compiled into Python, you will not get an exception
# until a conn.endheaders() call. We need to know sooner, so use
# getattr.
try:
import M2Crypto
except ImportError:
if not hasattr(socket, 'ssl'):
raise RuntimeError, 'no built-in SSL Support'
conn = TimeoutHTTPS(host, None, timeout)
else:
ctx = M2Crypto.SSL.Context()
ctx.set_session_timeout(timeout)
conn = M2Crypto.httpslib.HTTPSConnection(host, ssl_context=ctx)
conn.set_debuglevel(1)
else:
conn = TimeoutHTTP(host, None, timeout)
conn.putrequest('GET', path)
conn.putheader('Connection', 'close')
conn.endheaders()
response = None
while 1:
response = conn.getresponse()
if response.status != 100:
break
conn._HTTPConnection__state = httplib._CS_REQ_SENT
conn._HTTPConnection__response = None
status = response.status
# If we get an HTTP redirect, we will follow it automatically.
if status >= 300 and status < 400:
location = response.msg.getheader('location')
if location is not None:
response.close()
if redirects is not None and redirects.has_key(location):
raise RecursionError(
'Circular HTTP redirection detected.'
)
if redirects is None:
redirects = {}
redirects[location] = 1
return urlopen(location, timeout, redirects)
raise HTTPResponse(response)
if not (status >= 200 and status < 300):
raise HTTPResponse(response)
body = StringIO(response.read())
response.close()
return body
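# Hedged usage sketch: a plain GET with a 10 second socket timeout;
# urlopen() returns a StringIO with the response body.
#
#   body = urlopen('http://example.org/service?wsdl', timeout=10)
#   data = body.read()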
class DOM:
"""The DOM singleton defines a number of XML related constants and
provides a number of utility methods for DOM related tasks. It
also provides some basic abstractions so that the rest of the
package need not care about actual DOM implementation in use."""
# Namespace stuff related to the SOAP specification.
NS_SOAP_ENV_1_1 = 'http://schemas.xmlsoap.org/soap/envelope/'
NS_SOAP_ENC_1_1 = 'http://schemas.xmlsoap.org/soap/encoding/'
NS_SOAP_ENV_1_2 = 'http://www.w3.org/2001/06/soap-envelope'
NS_SOAP_ENC_1_2 = 'http://www.w3.org/2001/06/soap-encoding'
NS_SOAP_ENV_ALL = (NS_SOAP_ENV_1_1, NS_SOAP_ENV_1_2)
NS_SOAP_ENC_ALL = (NS_SOAP_ENC_1_1, NS_SOAP_ENC_1_2)
NS_SOAP_ENV = NS_SOAP_ENV_1_1
NS_SOAP_ENC = NS_SOAP_ENC_1_1
_soap_uri_mapping = {
NS_SOAP_ENV_1_1 : '1.1',
NS_SOAP_ENV_1_2 : '1.2',
}
SOAP_ACTOR_NEXT_1_1 = 'http://schemas.xmlsoap.org/soap/actor/next'
SOAP_ACTOR_NEXT_1_2 = 'http://www.w3.org/2001/06/soap-envelope/actor/next'
SOAP_ACTOR_NEXT_ALL = (SOAP_ACTOR_NEXT_1_1, SOAP_ACTOR_NEXT_1_2)
def SOAPUriToVersion(self, uri):
"""Return the SOAP version related to an envelope uri."""
value = self._soap_uri_mapping.get(uri)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP envelope uri: %s' % uri
)
def GetSOAPEnvUri(self, version):
"""Return the appropriate SOAP envelope uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'NS_SOAP_ENV_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
def GetSOAPEncUri(self, version):
"""Return the appropriate SOAP encoding uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'NS_SOAP_ENC_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
def GetSOAPActorNextUri(self, version):
"""Return the right special next-actor uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'SOAP_ACTOR_NEXT_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
# Namespace stuff related to XML Schema.
NS_XSD_99 = 'http://www.w3.org/1999/XMLSchema'
NS_XSI_99 = 'http://www.w3.org/1999/XMLSchema-instance'
NS_XSD_00 = 'http://www.w3.org/2000/10/XMLSchema'
NS_XSI_00 = 'http://www.w3.org/2000/10/XMLSchema-instance'
NS_XSD_01 = 'http://www.w3.org/2001/XMLSchema'
NS_XSI_01 = 'http://www.w3.org/2001/XMLSchema-instance'
NS_XSD_ALL = (NS_XSD_99, NS_XSD_00, NS_XSD_01)
NS_XSI_ALL = (NS_XSI_99, NS_XSI_00, NS_XSI_01)
NS_XSD = NS_XSD_01
NS_XSI = NS_XSI_01
_xsd_uri_mapping = {
NS_XSD_99 : NS_XSI_99,
NS_XSD_00 : NS_XSI_00,
NS_XSD_01 : NS_XSI_01,
}
for key, value in _xsd_uri_mapping.items():
_xsd_uri_mapping[value] = key
def InstanceUriForSchemaUri(self, uri):
"""Return the appropriate matching XML Schema instance uri for
the given XML Schema namespace uri."""
return self._xsd_uri_mapping.get(uri)
def SchemaUriForInstanceUri(self, uri):
"""Return the appropriate matching XML Schema namespace uri for
the given XML Schema instance namespace uri."""
return self._xsd_uri_mapping.get(uri)
# Namespace stuff related to WSDL.
NS_WSDL_1_1 = 'http://schemas.xmlsoap.org/wsdl/'
NS_WSDL_ALL = (NS_WSDL_1_1,)
NS_WSDL = NS_WSDL_1_1
NS_SOAP_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/soap/'
NS_HTTP_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/http/'
NS_MIME_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/mime/'
NS_SOAP_BINDING_ALL = (NS_SOAP_BINDING_1_1,)
NS_HTTP_BINDING_ALL = (NS_HTTP_BINDING_1_1,)
NS_MIME_BINDING_ALL = (NS_MIME_BINDING_1_1,)
NS_SOAP_BINDING = NS_SOAP_BINDING_1_1
NS_HTTP_BINDING = NS_HTTP_BINDING_1_1
NS_MIME_BINDING = NS_MIME_BINDING_1_1
NS_SOAP_HTTP_1_1 = 'http://schemas.xmlsoap.org/soap/http'
NS_SOAP_HTTP_ALL = (NS_SOAP_HTTP_1_1,)
NS_SOAP_HTTP = NS_SOAP_HTTP_1_1
_wsdl_uri_mapping = {
NS_WSDL_1_1 : '1.1',
}
def WSDLUriToVersion(self, uri):
"""Return the WSDL version related to a WSDL namespace uri."""
value = self._wsdl_uri_mapping.get(uri)
if value is not None:
return value
        raise ValueError(
            'Unsupported WSDL namespace uri: %s' % uri
        )
def GetWSDLUri(self, version):
attr = 'NS_WSDL_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLSoapBindingUri(self, version):
attr = 'NS_SOAP_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLHttpBindingUri(self, version):
attr = 'NS_HTTP_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLMimeBindingUri(self, version):
attr = 'NS_MIME_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLHttpTransportUri(self, version):
attr = 'NS_SOAP_HTTP_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
# Other xml namespace constants.
NS_XMLNS = 'http://www.w3.org/2000/xmlns/'
def isElement(self, node, name, nsuri=None):
"""Return true if the given node is an element with the given
name and optional namespace uri."""
if node.nodeType != node.ELEMENT_NODE:
return 0
return node.localName == name and \
(nsuri is None or self.nsUriMatch(node.namespaceURI, nsuri))
def getElement(self, node, name, nsuri=None, default=join):
"""Return the first child of node with a matching name and
namespace uri, or the default if one is provided."""
nsmatch = self.nsUriMatch
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if ((child.localName == name or name is None) and
(nsuri is None or nsmatch(child.namespaceURI, nsuri))
):
return child
if default is not join:
return default
raise KeyError, name
def getElementById(self, node, id, default=join):
"""Return the first child of node matching an id reference."""
attrget = self.getAttr
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if attrget(child, 'id') == id:
return child
if default is not join:
return default
        raise KeyError, id
def getMappingById(self, document, depth=None, element=None,
mapping=None, level=1):
"""Create an id -> element mapping of those elements within a
document that define an id attribute. The depth of the search
may be controlled by using the (1-based) depth argument."""
if document is not None:
element = document.documentElement
mapping = {}
attr = element._attrs.get('id', None)
if attr is not None:
mapping[attr.value] = element
if depth is None or depth > level:
level = level + 1
ELEMENT_NODE = element.ELEMENT_NODE
for child in element.childNodes:
if child.nodeType == ELEMENT_NODE:
self.getMappingById(None, depth, child, mapping, level)
return mapping
def getElements(self, node, name, nsuri=None):
"""Return a sequence of the child elements of the given node that
match the given name and optional namespace uri."""
nsmatch = self.nsUriMatch
result = []
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if ((child.localName == name or name is None) and (
(nsuri is None) or nsmatch(child.namespaceURI, nsuri))):
result.append(child)
return result
def hasAttr(self, node, name, nsuri=None):
"""Return true if element has attribute with the given name and
optional nsuri. If nsuri is not specified, returns true if an
attribute exists with the given name with any namespace."""
if nsuri is None:
if node.hasAttribute(name):
return True
return False
return node.hasAttributeNS(nsuri, name)
def getAttr(self, node, name, nsuri=None, default=join):
"""Return the value of the attribute named 'name' with the
optional nsuri, or the default if one is specified. If
nsuri is not specified, an attribute that matches the
given name will be returned regardless of namespace."""
if nsuri is None:
result = node._attrs.get(name, None)
if result is None:
for item in node._attrsNS.keys():
if item[1] == name:
result = node._attrsNS[item]
break
else:
result = node._attrsNS.get((nsuri, name), None)
if result is not None:
return result.value
if default is not join:
return default
return ''
def getAttrs(self, node):
"""Return a Collection of all attributes
"""
attrs = {}
for k,v in node._attrs.items():
attrs[k] = v.value
return attrs
def getElementText(self, node, preserve_ws=None):
"""Return the text value of an xml element node. Leading and trailing
whitespace is stripped from the value unless the preserve_ws flag
is passed with a true value."""
result = []
for child in node.childNodes:
nodetype = child.nodeType
if nodetype == child.TEXT_NODE or \
nodetype == child.CDATA_SECTION_NODE:
result.append(child.nodeValue)
value = join(result, '')
if preserve_ws is None:
value = strip(value)
return value
def findNamespaceURI(self, prefix, node):
"""Find a namespace uri given a prefix and a context node."""
attrkey = (self.NS_XMLNS, prefix)
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node is None:
raise DOMException('Value for prefix %s not found.' % prefix)
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = node._attrsNS.get(attrkey, None)
if result is not None:
return result.value
if hasattr(node, '__imported__'):
raise DOMException('Value for prefix %s not found.' % prefix)
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Value for prefix %s not found.' % prefix)
def findDefaultNS(self, node):
"""Return the current default namespace uri for the given node."""
attrkey = (self.NS_XMLNS, 'xmlns')
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = node._attrsNS.get(attrkey, None)
if result is not None:
return result.value
if hasattr(node, '__imported__'):
raise DOMException('Cannot determine default namespace.')
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Cannot determine default namespace.')
def findTargetNS(self, node):
"""Return the defined target namespace uri for the given node."""
attrget = self.getAttr
attrkey = (self.NS_XMLNS, 'xmlns')
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = attrget(node, 'targetNamespace', default=None)
if result is not None:
return result
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Cannot determine target namespace.')
def getTypeRef(self, element):
"""Return (namespaceURI, name) for a type attribue of the given
element, or None if the element does not have a type attribute."""
typeattr = self.getAttr(element, 'type', default=None)
if typeattr is None:
return None
parts = typeattr.split(':', 1)
if len(parts) == 2:
nsuri = self.findNamespaceURI(parts[0], element)
else:
nsuri = self.findDefaultNS(element)
return (nsuri, parts[1])
def importNode(self, document, node, deep=0):
"""Implements (well enough for our purposes) DOM node import."""
nodetype = node.nodeType
if nodetype in (node.DOCUMENT_NODE, node.DOCUMENT_TYPE_NODE):
raise DOMException('Illegal node type for importNode')
if nodetype == node.ENTITY_REFERENCE_NODE:
deep = 0
clone = node.cloneNode(deep)
self._setOwnerDoc(document, clone)
clone.__imported__ = 1
return clone
def _setOwnerDoc(self, document, node):
node.ownerDocument = document
for child in node.childNodes:
self._setOwnerDoc(document, child)
def nsUriMatch(self, value, wanted, strict=0, tt=type(())):
"""Return a true value if two namespace uri values match."""
if value == wanted or (type(wanted) is tt) and value in wanted:
return 1
if not strict and value is not None:
wanted = type(wanted) is tt and wanted or (wanted,)
value = value[-1:] != '/' and value or value[:-1]
for item in wanted:
if item == value or item[:-1] == value:
return 1
return 0
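    # Hedged examples: nsUriMatch(NS_WSDL_1_1, NS_WSDL_1_1) -> 1, and in the
    # default non-strict mode a missing trailing slash still matches, e.g.
    # nsUriMatch('http://schemas.xmlsoap.org/wsdl', NS_WSDL_1_1) -> 1.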
def createDocument(self, nsuri, qname, doctype=None):
"""Create a new writable DOM document object."""
impl = xml.dom.minidom.getDOMImplementation()
return impl.createDocument(nsuri, qname, doctype)
def loadDocument(self, data):
"""Load an xml file from a file-like object and return a DOM
document instance."""
return xml.dom.minidom.parse(data)
def loadFromURL(self, url):
"""Load an xml file from a URL and return a DOM document."""
        if isfile(url):
file = open(url, 'r')
else:
file = urlopen(url)
try:
result = self.loadDocument(file)
except Exception, ex:
file.close()
raise ParseError(('Failed to load document %s' %url,) + ex.args)
else:
file.close()
return result
DOM = DOM()
class MessageInterface:
'''Higher Level Interface, delegates to DOM singleton, must
be subclassed and implement all methods that throw NotImplementedError.
'''
def __init__(self, sw):
'''Constructor, May be extended, do not override.
sw -- soapWriter instance
'''
self.sw = None
if type(sw) != weakref.ReferenceType and sw is not None:
self.sw = weakref.ref(sw)
else:
self.sw = sw
def AddCallback(self, func, *arglist):
self.sw().AddCallback(func, *arglist)
def Known(self, obj):
return self.sw().Known(obj)
def Forget(self, obj):
return self.sw().Forget(obj)
def canonicalize(self):
'''canonicalize the underlying DOM, and return as string.
'''
raise NotImplementedError, ''
def createDocument(self, namespaceURI=SOAP.ENV, localName='Envelope'):
'''create Document
'''
raise NotImplementedError, ''
def createAppendElement(self, namespaceURI, localName):
'''create and append element(namespaceURI,localName), and return
the node.
'''
raise NotImplementedError, ''
def findNamespaceURI(self, qualifiedName):
raise NotImplementedError, ''
def resolvePrefix(self, prefix):
raise NotImplementedError, ''
def setAttributeNS(self, namespaceURI, localName, value):
'''set attribute (namespaceURI, localName)=value
'''
raise NotImplementedError, ''
def setAttributeType(self, namespaceURI, localName):
'''set attribute xsi:type=(namespaceURI, localName)
'''
raise NotImplementedError, ''
def setNamespaceAttribute(self, namespaceURI, prefix):
'''set namespace attribute xmlns:prefix=namespaceURI
'''
raise NotImplementedError, ''
class ElementProxy(Base, MessageInterface):
'''
'''
_soap_env_prefix = 'SOAP-ENV'
_soap_enc_prefix = 'SOAP-ENC'
_zsi_prefix = 'ZSI'
_xsd_prefix = 'xsd'
_xsi_prefix = 'xsi'
_xml_prefix = 'xml'
_xmlns_prefix = 'xmlns'
_soap_env_nsuri = SOAP.ENV
_soap_enc_nsuri = SOAP.ENC
_zsi_nsuri = ZSI_SCHEMA_URI
_xsd_nsuri = SCHEMA.XSD3
_xsi_nsuri = SCHEMA.XSI3
_xml_nsuri = XMLNS.XML
_xmlns_nsuri = XMLNS.BASE
standard_ns = {\
_xml_prefix:_xml_nsuri,
_xmlns_prefix:_xmlns_nsuri
}
reserved_ns = {\
_soap_env_prefix:_soap_env_nsuri,
_soap_enc_prefix:_soap_enc_nsuri,
_zsi_prefix:_zsi_nsuri,
_xsd_prefix:_xsd_nsuri,
_xsi_prefix:_xsi_nsuri,
}
name = None
namespaceURI = None
def __init__(self, sw, message=None):
'''Initialize.
sw -- SoapWriter
'''
self._indx = 0
MessageInterface.__init__(self, sw)
Base.__init__(self)
self._dom = DOM
self.node = None
if type(message) in (types.StringType,types.UnicodeType):
self.loadFromString(message)
elif isinstance(message, ElementProxy):
self.node = message._getNode()
else:
self.node = message
self.processorNss = self.standard_ns.copy()
self.processorNss.update(self.reserved_ns)
def __str__(self):
return self.toString()
def evaluate(self, expression, processorNss=None):
'''expression -- XPath compiled expression
'''
from Ft.Xml import XPath
if not processorNss:
context = XPath.Context.Context(self.node, processorNss=self.processorNss)
else:
context = XPath.Context.Context(self.node, processorNss=processorNss)
nodes = expression.evaluate(context)
return map(lambda node: ElementProxy(self.sw,node), nodes)
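    # Hedged usage sketch (requires 4Suite's Ft.Xml):
    #   from Ft.Xml.XPath import Compile
    #   bodies = proxy.evaluate(Compile('.//SOAP-ENV:Body'))
    # returns a list of ElementProxy instances wrapping the matched nodes.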
#############################################
# Methods for checking/setting the
# classes (namespaceURI,name) node.
#############################################
def checkNode(self, namespaceURI=None, localName=None):
'''
namespaceURI -- namespace of element
localName -- local name of element
'''
namespaceURI = namespaceURI or self.namespaceURI
localName = localName or self.name
check = False
if localName and self.node:
check = self._dom.isElement(self.node, localName, namespaceURI)
if not check:
raise NamespaceError, 'unexpected node type %s, expecting %s' %(self.node, localName)
def setNode(self, node=None):
if node:
if isinstance(node, ElementProxy):
self.node = node._getNode()
else:
self.node = node
elif self.node:
node = self._dom.getElement(self.node, self.name, self.namespaceURI, default=None)
if not node:
                raise NamespaceError, "can't find element (%s,%s)" %(self.namespaceURI,self.name)
self.node = node
else:
#self.node = self._dom.create(self.node, self.name, self.namespaceURI, default=None)
self.createDocument(self.namespaceURI, localName=self.name, doctype=None)
self.checkNode()
#############################################
# Wrapper Methods for direct DOM Element Node access
#############################################
def _getNode(self):
return self.node
def _getElements(self):
return self._dom.getElements(self.node, name=None)
def _getOwnerDocument(self):
return self.node.ownerDocument or self.node
def _getUniquePrefix(self):
        '''We need a prefix that is not already bound anywhere up the
        tree, because when the current node is attached its namespace
        declarations are copied into the parent node.
        '''
while 1:
self._indx += 1
prefix = 'ns%d' %self._indx
try:
self._dom.findNamespaceURI(prefix, self._getNode())
except DOMException, ex:
break
return prefix
def _getPrefix(self, node, nsuri):
'''
Keyword arguments:
node -- DOM Element Node
nsuri -- namespace of attribute value
'''
try:
if node and (node.nodeType == node.ELEMENT_NODE) and \
(nsuri == self._dom.findDefaultNS(node)):
return None
except DOMException, ex:
pass
if nsuri == XMLNS.XML:
return self._xml_prefix
if node.nodeType == Node.ELEMENT_NODE:
for attr in node.attributes.values():
if attr.namespaceURI == XMLNS.BASE \
and nsuri == attr.value:
return attr.localName
else:
if node.parentNode:
return self._getPrefix(node.parentNode, nsuri)
raise NamespaceError, 'namespaceURI "%s" is not defined' %nsuri
def _appendChild(self, node):
'''
Keyword arguments:
node -- DOM Element Node
'''
if node is None:
raise TypeError, 'node is None'
self.node.appendChild(node)
def _insertBefore(self, newChild, refChild):
'''
Keyword arguments:
child -- DOM Element Node to insert
refChild -- DOM Element Node
'''
self.node.insertBefore(newChild, refChild)
def _setAttributeNS(self, namespaceURI, qualifiedName, value):
'''
Keyword arguments:
namespaceURI -- namespace of attribute
qualifiedName -- qualified name of new attribute value
value -- value of attribute
'''
self.node.setAttributeNS(namespaceURI, qualifiedName, value)
#############################################
#General Methods
#############################################
def isFault(self):
'''check to see if this is a soap:fault message.
'''
return False
def getPrefix(self, namespaceURI):
try:
prefix = self._getPrefix(node=self.node, nsuri=namespaceURI)
except NamespaceError, ex:
prefix = self._getUniquePrefix()
self.setNamespaceAttribute(prefix, namespaceURI)
return prefix
def getDocument(self):
return self._getOwnerDocument()
def setDocument(self, document):
self.node = document
def importFromString(self, xmlString):
doc = self._dom.loadDocument(StringIO(xmlString))
node = self._dom.getElement(doc, name=None)
clone = self.importNode(node)
self._appendChild(clone)
def importNode(self, node):
if isinstance(node, ElementProxy):
node = node._getNode()
return self._dom.importNode(self._getOwnerDocument(), node, deep=1)
def loadFromString(self, data):
self.node = self._dom.loadDocument(StringIO(data))
def canonicalize(self):
return Canonicalize(self.node)
def toString(self):
return self.canonicalize()
def createDocument(self, namespaceURI, localName, doctype=None):
        '''If a namespace is given it must be the SOAP envelope namespace;
        otherwise an empty document may be constructed.
        '''
prefix = self._soap_env_prefix
if namespaceURI == self.reserved_ns[prefix]:
qualifiedName = '%s:%s' %(prefix,localName)
elif namespaceURI is localName is None:
self.node = self._dom.createDocument(None,None,None)
return
else:
raise KeyError, 'only support creation of document in %s' %self.reserved_ns[prefix]
document = self._dom.createDocument(nsuri=namespaceURI, qname=qualifiedName, doctype=doctype)
self.node = document.childNodes[0]
#set up reserved namespace attributes
for prefix,nsuri in self.reserved_ns.items():
self._setAttributeNS(namespaceURI=self._xmlns_nsuri,
qualifiedName='%s:%s' %(self._xmlns_prefix,prefix),
value=nsuri)
#############################################
#Methods for attributes
#############################################
def hasAttribute(self, namespaceURI, localName):
return self._dom.hasAttr(self._getNode(), name=localName, nsuri=namespaceURI)
def setAttributeType(self, namespaceURI, localName):
'''set xsi:type
Keyword arguments:
namespaceURI -- namespace of attribute value
localName -- name of new attribute value
'''
self.logger.debug('setAttributeType: (%s,%s)', namespaceURI, localName)
value = localName
if namespaceURI:
value = '%s:%s' %(self.getPrefix(namespaceURI),localName)
xsi_prefix = self.getPrefix(self._xsi_nsuri)
self._setAttributeNS(self._xsi_nsuri, '%s:type' %xsi_prefix, value)
    def createAttributeNS(self, namespace, name, value):
        document = self._getOwnerDocument()
        # minidom's createAttributeNS only takes (namespaceURI, qualifiedName);
        # the value must be assigned on the returned attribute node.
        attrNode = document.createAttributeNS(namespace, name)
        attrNode.value = value
        return attrNode
def setAttributeNS(self, namespaceURI, localName, value):
'''
Keyword arguments:
namespaceURI -- namespace of attribute to create, None is for
attributes in no namespace.
localName -- local name of new attribute
value -- value of new attribute
'''
prefix = None
if namespaceURI:
try:
prefix = self.getPrefix(namespaceURI)
except KeyError, ex:
prefix = 'ns2'
self.setNamespaceAttribute(prefix, namespaceURI)
qualifiedName = localName
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
self._setAttributeNS(namespaceURI, qualifiedName, value)
def setNamespaceAttribute(self, prefix, namespaceURI):
'''
Keyword arguments:
prefix -- xmlns prefix
namespaceURI -- value of prefix
'''
self._setAttributeNS(XMLNS.BASE, 'xmlns:%s' %prefix, namespaceURI)
#############################################
#Methods for elements
#############################################
def createElementNS(self, namespace, qname):
'''
Keyword arguments:
namespace -- namespace of element to create
qname -- qualified name of new element
'''
document = self._getOwnerDocument()
node = document.createElementNS(namespace, qname)
return ElementProxy(self.sw, node)
def createAppendSetElement(self, namespaceURI, localName, prefix=None):
'''Create a new element (namespaceURI,name), append it
to current node, then set it to be the current node.
Keyword arguments:
namespaceURI -- namespace of element to create
localName -- local name of new element
prefix -- if namespaceURI is not defined, declare prefix. defaults
to 'ns1' if left unspecified.
'''
        node = self.createAppendElement(namespaceURI, localName, prefix=prefix)
        # Set the newly created element to be the current node.
        self.node = node._getNode()
def createAppendElement(self, namespaceURI, localName, prefix=None):
'''Create a new element (namespaceURI,name), append it
to current node, and return the newly created node.
Keyword arguments:
namespaceURI -- namespace of element to create
localName -- local name of new element
prefix -- if namespaceURI is not defined, declare prefix. defaults
to 'ns1' if left unspecified.
'''
declare = False
qualifiedName = localName
if namespaceURI:
try:
prefix = self.getPrefix(namespaceURI)
except:
declare = True
prefix = prefix or self._getUniquePrefix()
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
node = self.createElementNS(namespaceURI, qualifiedName)
if declare:
node._setAttributeNS(XMLNS.BASE, 'xmlns:%s' %prefix, namespaceURI)
self._appendChild(node=node._getNode())
return node
def createInsertBefore(self, namespaceURI, localName, refChild):
qualifiedName = localName
prefix = self.getPrefix(namespaceURI)
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
node = self.createElementNS(namespaceURI, qualifiedName)
self._insertBefore(newChild=node._getNode(), refChild=refChild._getNode())
return node
def getElement(self, namespaceURI, localName):
'''
Keyword arguments:
namespaceURI -- namespace of element
localName -- local name of element
'''
node = self._dom.getElement(self.node, localName, namespaceURI, default=None)
if node:
return ElementProxy(self.sw, node)
return None
def getAttributeValue(self, namespaceURI, localName):
'''
Keyword arguments:
namespaceURI -- namespace of attribute
localName -- local name of attribute
'''
if self.hasAttribute(namespaceURI, localName):
attr = self.node.getAttributeNodeNS(namespaceURI,localName)
return attr.value
return None
def getValue(self):
return self._dom.getElementText(self.node, preserve_ws=True)
#############################################
#Methods for text nodes
#############################################
def createAppendTextNode(self, pyobj):
node = self.createTextNode(pyobj)
self._appendChild(node=node._getNode())
return node
def createTextNode(self, pyobj):
document = self._getOwnerDocument()
node = document.createTextNode(pyobj)
return ElementProxy(self.sw, node)
#############################################
#Methods for retrieving namespaceURI's
#############################################
def findNamespaceURI(self, qualifiedName):
parts = SplitQName(qualifiedName)
element = self._getNode()
if len(parts) == 1:
            return (self._dom.findTargetNS(element), parts[0])
return self._dom.findNamespaceURI(parts[0], element)
def resolvePrefix(self, prefix):
element = self._getNode()
return self._dom.findNamespaceURI(prefix, element)
def getSOAPEnvURI(self):
return self._soap_env_nsuri
def isEmpty(self):
return not self.node
class Collection(UserDict):
"""Helper class for maintaining ordered named collections."""
default = lambda self,k: k.name
def __init__(self, parent, key=None):
UserDict.__init__(self)
self.parent = weakref.ref(parent)
self.list = []
self._func = key or self.default
def __getitem__(self, key):
if type(key) is type(1):
return self.list[key]
return self.data[key]
def __setitem__(self, key, item):
item.parent = weakref.ref(self)
self.list.append(item)
self.data[key] = item
def keys(self):
return map(lambda i: self._func(i), self.list)
def items(self):
return map(lambda i: (self._func(i), i), self.list)
def values(self):
return self.list
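# Hedged sketch of Collection semantics: items keep insertion order and can
# be fetched by integer position or by the key function (item.name by
# default), e.g. after c[item.name] = item, both c[0] and c[item.name]
# return the same object.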
class CollectionNS(UserDict):
"""Helper class for maintaining ordered named collections."""
default = lambda self,k: k.name
def __init__(self, parent, key=None):
UserDict.__init__(self)
self.parent = weakref.ref(parent)
self.targetNamespace = None
self.list = []
self._func = key or self.default
def __getitem__(self, key):
self.targetNamespace = self.parent().targetNamespace
if type(key) is types.IntType:
return self.list[key]
elif self.__isSequence(key):
nsuri,name = key
return self.data[nsuri][name]
return self.data[self.parent().targetNamespace][key]
def __setitem__(self, key, item):
item.parent = weakref.ref(self)
self.list.append(item)
targetNamespace = getattr(item, 'targetNamespace', self.parent().targetNamespace)
if not self.data.has_key(targetNamespace):
self.data[targetNamespace] = {}
self.data[targetNamespace][key] = item
def __isSequence(self, key):
return (type(key) in (types.TupleType,types.ListType) and len(key) == 2)
def keys(self):
keys = []
for tns in self.data.keys():
keys.append(map(lambda i: (tns,self._func(i)), self.data[tns].values()))
return keys
def items(self):
return map(lambda i: (self._func(i), i), self.list)
def values(self):
return self.list
# This is a runtime guerilla patch for pulldom (used by minidom) so
# that xml namespace declaration attributes are not lost in parsing.
# We need them to do correct QName linking for XML Schema and WSDL.
# The patch has been submitted to SF for the next Python version.
from xml.dom.pulldom import PullDOM, START_ELEMENT
if 1:
def startPrefixMapping(self, prefix, uri):
if not hasattr(self, '_xmlns_attrs'):
self._xmlns_attrs = []
self._xmlns_attrs.append((prefix or 'xmlns', uri))
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or ''
PullDOM.startPrefixMapping = startPrefixMapping
def startElementNS(self, name, tagName , attrs):
# Retrieve xml namespace declaration attributes.
xmlns_uri = 'http://www.w3.org/2000/xmlns/'
xmlns_attrs = getattr(self, '_xmlns_attrs', None)
if xmlns_attrs is not None:
for aname, value in xmlns_attrs:
attrs._attrs[(xmlns_uri, aname)] = value
self._xmlns_attrs = []
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri == xmlns_uri:
if a_localname == 'xmlns':
qname = a_localname
else:
qname = 'xmlns:' + a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
elif a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
PullDOM.startElementNS = startElementNS
#
# This is a runtime guerilla patch for minidom so
# that xmlns prefixed attributes dont raise AttributeErrors
# during cloning.
#
# Namespace declarations can appear in any start-tag, must look for xmlns
# prefixed attribute names during cloning.
#
# key (attr.namespaceURI, tag)
# ('http://www.w3.org/2000/xmlns/', u'xsd') <xml.dom.minidom.Attr instance at 0x82227c4>
# ('http://www.w3.org/2000/xmlns/', 'xmlns') <xml.dom.minidom.Attr instance at 0x8414b3c>
#
# xml.dom.minidom.Attr.nodeName = xmlns:xsd
# xml.dom.minidom.Attr.value = = http://www.w3.org/2001/XMLSchema
if 1:
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI,
node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
prefix, tag = xml.dom.minidom._nssplit(attr.nodeName)
if prefix == 'xmlns':
a = clone.getAttributeNodeNS(attr.namespaceURI, tag)
elif prefix:
a = clone.getAttributeNodeNS(attr.namespaceURI, tag)
else:
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.nodeName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = xml.dom.minidom._clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == xml.dom.minidom.Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = xml.dom.minidom._clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == xml.dom.minidom.Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == xml.dom.minidom.Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target,
node.data)
elif node.nodeType == xml.dom.minidom.Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == xml.dom.minidom.Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
node.nodeName)
clone.specified = True
clone.value = node.value
elif node.nodeType == xml.dom.minidom.Node.DOCUMENT_TYPE_NODE:
assert node.ownerDocument is not newOwnerDocument
operation = xml.dom.UserDataHandler.NODE_IMPORTED
clone = newOwnerDocument.implementation.createDocumentType(
node.name, node.publicId, node.systemId)
clone.ownerDocument = newOwnerDocument
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in node.notations._seq:
notation = xml.dom.minidom.Notation(n.nodeName, n.publicId, n.systemId)
notation.ownerDocument = newOwnerDocument
clone.notations._seq.append(notation)
if hasattr(n, '_call_user_data_handler'):
n._call_user_data_handler(operation, n, notation)
for e in node.entities._seq:
entity = xml.dom.minidom.Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
                        e._call_user_data_handler(operation, e, entity)
else:
# Note the cloning of Document and DocumentType nodes is
            # implementation specific. minidom handles those cases
# directly in the cloneNode() methods.
raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
# Check for _call_user_data_handler() since this could conceivably
# used with other DOM implementations (one of the FourThought
# DOMs, perhaps?).
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
xml.dom.minidom._clone_node = _clone_node
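# Added illustration (not part of the original patch): with the _clone_node
# override in place, xmlns declarations survive Document.importNode. The
# namespace values below are example data only; the demo is never called.
def _demo_clone_preserves_xmlns():
    from xml.dom.minidom import getDOMImplementation
    XMLNS = 'http://www.w3.org/2000/xmlns/'
    impl = getDOMImplementation()
    doc = impl.createDocument(None, 'root', None)
    doc.documentElement.setAttributeNS(
        XMLNS, 'xmlns:xsd', 'http://www.w3.org/2001/XMLSchema')
    other = impl.createDocument(None, 'other', None)
    # Deep import triggers _clone_node; the xmlns:xsd attribute is kept.
    imported = other.importNode(doc.documentElement, True)
    return imported.getAttributeNS(XMLNS, 'xsd')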
|
numerigraphe/odoo
|
refs/heads/8.0
|
addons/purchase_analytic_plans/__init__.py
|
441
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Purchase
#----------------------------------------------------------
import purchase_analytic_plans
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
faywong/FFPlayer
|
refs/heads/trunk
|
project/jni/python/src/Lib/distutils/unixccompiler.py
|
33
|
"""distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
  * libraries specified with -llib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
__revision__ = "$Id: unixccompiler.py 65012 2008-07-16 13:24:06Z jesse.noller $"
import os, sys
from types import StringType, NoneType
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
# SunCCompiler, and I suspect down that road lies madness.
# * even if we don't know a warning flag from an optimization flag,
# we need some way for outsiders to feed preprocessor/compiler/linker
# flags in to us -- eg. a sysadmin might want to mandate certain flags
# via a site config file, or a user might want to set something for
# compiling this module distribution only via the setup.py command
# line, whatever. As long as these options come from something on the
# current system, they can be as system-dependent as they like, and we
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
def _darwin_compiler_fixup(compiler_so, cc_args):
"""
This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one of them in extra_compile_flags.
This is needed because '-arch ARCH' adds another architecture to the
build, without a way to remove an architecture. Furthermore GCC will
barf if multiple '-isysroot' arguments are present.
"""
stripArch = stripSysroot = 0
compiler_so = list(compiler_so)
kernel_version = os.uname()[2] # 8.4.3
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
        # OS X before 10.4.0 doesn't support -arch and -isysroot at all.
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
stripSysroot = '-isysroot' in cc_args
if stripArch or 'ARCHFLAGS' in os.environ:
while 1:
try:
index = compiler_so.index('-arch')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
if stripSysroot:
try:
index = compiler_so.index('-isysroot')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
pass
# Check if the SDK that is used during compilation actually exists,
# the universal build requires the usage of a universal SDK and not all
# users have that installed by default.
sysroot = None
if '-isysroot' in cc_args:
idx = cc_args.index('-isysroot')
sysroot = cc_args[idx+1]
elif '-isysroot' in compiler_so:
idx = compiler_so.index('-isysroot')
sysroot = compiler_so[idx+1]
if sysroot and not os.path.isdir(sysroot):
log.warn("Compiling with an SDK that doesn't seem to exist: %s",
sysroot)
log.warn("Please check your Xcode installation")
return compiler_so
class UnixCCompiler(CCompiler):
compiler_type = 'unix'
# These are used by CCompiler in two places: the constructor sets
# instance attributes 'preprocessor', 'compiler', etc. from them, and
# 'set_executable()' allows any of these to be set. The defaults here
# are pretty generic; they will probably have to be set by an outsider
# (eg. using information discovered by the sysconfig about building
# Python extensions).
executables = {'preprocessor' : None,
'compiler' : ["cc"],
'compiler_so' : ["cc"],
'compiler_cxx' : ["cc"],
'linker_so' : ["cc", "-shared"],
'linker_exe' : ["cc"],
'archiver' : ["ar", "-cr"],
'ranlib' : None,
}
if sys.platform[:6] == "darwin":
executables['ranlib'] = ["ranlib"]
# Needed for the filename generation methods provided by the base
# class, CCompiler. NB. whoever instantiates/uses a particular
# UnixCCompiler instance should set 'shared_lib_ext' -- we set a
# reasonable common default here, but it's not necessarily used on all
# Unices!
src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".so"
dylib_lib_extension = ".dylib"
static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
if sys.platform == "cygwin":
exe_extension = ".exe"
def preprocess(self, source,
output_file=None, macros=None, include_dirs=None,
extra_preargs=None, extra_postargs=None):
ignore, macros, include_dirs = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = self.preprocessor + pp_opts
if output_file:
pp_args.extend(['-o', output_file])
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or we're
# generating output to stdout, or there's a target output file and
# the source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
raise CompileError, msg
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
compiler_so = _darwin_compiler_fixup(compiler_so, cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
self.mkpath(os.path.dirname(output_filename))
self.spawn(self.archiver +
[output_filename] +
objects + self.objects)
            # Not many Unices require ranlib anymore -- SunOS 4.x is, I
# think the only major Unix that does. Maybe we need some
# platform intelligence here to skip ranlib if it's not
# needed -- or maybe Python's configure script took care of
# it for us, hence the check for leading colon.
if self.ranlib:
try:
self.spawn(self.ranlib + [output_filename])
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = \
self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if type(output_dir) not in (StringType, NoneType):
raise TypeError, "'output_dir' must be a string or None"
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ld_args = (objects + self.objects +
lib_opts + ['-o', output_filename])
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
if target_lang == "c++" and self.compiler_cxx:
# skip over environment variable settings if /usr/bin/env
# is used to set up the linker's environment.
# This is needed on OSX. Note: this assumes that the
# normal and C++ compiler have the same environment
# settings.
i = 0
if os.path.basename(linker[0]) == "env":
i = 1
while '=' in linker[i]:
i = i + 1
linker[i] = self.compiler_cxx[i]
if sys.platform == 'darwin':
linker = _darwin_compiler_fixup(linker, ld_args)
self.spawn(linker + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "-L" + dir
def runtime_library_dir_option(self, dir):
# XXX Hackish, at the very least. See Python bug #445902:
# http://sourceforge.net/tracker/index.php
# ?func=detail&aid=445902&group_id=5470&atid=105470
# Linkers on different platforms need different options to
# specify that directories need to be added to the list of
# directories searched for dependencies when a dynamic library
# is sought. GCC has to be told to pass the -R option through
# to the linker, whereas other compilers just know this.
# Other compilers may need something slightly different. At
# this time, there's no way to determine this information from
# the configuration data stored in the Python installation, so
# we use this hack.
compiler = os.path.basename(sysconfig.get_config_var("CC"))
if sys.platform[:6] == "darwin":
# MacOSX's linker doesn't understand the -R flag at all
return "-L" + dir
elif sys.platform[:5] == "hp-ux":
return "+s -L" + dir
elif sys.platform[:7] == "irix646" or sys.platform[:6] == "osf1V5":
return ["-rpath", dir]
elif compiler[:3] == "gcc" or compiler[:3] == "g++":
return "-Wl,-R" + dir
else:
return "-R" + dir
def library_option(self, lib):
return "-l" + lib
def find_library_file(self, dirs, lib, debug=0):
shared_f = self.library_filename(lib, lib_type='shared')
dylib_f = self.library_filename(lib, lib_type='dylib')
static_f = self.library_filename(lib, lib_type='static')
for dir in dirs:
shared = os.path.join(dir, shared_f)
dylib = os.path.join(dir, dylib_f)
static = os.path.join(dir, static_f)
# We're second-guessing the linker here, with not much hard
# data to go on: GCC seems to prefer the shared library, so I'm
# assuming that *all* Unix C compilers do. And of course I'm
# ignoring even GCC's "-static" option. So sue me.
if os.path.exists(dylib):
return dylib
elif os.path.exists(shared):
return shared
elif os.path.exists(static):
return static
# Oops, didn't find it in *any* of 'dirs'
return None
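# Usage sketch (added for illustration; not part of distutils). The
# 'executables' defaults above are normally overridden by the caller,
# typically with values discovered via sysconfig. 'example.c' is a
# hypothetical source file; the demo function is never called.
def _usage_sketch():
    cc = UnixCCompiler(verbose=1)
    cc.set_executable('compiler_so', ['cc', '-O2', '-fPIC'])
    objects = cc.compile(['example.c'])
    cc.link_shared_lib(objects, 'example')  # emits libexample.so
    return objects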
|
ow2-compatibleone/accords-platform
|
refs/heads/master
|
testsuite/basic/accords.py
|
3
|
# vim: set et sw=4 ts=4 ai:
import unittest
import utils
from testbin import TestBin
class TestBinAccords(TestBin, unittest.TestCase):
def setUp(self):
self.bin = 'accords'
self.return_value = 1
def tearDown(self):
pass
|
withtone/depot_tools
|
refs/heads/master
|
git_retry.py
|
31
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import subprocess
import sys
import threading
import time
from git_common import GIT_EXE, GIT_TRANSIENT_ERRORS_RE
class TeeThread(threading.Thread):
def __init__(self, fd, out_fd, name):
super(TeeThread, self).__init__(name='git-retry.tee.%s' % (name,))
self.data = None
self.fd = fd
self.out_fd = out_fd
def run(self):
chunks = []
for line in self.fd:
chunks.append(line)
self.out_fd.write(line)
self.data = ''.join(chunks)
class GitRetry(object):
logger = logging.getLogger('git-retry')
DEFAULT_DELAY_SECS = 3.0
DEFAULT_RETRY_COUNT = 5
def __init__(self, retry_count=None, delay=None, delay_factor=None):
self.retry_count = retry_count or self.DEFAULT_RETRY_COUNT
self.delay = max(delay, 0) if delay else 0
self.delay_factor = max(delay_factor, 0) if delay_factor else 0
def shouldRetry(self, stderr):
m = GIT_TRANSIENT_ERRORS_RE.search(stderr)
if not m:
return False
self.logger.info("Encountered known transient error: [%s]",
stderr[m.start(): m.end()])
return True
@staticmethod
def execute(*args):
args = (GIT_EXE,) + args
proc = subprocess.Popen(
args,
stderr=subprocess.PIPE,
)
stderr_tee = TeeThread(proc.stderr, sys.stderr, 'stderr')
    # Start the tee for the process's 'stderr' ('stdout' is not captured).
stderr_tee.start()
try:
proc.wait()
except KeyboardInterrupt:
proc.kill()
raise
finally:
stderr_tee.join()
return proc.returncode, None, stderr_tee.data
def computeDelay(self, iteration):
"""Returns: the delay (in seconds) for a given iteration
The first iteration has a delay of '0'.
Args:
iteration: (int) The iteration index (starting with zero as the first
iteration)
"""
if (not self.delay) or (iteration == 0):
return 0
if self.delay_factor == 0:
# Linear delay
return iteration * self.delay
# Exponential delay
return (self.delay_factor ** (iteration - 1)) * self.delay
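  # Added illustration: with delay=3.0 and delay_factor=2 the sleeps before
  # successive attempts are 0, 3, 6, 12, 24, ... seconds
  # (delay * delay_factor**(iteration - 1)); with delay_factor=0 the sleeps
  # grow linearly instead: 0, 3, 6, 9, ...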
def __call__(self, *args):
returncode = 0
for i in xrange(self.retry_count):
# If the previous run failed and a delay is configured, delay before the
# next run.
delay = self.computeDelay(i)
if delay > 0:
self.logger.info("Delaying for [%s second(s)] until next retry", delay)
time.sleep(delay)
self.logger.debug("Executing subprocess (%d/%d) with arguments: %s",
(i+1), self.retry_count, args)
returncode, _, stderr = self.execute(*args)
self.logger.debug("Process terminated with return code: %d", returncode)
if returncode == 0:
break
if not self.shouldRetry(stderr):
self.logger.error("Process failure was not known to be transient; "
"terminating with return code %d", returncode)
break
return returncode
def main(args):
parser = optparse.OptionParser()
parser.disable_interspersed_args()
parser.add_option('-v', '--verbose',
action='count', default=0,
help="Increase verbosity; can be specified multiple times")
parser.add_option('-c', '--retry-count', metavar='COUNT',
type=int, default=GitRetry.DEFAULT_RETRY_COUNT,
help="Number of times to retry (default=%default)")
parser.add_option('-d', '--delay', metavar='SECONDS',
type=float, default=GitRetry.DEFAULT_DELAY_SECS,
help="Specifies the amount of time (in seconds) to wait "
"between successive retries (default=%default). This "
"can be zero.")
parser.add_option('-D', '--delay-factor', metavar='FACTOR',
type=int, default=2,
help="The exponential factor to apply to delays in between "
"successive failures (default=%default). If this is "
"zero, delays will increase linearly. Set this to "
"one to have a constant (non-increasing) delay.")
opts, args = parser.parse_args(args)
# Configure logging verbosity
if opts.verbose == 0:
logging.getLogger().setLevel(logging.WARNING)
elif opts.verbose == 1:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.DEBUG)
# Execute retries
retry = GitRetry(
retry_count=opts.retry_count,
delay=opts.delay,
delay_factor=opts.delay_factor,
)
return retry(*args)
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.WARNING)
try:
sys.exit(main(sys.argv[2:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
boooka/GeoPowerOff
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/admin/migrations/__init__.py
|
12133432
| |
pdelsante/thug
|
refs/heads/master
|
thug/DOM/W3C/Style/__init__.py
|
12133432
| |
carlosperate/mu
|
refs/heads/master
|
mu/contrib/__init__.py
|
12133432
| |
starqiu/PythonLearn
|
refs/heads/master
|
Django-1.6.5/tests/admin_views/__init__.py
|
12133432
| |
neo/django-allauth
|
refs/heads/master
|
allauth/socialaccount/providers/orcid/__init__.py
|
12133432
| |
gangadhar-kadam/smrterpfrappe
|
refs/heads/develop
|
frappe/core/doctype/user/__init__.py
|
12133432
| |
TheJJ100100/bedrock
|
refs/heads/master
|
bedrock/events/tests/__init__.py
|
12133432
| |
halfak/yamlconf
|
refs/heads/master
|
yamlconf/tests/__init__.py
|
12133432
| |
sauliusl/scipy
|
refs/heads/master
|
scipy/linalg/tests/test_solvers.py
|
95
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import inv
from numpy.testing import TestCase, rand, run_module_suite, assert_raises, \
assert_equal, assert_almost_equal, assert_array_almost_equal, assert_, \
assert_allclose
from scipy.linalg import solve_sylvester, solve_lyapunov, \
solve_discrete_lyapunov, solve_continuous_are, solve_discrete_are
class TestSolveLyapunov(TestCase):
cases = [
(np.array([[1, 2], [3, 4]]),
np.array([[9, 10], [11, 12]])),
# a, q all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a real; q complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a complex; q real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0, 2.0],[-1.0, 2.0]])),
# An example from Kitagawa, 1977
(np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
[1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
[0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
# Companion matrix example. a complex; q real; a.shape[0] = 11
(np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
0.010+0.j],
[1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j]]),
np.eye(11)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.array(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T))),
]
def check_continuous_case(self, a, q):
x = solve_lyapunov(a, q)
assert_array_almost_equal(np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
def check_discrete_case(self, a, q, method=None):
x = solve_discrete_lyapunov(a, q, method=method)
assert_array_almost_equal(np.dot(np.dot(a, x),a.conj().transpose()) - x, -1.0*q)
def test_cases(self):
for case in self.cases:
self.check_continuous_case(case[0], case[1])
self.check_discrete_case(case[0], case[1])
self.check_discrete_case(case[0], case[1], method='direct')
self.check_discrete_case(case[0], case[1], method='bilinear')
class TestSolveContinuousARE(TestCase):
cases = [
# An example from Laub, A. J.
# (http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf)
(np.matrix([[0, 1], [0, 0]]),
np.matrix([[0,], [1,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Difficult from a numerical standpoint, again from Laub, A. J.
(np.matrix([[4, 3], [-9.0/2.0, -7.0/2.0]]),
np.matrix([[1,], [-1,]]),
np.matrix([[9, 6], [6, 4]]),
np.matrix([[1,],])),
# Complex a; real b, q, r
(np.matrix([[0, 1-2j], [0, -3j]]),
np.matrix([[0,], [1,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, q, r; complex b
(np.matrix([[0, 1], [0, -1]]),
np.matrix([[-2j,], [1j,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, b; complex q, r
(np.matrix([[0, 1], [0, -1]]),
np.matrix([[1, 2], [1, 3]]),
np.matrix([[1, -3j], [1-1j, 2]]),
np.matrix([[-2j, 2], [1j, 3]])),
]
def check_case(self, a, b, q, r):
"""Checks if (A'X + XA - XBR^-1B'X+Q=0) is true"""
x = solve_continuous_are(a, b, q, r)
assert_array_almost_equal(
a.getH()*x + x*a - x*b*inv(r)*b.getH()*x + q, 0.0)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2], case[3])
class TestSolveDiscreteARE(TestCase):
cases = [
# Difficult from a numerical standpoint, again from Laub, A. J.
# (http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf)
(np.matrix([[4, 3], [-9.0/2.0, -7.0/2.0]]),
np.matrix([[1,], [-1,]]),
np.matrix([[9, 6], [6, 4]]),
np.matrix([[1,],])),
# Another example from Laub
(np.matrix([[0.9512, 0], [0, 0.9048]]),
np.matrix([[4.877, 4.877], [-1.1895, 3.569]]),
np.matrix([[0.005, 0],[0, 0.02]]),
np.matrix([[1.0/3.0, 0],[0, 3]])),
# Complex a; real b, q, r
(np.matrix([[2, 1-2j], [0, -3j]]),
np.matrix([[0,], [1,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, q, r; complex b
(np.matrix([[2, 1], [0, -1]]),
np.matrix([[-2j,], [1j,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, b; complex q, r
(np.matrix([[3, 1], [0, -1]]),
np.matrix([[1, 2], [1, 3]]),
np.matrix([[1, -3j], [1-1j, 2]]),
np.matrix([[-2j, 2], [1j, 3]])),
]
def check_case(self, a, b, q, r):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
x = solve_discrete_are(a, b, q, r)
assert_array_almost_equal(
a.getH()*x*a-(a.getH()*x*b)*inv(r+b.getH()*x*b)*(b.getH()*x*a)+q-x, 0.0)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2], case[3])
class TestSolveSylvester(TestCase):
cases = [
# a, b, c all real.
(np.array([[1, 2], [0, 4]]),
np.array([[5, 6], [0, 8]]),
np.array([[9, 10], [11, 12]])),
        # a, b, c all real, 4x4. a and b have non-trivial 2x2 blocks in their
# quasi-triangular form.
(np.array([[1.0, 0, 0, 0], [0, 1.0, 2.0, 0.0], [0, 0, 3.0, -4], [0, 0, 2, 5]]),
np.array([[2.0, 0, 0,1.0], [0, 1.0, 0.0, 0.0], [0, 0, 1.0, -1], [0, 0, 1, 1]]),
np.array([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])),
# a, b, c all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 2j], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a and b real; c complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a and c complex; b real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a complex; b and c real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0, 2.0],[-1.0, 2.0]])),
# not square matrices, real
(np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5]]),
np.array([[1, 2], [3, 4], [5, 6]])),
# not square matrices, complex
(np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5-1j]]),
np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
]
def check_case(self, a, b, c):
x = solve_sylvester(a, b, c)
assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2])
def test_trivial(self):
a = np.array([[1.0, 0.0], [0.0, 1.0]])
b = np.array([[1.0]])
c = np.array([2.0, 2.0]).reshape(-1,1)
x = solve_sylvester(a, b, c)
assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1,1))
if __name__ == "__main__":
run_module_suite()
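# Added illustration (not part of the suite): solve_sylvester returns the X
# satisfying a*x + x*b = c. For diagonal a the solution is elementwise, e.g.
#     a = np.array([[1., 0.], [0., 2.]]); b = np.array([[3.]])
#     c = np.array([[4.], [5.]])
#     solve_sylvester(a, b, c)    # -> [[1.], [1.]], since (1+3)*1 = 4
#                                 #    and (2+3)*1 = 5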
|
40223139/2015cdaa5-12
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/unittest/test/testmock/testwith.py
|
739
|
import unittest
from warnings import catch_warnings
from unittest.test.testmock.support import is_instance
from unittest.mock import MagicMock, Mock, patch, sentinel, mock_open, call
something = sentinel.Something
something_else = sentinel.SomethingElse
class WithTest(unittest.TestCase):
def test_with_statement(self):
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
self.assertEqual(something, sentinel.Something)
def test_with_statement_exception(self):
try:
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
raise Exception('pow')
except Exception:
pass
else:
self.fail("patch swallowed exception")
self.assertEqual(something, sentinel.Something)
def test_with_statement_as(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertTrue(is_instance(mock_something, MagicMock),
"patching wrong type")
self.assertEqual(something, sentinel.Something)
def test_patch_object_with_statement(self):
class Foo(object):
something = 'foo'
original = Foo.something
with patch.object(Foo, 'something'):
self.assertNotEqual(Foo.something, original, "unpatched")
self.assertEqual(Foo.something, original)
def test_with_statement_nested(self):
with catch_warnings(record=True):
with patch('%s.something' % __name__) as mock_something, patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_with_statement_specified(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(mock_something, sentinel.Patched, "wrong patch")
self.assertEqual(something, sentinel.Something)
def testContextManagerMocking(self):
mock = Mock()
mock.__enter__ = Mock()
mock.__exit__ = Mock()
mock.__exit__.return_value = False
with mock as m:
self.assertEqual(m, mock.__enter__.return_value)
mock.__enter__.assert_called_with()
mock.__exit__.assert_called_with(None, None, None)
def test_context_manager_with_magic_mock(self):
mock = MagicMock()
with self.assertRaises(TypeError):
with mock:
'foo' + 3
mock.__enter__.assert_called_with()
self.assertTrue(mock.__exit__.called)
def test_with_statement_same_attribute(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something' % __name__) as mock_again:
self.assertEqual(something, mock_again, "unpatched")
self.assertEqual(something, mock_something,
"restored with wrong instance")
self.assertEqual(something, sentinel.Something, "not restored")
def test_with_statement_imbricated(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_dict_context_manager(self):
foo = {}
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
self.assertEqual(foo, {})
with self.assertRaises(NameError):
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
raise NameError('Konrad')
self.assertEqual(foo, {})
class TestMockOpen(unittest.TestCase):
def test_mock_open(self):
mock = mock_open()
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_mock_open_context_manager(self):
mock = mock_open()
handle = mock.return_value
with patch('%s.open' % __name__, mock, create=True):
with open('foo') as f:
f.read()
expected_calls = [call('foo'), call().__enter__(), call().read(),
call().__exit__(None, None, None)]
self.assertEqual(mock.mock_calls, expected_calls)
self.assertIs(f, handle)
def test_explicit_mock(self):
mock = MagicMock()
mock_open(mock)
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_read_data(self):
mock = mock_open(read_data='foo')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.read()
self.assertEqual(result, 'foo')
if __name__ == '__main__':
unittest.main()
|
Tesora-Release/tesora-python-troveclient
|
refs/heads/master
|
troveclient/v1/accounts.py
|
3
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient import base
from troveclient import common
class Account(base.Resource):
"""Account is an opaque instance used to hold account information."""
def __repr__(self):
return "<Account: %s>" % self.name
class Accounts(base.ManagerWithFind):
"""Manage :class:`Account` information."""
resource_class = Account
def _list(self, url, response_key):
resp, body = self.api.client.get(url)
if not body:
raise Exception("Call to " + url + " did not return a body.")
return self.resource_class(self, body[response_key])
def index(self):
"""Get a list of all accounts with non-deleted instances."""
url = "/mgmt/accounts"
resp, body = self.api.client.get(url)
common.check_for_exceptions(resp, body, url)
if not body:
raise Exception("Call to " + url + " did not return a body.")
return base.Resource(self, body)
def show(self, account):
"""Get details of one account.
:rtype: :class:`Account`.
"""
acct_name = self._get_account_name(account)
return self._list("/mgmt/accounts/%s" % acct_name, 'account')
# Appease the abc gods
def list(self):
pass
@staticmethod
def _get_account_name(account):
try:
if account.name:
return account.name
except AttributeError:
return account
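# Usage sketch (added for illustration; 'api' stands for an authenticated
# troveclient API handle and 'some-account' is a hypothetical account name):
#     accounts = Accounts(api)
#     details = accounts.show('some-account')  # accepts a name or an Account
#     summary = accounts.index()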
|
TobbeTripitaka/src
|
refs/heads/master
|
user/karl/contour_cmap_plot.py
|
8
|
"""
Renders some contoured and colormapped images of a scalar value field.
- Left-drag pans the plot.
- Mousewheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import cos, linspace, log, meshgrid, pi, sin
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, ColorBar, gmt_drywet, \
HPlotContainer, LinearMapper, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create a scalar field to colormap
x_extents = (-2*pi, 2*pi)
y_extents = (-1.5*pi, 1.5*pi)
xs = linspace(-2*pi, 2*pi, 200)
ys = linspace(-1.5*pi, 1.5*pi, 100)
x, y = meshgrid(xs,ys)
zs = sin(log(abs((x+1)**4)+0.05))*cos(y)*1.1*(-y) + \
sin(((x+1)**2 + y**2)/4)
    # Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("imagedata", zs)
# Create the left plot, a colormap and simple contours
lplot = Plot(pd)
lplot.img_plot("imagedata",
name="cm_plot",
xbounds=x_extents,
ybounds=y_extents,
colormap=gmt_drywet)
lplot.contour_plot("imagedata",
type="line",
xbounds=x_extents,
ybounds=y_extents)
# Tweak some of the plot properties
lplot.title = "Colormap and contours"
lplot.padding = 20
lplot.bg_color = "white"
lplot.fill_padding = True
# Add some tools to the plot
zoom = ZoomTool(lplot, tool_mode="box", always_on=False)
lplot.overlays.append(zoom)
lplot.tools.append(PanTool(lplot, constrain_key="shift"))
# Right now, some of the tools are a little invasive, and we need the
# actual CMapImage object to give to them
cm_plot = lplot.plots["cm_plot"][0]
# Create the colorbar, handing in the appropriate range and colormap
colormap = cm_plot.color_mapper
colorbar = ColorBar(index_mapper=LinearMapper(range=colormap.range),
color_mapper=colormap,
plot=cm_plot,
orientation='v',
resizable='v',
width=30,
padding=20)
colorbar.padding_top = lplot.padding_top
colorbar.padding_bottom = lplot.padding_bottom
    # Create the right plot, contours of varying color and width
rplot = Plot(pd, range2d=lplot.range2d)
rplot.contour_plot("imagedata",
type="line",
xbounds=x_extents,
ybounds=y_extents,
bgcolor="black",
levels=15,
styles="solid",
widths=list(linspace(4.0, 0.1, 15)),
colors=gmt_drywet)
# Add some tools to the plot
zoom = ZoomTool(rplot, tool_mode="box", always_on=False)
rplot.overlays.append(zoom)
rplot.tools.append(PanTool(rplot, constrain_key="shift"))
# Tweak some of the plot properties
rplot.title = "Varying contour lines"
rplot.padding = 20
rplot.bg_color = "white"
rplot.fill_padding = True
# Create a container and add our plots
container = HPlotContainer(padding=40, fill_padding=True,
bgcolor = "white", use_backbuffer=True)
container.add(colorbar)
container.add(lplot)
container.add(rplot)
return container
#===============================================================================
# Attributes to use for the plot view.
size=(950,650)
title="Some contour plots"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
# EOF
|
TRESCLOUD/odoo
|
refs/heads/Integracion&ControlDeCalidad
|
addons/base_import/tests/__init__.py
|
179
|
from . import test_cases
checks = [test_cases]
|
al1221/ghost-openshift
|
refs/heads/master
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/scripts/check_sources.py
|
117
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Checker for file headers
~~~~~~~~~~~~~~~~~~~~~~~~
Make sure each Python file has a correct file header
including copyright and license information.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys, os, re
import getopt
import cStringIO
from os.path import join, splitext, abspath
checkers = {}
def checker(*suffixes, **kwds):
only_pkg = kwds.pop('only_pkg', False)
def deco(func):
for suffix in suffixes:
checkers.setdefault(suffix, []).append(func)
func.only_pkg = only_pkg
return func
return deco
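# Added illustration: 'checker' registers a function for one or more file
# suffixes; each registered checker yields (lineno, message) pairs, e.g.
#     @checker('.rst')
#     def check_nonempty(fn, lines):
#         if not lines:
#             yield 0, "empty file"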
name_mail_re = r'[\w ]+(<.*?>)?'
copyright_re = re.compile(r'^ :copyright: Copyright 2006-2013 by '
r'the Pygments team, see AUTHORS\.$', re.UNICODE)
copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re), re.UNICODE)
coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING
"informations"] # ALLOW-MISSPELLING
@checker('.py')
def check_syntax(fn, lines):
try:
compile(''.join(lines), fn, "exec")
except SyntaxError, err:
yield 0, "not compilable: %s" % err
@checker('.py')
def check_style_and_encoding(fn, lines):
encoding = 'ascii'
for lno, line in enumerate(lines):
if len(line) > 90:
yield lno+1, "line too long"
m = not_ix_re.search(line)
if m:
yield lno+1, '"' + m.group() + '"'
if is_const_re.search(line):
yield lno+1, 'using == None/True/False'
if lno < 2:
co = coding_re.search(line)
if co:
encoding = co.group(1)
try:
line.decode(encoding)
except UnicodeDecodeError, err:
yield lno+1, "not decodable: %s\n Line: %r" % (err, line)
except LookupError, err:
yield 0, "unknown encoding: %s" % encoding
encoding = 'latin1'
@checker('.py', only_pkg=True)
def check_fileheader(fn, lines):
# line number correction
c = 1
if lines[0:1] == ['#!/usr/bin/env python\n']:
lines = lines[1:]
c = 2
llist = []
docopen = False
for lno, l in enumerate(lines):
llist.append(l)
if lno == 0:
if l == '# -*- coding: rot13 -*-\n':
# special-case pony package
return
elif l != '# -*- coding: utf-8 -*-\n':
yield 1, "missing coding declaration"
elif lno == 1:
if l != '"""\n' and l != 'r"""\n':
yield 2, 'missing docstring begin (""")'
else:
docopen = True
elif docopen:
if l == '"""\n':
# end of docstring
if lno <= 4:
yield lno+c, "missing module name in docstring"
break
if l != "\n" and l[:4] != ' ' and docopen:
yield lno+c, "missing correct docstring indentation"
if lno == 2:
# if not in package, don't check the module name
modname = fn[:-3].replace('/', '.').replace('.__init__', '')
while modname:
if l.lower()[4:-1] == modname:
break
modname = '.'.join(modname.split('.')[1:])
else:
yield 3, "wrong module name in docstring heading"
modnamelen = len(l.strip())
elif lno == 3:
if l.strip() != modnamelen * "~":
yield 4, "wrong module name underline, should be ~~~...~"
else:
yield 0, "missing end and/or start of docstring..."
# check for copyright and license fields
license = llist[-2:-1]
if license != [" :license: BSD, see LICENSE for details.\n"]:
yield 0, "no correct license info"
ci = -3
copyright = [s.decode('utf-8') for s in llist[ci:ci+1]]
while copyright and copyright_2_re.match(copyright[0]):
ci -= 1
copyright = llist[ci:ci+1]
if not copyright or not copyright_re.match(copyright[0]):
yield 0, "no correct copyright info"
@checker('.py', '.html', '.js')
def check_whitespace_and_spelling(fn, lines):
for lno, line in enumerate(lines):
if "\t" in line:
yield lno+1, "OMG TABS!!!1 "
if line[:-1].rstrip(' \t') != line[:-1]:
yield lno+1, "trailing whitespace"
for word in misspellings:
if word in line and 'ALLOW-MISSPELLING' not in line:
yield lno+1, '"%s" used' % word
bad_tags = ('<b>', '<i>', '<u>', '<s>', '<strike>',
            '<center>', '<big>', '<small>', '<font')
@checker('.html')
def check_xhtml(fn, lines):
for lno, line in enumerate(lines):
for bad_tag in bad_tags:
if bad_tag in line:
yield lno+1, "used " + bad_tag
def main(argv):
try:
gopts, args = getopt.getopt(argv[1:], "vi:")
except getopt.GetoptError:
print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
return 2
opts = {}
for opt, val in gopts:
if opt == '-i':
val = abspath(val)
opts.setdefault(opt, []).append(val)
if len(args) == 0:
path = '.'
elif len(args) == 1:
path = args[0]
else:
print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
return 2
verbose = '-v' in opts
num = 0
out = cStringIO.StringIO()
# TODO: replace os.walk run with iteration over output of
# `svn list -R`.
for root, dirs, files in os.walk(path):
if '.svn' in dirs:
dirs.remove('.svn')
if '-i' in opts and abspath(root) in opts['-i']:
del dirs[:]
continue
# XXX: awkward: for the Makefile call: don't check non-package
# files for file headers
in_pocoo_pkg = root.startswith('./pygments')
for fn in files:
fn = join(root, fn)
if fn[:2] == './': fn = fn[2:]
if '-i' in opts and abspath(fn) in opts['-i']:
continue
ext = splitext(fn)[1]
checkerlist = checkers.get(ext, None)
if not checkerlist:
continue
if verbose:
print "Checking %s..." % fn
try:
f = open(fn, 'r')
lines = list(f)
except (IOError, OSError), err:
print "%s: cannot open: %s" % (fn, err)
num += 1
continue
for checker in checkerlist:
if not in_pocoo_pkg and checker.only_pkg:
continue
for lno, msg in checker(fn, lines):
print >>out, "%s:%d: %s" % (fn, lno, msg)
num += 1
if verbose:
print
if num == 0:
print "No errors found."
else:
print out.getvalue().rstrip('\n')
print "%d error%s found." % (num, num > 1 and "s" or "")
return int(num > 0)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
LeeRisk/zulip
|
refs/heads/master
|
confirmation/settings.py
|
121
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: settings.py 12 2008-11-23 19:38:52Z jarek.zgoda $'
STATUS_ACTIVE = 1
STATUS_FIELDS = {
}
|
cisco-openstack/tempest
|
refs/heads/proposed
|
tempest/lib/services/volume/v2/quotas_client.py
|
3
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from debtcollector import moves
from tempest.lib.services.volume.v3 import quotas_client
QuotasClient = moves.moved_class(
quotas_client.QuotasClient, 'QuotasClient',
__name__, version="Rocky", removal_version='?')
|
endlessm/chromium-browser
|
refs/heads/master
|
ui/resources/PRESUBMIT.py
|
6
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium UI resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
https://chromium.googlesource.com/chromium/src/+/master/styleguide/web/web.md
for the rules we're checking against here.
"""
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
resources = input_api.PresubmitLocalPath()
# List of paths with their associated scale factor. This is used to verify
# that the images modified in one are the correct scale of the other.
path_scales = [
[(100, 'default_100_percent/'), (200, 'default_200_percent/')],
]
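  # For example (hypothetical paths), appending another pair such as
  #   [(100, 'other_100_percent/'), (200, 'other_200_percent/')]
  # would extend the same 1:2 scale verification to images under those roots.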
import sys
old_path = sys.path
try:
sys.path = [resources] + old_path
from resource_check import resource_scale_factors
for paths in path_scales:
results.extend(resource_scale_factors.ResourceScaleFactors(
input_api, output_api, paths).RunChecks())
finally:
sys.path = old_path
return results
|
brandond/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_vsvip.py
|
31
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.2
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_vsvip
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of VsVip Avi RESTful Object
description:
    - This module is used to configure the VsVip object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cloud_ref:
description:
- It is a reference to an object of type cloud.
- Field introduced in 17.1.1.
dns_info:
description:
- Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
- Field introduced in 17.1.1.
east_west_placement:
description:
- Force placement on all service engines in the service engine group (container clouds only).
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
name:
description:
- Name for the vsvip object.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the vsvip object.
- Field introduced in 17.1.1.
vip:
description:
- List of virtual service ips and other shareable entities.
- Field introduced in 17.1.1.
vrf_context_ref:
description:
- Virtual routing context that the virtual service is bound to.
- This is used to provide the isolation of the set of networks the application is attached to.
- It is a reference to an object of type vrfcontext.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create VsVip object
avi_vsvip:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_vsvip
"""
RETURN = '''
obj:
description: VsVip (api/vsvip) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
cloud_ref=dict(type='str',),
dns_info=dict(type='list',),
east_west_placement=dict(type='bool',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
vip=dict(type='list',),
vrf_context_ref=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'vsvip',
set([]))
if __name__ == '__main__':
main()
|
jctanner/ansible
|
refs/heads/devel
|
docs/docsite/rst/conf.py
|
14
|
# -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import os
# pip install sphinx_rtd_theme
# import sphinx_rtd_theme
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('some/directory'))
#
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath(os.path.join('..', '_extensions')))
# We want sphinx to document the ansible modules contained in this repository,
# not those that may happen to be installed in the version
# of Python used to run sphinx. When sphinx loads in order to document,
# the repository version needs to be the one that is loaded:
sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib')))
VERSION = 'devel'
AUTHOR = 'Ansible, Inc'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# TEST: 'sphinxcontrib.fulltoc'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible'
copyright = "2019 Red Hat, Inc."
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
# OBSOLETE - removing this - dharmabumstead 2018-02-06
# exclude_patterns = ['modules']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'YAML+Jinja'
# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything.
# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_:
# |br| is useful for formatting fields inside of tables
# |_| is a nonbreaking space; similarly useful inside of tables
rst_epilog = """
.. |br| raw:: html
<br>
.. |_| unicode:: 0xA0
:trim:
"""
# Options for HTML output
# -----------------------
html_theme_path = ['../_themes']
html_theme = 'sphinx_rtd_theme'
html_short_title = 'Ansible Documentation'
html_show_sphinx = False
html_theme_options = {
'canonical_url': "https://docs.ansible.com/ansible/latest/",
'vcs_pageview_mode': 'edit'
}
html_context = {
'display_github': 'True',
'github_user': 'ansible',
'github_repo': 'ansible',
'github_version': 'devel/docs/docsite/rst/',
'github_module_version': 'devel/lib/ansible/modules/',
'github_root_dir': 'devel/lib/ansible',
'github_cli_version': 'devel/lib/ansible/cli/',
'current_version': version,
'latest_version': '2.10',
# list specifically out of order to make latest work
'available_versions': ('latest', '2.9', '2.9_ja', '2.8', 'devel'),
'css_files': ('_static/ansible.css', # overrides to the standard theme
),
}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo =
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = 'https://docs.ansible.com/ansible/latest'
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Configuration for sphinx-notfound-pages
# with no 'notfound_template' and no 'notfound_context' set,
# the extension builds 404.rst into a location-agnostic 404 page
#
# default is `en` - using this for the sub-site:
notfound_default_language = "ansible"
# default is `latest`:
# setting explicitly - docsite serves up /ansible/latest/404.html
# so keep this set to `latest` even on the `devel` branch
# then no maintenance is needed when we branch a new stable_x.x
notfound_default_version = "latest"
# makes default setting explicit:
notfound_no_urls_prefix = False
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
autoclass_content = 'both'
# Note: Our strategy for intersphinx mappings is to have the upstream build location as the
# canonical source and then cached copies of the mapping stored locally in case someone is building
# when disconnected from the internet. We then have a script to update the cached copies.
#
# Because of that, each entry in this mapping should have this format:
# name: ('http://UPSTREAM_URL', (None, 'path/to/local/cache.inv'))
#
# The update script depends on this format so deviating from this (for instance, adding a third
# location for the mapping to live) will confuse it.
intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')),
'python3': ('https://docs.python.org/3/', (None, '../python3.inv')),
'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')),
'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')),
'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')),
'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')),
'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')),
'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')),
'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')),
}
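# A minimal sketch of what such an update script might do (illustrative only;
# the real script is not included here, and the use of `requests` is an
# assumption, not a documented dependency):
#
#     import requests
#     for name, (upstream, (_, cache_path)) in intersphinx_mapping.items():
#         resp = requests.get(upstream.rstrip('/') + '/objects.inv')
#         resp.raise_for_status()
#         with open(cache_path, 'wb') as inv:
#             inv.write(resp.content)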
# linkcheck settings
linkcheck_ignore = [
r'http://irc\.freenode\.net',
]
linkcheck_workers = 25
# linkcheck_anchors = False
|
JohnPek/PHP_PythonParser
|
refs/heads/master
|
classes/__init__.py
|
12133432
| |
Juniper/ceilometer
|
refs/heads/master
|
ceilometer/agent/__init__.py
|
12133432
| |
WALR/taiga-back
|
refs/heads/master
|
taiga/feedback/migrations/0001_initial.py
|
29
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='FeedbackEntry',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
                ('full_name', models.CharField(verbose_name='full name', max_length=256)),
                ('email', models.EmailField(verbose_name='email address', max_length=255)),
                ('comment', models.TextField(verbose_name='comment')),
                ('created_date', models.DateTimeField(auto_now_add=True, verbose_name='created date')),
            ],
            options={
                'verbose_name': 'feedback entry',
                'verbose_name_plural': 'feedback entries',
                'ordering': ['-created_date', 'id'],
            },
            bases=(models.Model,),
        ),
    ]
|
ThomasMiconi/nupic.research
|
refs/heads/master
|
projects/sdr_paper/compute_optimal_threshold.py
|
11
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This simulation computes false positive statistics over a wide range of n,
# sparsity, and the spiking threshold theta.
# Invoke with: time ipython --no-confirm-exit --gui=osx averaged_error.py
import numpy
from sympy import *
init_printing()
from IPython.display import display
from prettytable import PrettyTable
import math
import csv
import pickle
# The various symbols
oxp_s = Symbol("Omega_x'")
b_s = Symbol("b")
n_s = Symbol("n")
theta_s = Symbol("theta")
w_s = Symbol("w")
s_s = Symbol("s")
a_s = Symbol("a")
subsampledOmega = (binomial(s_s, b_s) * binomial(n_s - s_s, a_s - b_s)) / \
binomial(n_s, a_s)
subsampledFpF = Sum(subsampledOmega, (b_s, theta_s, s_s))
display(subsampledFpF)
subsampledOmegaSlow = (binomial(s_s, b_s) * binomial(n_s - s_s, a_s - b_s))
subsampledFpFSlow = Sum(subsampledOmegaSlow, (b_s, theta_s, s_s)) / binomial(n_s, a_s)
display(subsampledFpFSlow)
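# Illustrative sanity check (an addition, not part of the original sweep): plug
# one small, arbitrary parameter set into the slow form and evaluate it
# numerically. The specific values below are assumptions chosen only to
# exercise the expression.
exampleFp = subsampledFpFSlow.subs(s_s, 24).subs(n_s, 10000).subs(a_s, 50).subs(theta_s, 12).evalf()
display(exampleFp)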
# Will hold the false positive rate for different values of theta
fpRate = {}
table = PrettyTable(["Sparsity", "n", "a", "s", "theta", "error rate"])
# Compute the various traces. Print out a table and put everything in a csv
# file.
with open("out.csv","w") as f:
csvWriter = csv.writer(f)
csvWriter.writerow( ["Sparsity", "n", "a", "s", "theta","error rate"])
for theta in range(3,25,1):
fpRate[theta] = []
for n in range(10000,200000,20000):
print "theta=",theta,"n=",n
sparsity = 0.005
while sparsity < 0.035:
for s in range(20,51,5):
a = round(sparsity*n)
fp = subsampledFpFSlow.subs(s_s, s).subs(n_s, n).subs(a_s, a).subs(
theta_s,theta).evalf()
table.add_row([sparsity,n,a,s,theta,fp])
csvWriter.writerow([sparsity,n,a,s,theta,fp])
f.flush()
fpRate[theta].append(fp)
sparsity += 0.005
print table.get_string().encode("utf-8")
csvWriter.writerow([])
csvWriter.writerow(["theta", "avgError","stdev", "min", "max", "median"])
thetas = fpRate.keys()
thetas.sort()
for theta in thetas:
# numpy.stdev doesn't exist (numpy.std is the correct name), so derive the
# standard deviation from the variance instead
variance = numpy.var(fpRate[theta])
csvWriter.writerow([theta,numpy.mean(fpRate[theta]),math.sqrt(variance),
numpy.min(fpRate[theta]), numpy.max(fpRate[theta]),
numpy.median(fpRate[theta])])
# Pickle the dict and the table
with open("fpRate.p","w") as pf:
pickle.dump(fpRate, pf)
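# To reload the pickled results in a later session (illustrative sketch added
# for convenience; the file name matches the dump above):
#
#   with open("fpRate.p", "rb") as pf:
#     fpRate = pickle.load(pf)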
|
ImageEngine/gaffer
|
refs/heads/master
|
python/GafferTest/ParallelAlgoTest.py
|
7
|
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import six
import threading
import unittest
import timeit
import IECore
import Gaffer
import GafferTest
class ParallelAlgoTest( GafferTest.TestCase ) :
# Context manager used to test code which uses `ParallelAlgo::callOnUIThread()`.
# This emulates the call handler that the UI would usually install.
class UIThreadCallHandler( object ) :
def __enter__( self ) :
self.__assertDone = False
self.__queue = six.moves.queue.Queue()
Gaffer.ParallelAlgo.pushUIThreadCallHandler( self.__callOnUIThread )
return self
def __exit__( self, type, value, traceBack ) :
Gaffer.ParallelAlgo.popUIThreadCallHandler()
while True :
try :
f = self.__queue.get( block = False )
except six.moves.queue.Empty:
return
if self.__assertDone :
raise AssertionError( "UIThread call queue not empty" )
f()
def __callOnUIThread( self, f ) :
self.__queue.put( f )
# Waits for a single use of `callOnUIThread()`, raising
# a test failure if none arises before `timeout` seconds.
def assertCalled( self, timeout = 30.0 ) :
try :
f = self.__queue.get( block = True, timeout = timeout )
except six.moves.queue.Empty :
raise AssertionError( "UIThread call not made within {} seconds".format( timeout ) )
f()
# Asserts that no further uses of `callOnUIThread()` will
# be made with this handler. This is checked on context exit.
def assertDone( self ) :
self.__assertDone = True
# Waits for `time` seconds, processing any calls to
# `ParallelAlgo::callOnUIThread()` made during that time.
def waitFor( self, time ) :
startTime = timeit.default_timer()
elapsed = 0.0
while elapsed < time:
try:
f = self.__queue.get( block = True, timeout = time - elapsed )
except six.moves.queue.Empty:
return
f()
elapsed = timeit.default_timer() - startTime
def testCallOnUIThread( self ) :
s = Gaffer.ScriptNode()
def uiThreadFunction() :
s.setName( "test" )
s.uiThreadId = six.moves._thread.get_ident()
with self.UIThreadCallHandler() as h :
t = threading.Thread(
target = lambda : Gaffer.ParallelAlgo.callOnUIThread( uiThreadFunction )
)
t.start()
h.assertCalled()
t.join()
h.assertDone()
self.assertEqual( s.getName(), "test" )
self.assertEqual( s.uiThreadId, six.moves._thread.get_ident() )
def testNestedUIThreadCallHandler( self ) :
# This is testing our `UIThreadCallHandler` utility
# class more than it's testing `ParallelAlgo`.
s = Gaffer.ScriptNode()
def uiThreadFunction1() :
s.setName( "test" )
s.uiThreadId1 = six.moves._thread.get_ident()
def uiThreadFunction2() :
s["fileName"].setValue( "test" )
s.uiThreadId2 = six.moves._thread.get_ident()
with self.UIThreadCallHandler() as h1 :
t1 = threading.Thread(
target = lambda : Gaffer.ParallelAlgo.callOnUIThread( uiThreadFunction1 )
)
t1.start()
h1.assertCalled()
h1.assertDone()
with self.UIThreadCallHandler() as h2 :
t2 = threading.Thread(
target = lambda : Gaffer.ParallelAlgo.callOnUIThread( uiThreadFunction2 )
)
t2.start()
h2.assertCalled()
h2.assertDone()
self.assertEqual( s.getName(), "test" )
self.assertEqual( s.uiThreadId1, six.moves._thread.get_ident() )
self.assertEqual( s["fileName"].getValue(), "test" )
self.assertEqual( s.uiThreadId2, six.moves._thread.get_ident() )
t1.join()
t2.join()
def testCallOnBackgroundThread( self ) :
script = Gaffer.ScriptNode()
script["n"] = GafferTest.AddNode()
foregroundContext = Gaffer.Context( script.context() )
foregroundContext["a"] = "a"
def f() :
backgroundContext = Gaffer.Context.current()
self.assertFalse( backgroundContext.isSame( foregroundContext ) )
self.assertEqual( backgroundContext, foregroundContext )
with self.assertRaises( IECore.Cancelled ) :
while True :
script["n"]["sum"].getValue()
# We might expect that `script["n"]["sum"].getValue()`
# would be guaranteed to throw after cancellation has been
# requested. But that is not the case if both the hash and the
# value are already cached, because cancellation is only checked
# for automatically when a Process is constructed. So we take
# a belt and braces approach and perform an explicit check here.
#
# The alternative would be to move the cancellation check outside
# of the Process class, so it is performed before the cache lookup.
# This may be the better approach, but we would need to benchmark
# it to ensure that performance was not adversely affected. To our
# knowledge, this "cache hits avoid cancellation" problem has not
# been responsible for unresponsive cancellation in the wild, because
# background tasks are typically triggered by `plugDirtiedSignal()`,
# and the hash cache is cleared when a plug is dirtied.
IECore.Canceller.check( backgroundContext.canceller() )
# Explicit cancellation
with foregroundContext :
backgroundTask = Gaffer.ParallelAlgo.callOnBackgroundThread( script["n"]["sum"], f )
backgroundTask.cancel()
# Implicit cancellation through graph edit
with foregroundContext :
backgroundTask = Gaffer.ParallelAlgo.callOnBackgroundThread( script["n"]["sum"], f )
script["n"]["op1"].setValue( 10 )
# Cancellation through deletion
with foregroundContext :
backgroundTask = Gaffer.ParallelAlgo.callOnBackgroundThread( script["n"]["sum"], f )
del backgroundTask
def testBackgroundThreadMonitoring( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.MultiplyNode()
s["n"]["op2"].setValue( 1 )
s["e"] = Gaffer.Expression()
s["e"].setExpression( """parent["n"]["op1"] = context["op1"]""" )
def backgroundFunction() :
with Gaffer.Context() as c :
for i in range( 0, 10000 ) :
c["op1"] = i
self.assertEqual( s["n"]["product"].getValue(), i )
with Gaffer.PerformanceMonitor() as m :
t = Gaffer.ParallelAlgo.callOnBackgroundThread(
s["n"]["product"], backgroundFunction
)
t.wait()
# The monitor was active when we launched the background
# process, so we expect it to have been transferred to the
# background thread and remained active there for the duration.
self.assertEqual( m.plugStatistics( s["n"]["product"] ).computeCount, 10000 )
if __name__ == "__main__":
unittest.main()
|
sg0/Elemental
|
refs/heads/master
|
examples/interface/BPDN.py
|
1
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El, time
m = 2000
n = 4000
numLambdas = 7
startLambda = 0
endLambda = 1
display = True
worldRank = El.mpi.WorldRank()
# Make a sparse matrix with the last column dense
def Rectang(height,width):
A = El.DistSparseMatrix()
A.Resize(height,width)
firstLocalRow = A.FirstLocalRow()
localHeight = A.LocalHeight()
A.Reserve(5*localHeight)
for sLoc in xrange(localHeight):
s = firstLocalRow + sLoc
if s < width:
A.QueueLocalUpdate( sLoc, s, 11 )
if s >= 1 and s-1 < width:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if s+1 < width:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if s >= height and s-height < width:
A.QueueLocalUpdate( sLoc, s-height, -3 )
if s+height < width:
A.QueueLocalUpdate( sLoc, s+height, 4 )
# The dense last column
A.QueueLocalUpdate( sLoc, width-1, -5/height );
A.MakeConsistent()
return A
A = Rectang(m,n)
b = El.DistMultiVec()
El.Gaussian( b, m, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.QPAffineCtrl_d()
ctrl.mehrotraCtrl.progress = True
for j in xrange(0,numLambdas):
lambd = startLambda + j*(endLambda-startLambda)/(numLambdas-1.)
if worldRank == 0:
print "lambda =", lambd
startBPDN = time.clock()
x = El.BPDN( A, b, lambd, ctrl )
endBPDN = time.clock()
if worldRank == 0:
print "BPDN time: ", endBPDN-startBPDN
if display:
El.Display( x, "x" )
xOneNorm = El.EntrywiseNorm( x, 1 )
e = El.DistMultiVec()
El.Copy( b, e )
El.SparseMultiply( El.NORMAL, -1., A, x, 1., e )
if display:
El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
print "|| x ||_1 =", xOneNorm
print "|| A x - b ||_2 =", eTwoNorm
# Require the user to press a button before the figures are closed
commSize = El.mpi.Size( El.mpi.COMM_WORLD() )
El.Finalize()
if commSize == 1:
raw_input('Press Enter to exit')
|
TheKnarf/apprtc
|
refs/heads/master
|
src/third_party/oauth2client/tools.py
|
171
|
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['argparser', 'run_flow', 'run', 'message_if_missing']
import BaseHTTPServer
import argparse
import httplib2
import logging
import os
import socket
import sys
import webbrowser
from oauth2client import client
from oauth2client import file
from oauth2client import util
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
# argparser is an ArgumentParser that contains command-line options expected
# by tools.run(). Pass it in as part of the 'parents' argument to your own
# ArgumentParser.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--auth_host_name', default='localhost',
help='Hostname when running a local web server.')
argparser.add_argument('--noauth_local_webserver', action='store_true',
default=False, help='Do not run a local web server.')
argparser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
nargs='*', help='Port web server should listen on.')
argparser.add_argument('--logging_level', default='ERROR',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR',
'CRITICAL'],
help='Set the logging level of detail.')
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server to handle OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into query_params and then stops serving.
"""
query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into the servers query_params and then stops serving.
"""
def do_GET(s):
"""Handle a GET request.
Parses the query parameters and prints a message
if the flow has completed. Note that we can't detect
if an error occurred.
"""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
query = s.path.split('?', 1)[-1]
query = dict(parse_qsl(query))
s.server.query_params = query
s.wfile.write("<html><head><title>Authentication Status</title></head>")
s.wfile.write("<body><p>The authentication flow has completed.</p>")
s.wfile.write("</body></html>")
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
pass
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
"""Core code for a command-line application.
The run() function is called from your application and runs through all the
steps to obtain credentials. It takes a Flow argument and attempts to open an
authorization server page in the user's default web browser. The server asks
the user to grant your application access to the user's data. If the user
grants access, the run() function returns new credentials. The new credentials
are also stored in the Storage argument, which updates the file associated
with the Storage object.
It presumes it is run from a command-line application and supports the
following flags:
--auth_host_name: Host name to use when running a local web server
to handle redirects during OAuth authorization.
(default: 'localhost')
--auth_host_port: Port to use when running a local web server to handle
redirects during OAuth authorization;
repeat this option to specify a list of values
(default: '[8080, 8090]')
(an integer)
--[no]auth_local_webserver: Run a local web server to handle redirects
during OAuth authorization.
(default: 'true')
The tools module defines an ArgumentParser that already contains the flag
definitions that run() requires. You can pass that ArgumentParser to your
ArgumentParser constructor:
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
flags = parser.parse_args(argv)
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
flags: argparse.ArgumentParser, the command-line flags.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
logging.getLogger().setLevel(getattr(logging, flags.logging_level))
if not flags.noauth_local_webserver:
success = False
port_number = 0
for port in flags.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((flags.auth_host_name, port),
ClientRedirectHandler)
except socket.error, e:
pass
else:
success = True
break
flags.noauth_local_webserver = not success
if not success:
print 'Failed to start a local webserver listening on either port 8080'
print 'or port 8090. Please check your firewall settings and locally'
print 'running programs that may be blocking or using those ports.'
print
print 'Falling back to --noauth_local_webserver and continuing with',
print 'authorization.'
print
if not flags.noauth_local_webserver:
oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
else:
oauth_callback = client.OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if not flags.noauth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print 'Your browser has been opened to visit:'
print
print ' ' + authorize_url
print
print 'If your browser is on a different machine then exit and re-run this'
print 'application with the command-line parameter '
print
print ' --noauth_local_webserver'
print
else:
print 'Go to the following link in your browser:'
print
print ' ' + authorize_url
print
code = None
if not flags.noauth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print 'Failed to find "code" in the query parameters of the redirect.'
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except client.FlowExchangeError, e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print 'Authentication successful.'
return credential
def message_if_missing(filename):
"""Helpful message to display if the CLIENT_SECRETS file is missing."""
return _CLIENT_SECRETS_MESSAGE % filename
try:
from old_run import run
from old_run import FLAGS
except ImportError:
def run(*args, **kwargs):
raise NotImplementedError(
'The gflags library must be installed to use tools.run(). '
'Please install gflags or preferrably switch to using '
'tools.run_flow().')
|
rob356/SickRage
|
refs/heads/master
|
lib/hachoir_core/event_handler.py
|
188
|
class EventHandler(object):
    """
    Class to connect events to event handlers.
    """
    def __init__(self):
        self.handlers = {}
    def connect(self, event_name, handler):
        """
        Connect an event handler to an event. Append it to the handlers list.
        """
        try:
            self.handlers[event_name].append(handler)
        except KeyError:
            self.handlers[event_name] = [handler]
    def raiseEvent(self, event_name, *args):
        """
        Raise an event: call each handler registered for this event_name.
        """
        if event_name not in self.handlers:
            return
        for handler in self.handlers[event_name]:
            handler(*args)
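# Illustrative usage (a sketch added for clarity, not part of the original
# module; the event name and handler are invented):
#
#   def on_new_field(field):
#       print "new field:", field
#
#   events = EventHandler()
#   events.connect("new-field", on_new_field)
#   events.raiseEvent("new-field", "size")   # calls on_new_field("size")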
|
tmuelle2/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/outputtee.py
|
192
|
# Copyright (c) 2009, Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import os
import sys
# Simple class to split output between multiple destinations
class Tee:
def __init__(self, *files):
self.files = files
# Callers should pass an already encoded string for writing.
def write(self, bytes):
for file in self.files:
file.write(bytes)
class OutputTee:
def __init__(self):
self._original_stdout = None
self._original_stderr = None
self._files_for_output = []
def add_log(self, path):
log_file = self._open_log_file(path)
self._files_for_output.append(log_file)
self._tee_outputs_to_files(self._files_for_output)
return log_file
def remove_log(self, log_file):
self._files_for_output.remove(log_file)
self._tee_outputs_to_files(self._files_for_output)
log_file.close()
@staticmethod
def _open_log_file(log_path):
(log_directory, log_name) = os.path.split(log_path)
if log_directory and not os.path.exists(log_directory):
os.makedirs(log_directory)
return codecs.open(log_path, "a+", "utf-8")
def _tee_outputs_to_files(self, files):
if not self._original_stdout:
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
if files and len(files):
sys.stdout = Tee(self._original_stdout, *files)
sys.stderr = Tee(self._original_stderr, *files)
else:
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
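# Illustrative usage (a sketch added for clarity, not part of the original
# module; the log path is invented):
#
#   tee = OutputTee()
#   log_file = tee.add_log("/tmp/build.log")  # stdout/stderr now also hit the log
#   print "recorded on the console and in /tmp/build.log"
#   tee.remove_log(log_file)                  # original streams restored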
|
ahmedaljazzar/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/split_mongo/split.py
|
15
|
"""
Provides full versioning CRUD and representation for collections of xblocks (e.g., courses, modules, etc).
Representation:
* course_index: a dictionary:
** '_id': a unique id which cannot change,
** 'org': the org's id. Only used for searching not identity,
** 'course': the course's catalog number
** 'run': the course's run id,
** 'edited_by': user_id of user who created the original entry,
** 'edited_on': the datetime of the original creation,
** 'versions': versions_dict: {branch_id: structure_id, ...}
** 'search_targets': a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
* structure:
** '_id': an ObjectId (guid),
** 'root': BlockKey (the block_type and block_id of the root block in the 'blocks' dictionary)
** 'previous_version': the structure from which this one was derived. For published courses, this
points to the previously published version of the structure not the draft published to this.
** 'original_version': the original structure id in the previous_version relation. Is a pseudo object
identifier enabling quick determination if 2 structures have any shared history,
** 'edited_by': user_id of the user whose change caused the creation of this structure version,
** 'edited_on': the datetime for the change causing this creation of this structure version,
** 'blocks': dictionary of xblocks in this structure:
*** BlockKey: key mapping to each BlockData:
*** BlockData: object containing the following attributes:
**** 'block_type': the xblock type id
**** 'definition': the db id of the record containing the content payload for this xblock
**** 'fields': the Scope.settings and children field values
***** 'children': This is stored as a list of (block_type, block_id) pairs
**** 'defaults': Scope.settings default values copied from a template block (used e.g. when
blocks are copied from a library to a course)
**** 'edit_info': EditInfo object:
***** 'edited_on': when was this xblock's fields last changed (will be edited_on value of
update_version structure)
***** 'edited_by': user_id for who changed this xblock last (will be edited_by value of
update_version structure)
***** 'update_version': the guid for the structure where this xblock got its current field
values. This may point to a structure not in this structure's history (e.g., to a draft
branch from which this version was published.)
***** 'previous_version': the guid for the structure which previously changed this xblock
(will be the previous value of update_version; so, may point to a structure not in this
structure's history.)
***** 'source_version': the guid for the structure that was copied/published into this block
* definition: shared content with revision history for xblock content fields
** '_id': definition_id (guid),
** 'block_type': xblock type id
** 'fields': scope.content (and possibly other) field values.
** 'edit_info': dictionary:
*** 'edited_by': user_id whose edit caused this version of the definition,
*** 'edited_on': datetime of the change causing this version
*** 'previous_version': the definition_id of the previous version of this definition
*** 'original_version': definition_id of the root of the previous version relation on this
definition. Acts as a pseudo-object identifier.
"""
import copy
import datetime
import hashlib
import logging
import six
from contracts import contract, new_contract
from importlib import import_module
from mongodb_proxy import autoretry_read
from path import Path as path
from pytz import UTC
from bson.objectid import ObjectId
from xblock.core import XBlock
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.course_module import CourseSummary
from xmodule.library_content_module import LibrarySummary
from xmodule.errortracker import null_error_tracker
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import (
BlockUsageLocator, DefinitionLocator, CourseLocator, LibraryLocator, VersionTree, LocalId,
)
from ccx_keys.locator import CCXLocator, CCXBlockUsageLocator
from xmodule.modulestore.exceptions import InsufficientSpecificationError, VersionConflictError, DuplicateItemError, \
DuplicateCourseError, MultipleCourseBlocksFound
from xmodule.modulestore import (
inheritance, ModuleStoreWriteBase, ModuleStoreEnum,
BulkOpsRecord, BulkOperationsMixin, SortedAssetList, BlockData
)
from ..exceptions import ItemNotFoundError
from .caching_descriptor_system import CachingDescriptorSystem
from xmodule.partitions.partitions_service import PartitionService
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection, DuplicateKeyError
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from xmodule.error_module import ErrorDescriptor
from collections import defaultdict
from types import NoneType
from xmodule.assetstore import AssetMetadata
log = logging.getLogger(__name__)
# ==============================================================================
#
# Known issue:
# Inheritance for cached kvs doesn't work on edits. Use case.
# 1) attribute foo is inheritable
# 2) g.children = [p], p.children = [a]
# 3) g.foo = 1 on load
# 4) if g.foo > 0, if p.foo > 0, if a.foo > 0 all eval True
# 5) p.foo = -1
# 6) g.foo > 0, p.foo <= 0 all eval True BUT
# 7) BUG: a.foo > 0 still evals True but should be False
# 8) reread and everything works right
# 9) p.del(foo), p.foo > 0 is True! works
# 10) BUG: a.foo < 0!
# Local fix won't permanently work b/c xblock may cache a.foo...
#
# ==============================================================================
# When a blacklist is set to this value, all children should be excluded
EXCLUDE_ALL = '*'
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('BlockKey', BlockKey)
new_contract('XBlock', XBlock)
class SplitBulkWriteRecord(BulkOpsRecord):
def __init__(self):
super(SplitBulkWriteRecord, self).__init__()
self.initial_index = None
self.index = None
self.structures = {}
self.structures_in_db = set()
# dict(version_guid, dict(BlockKey, module))
self.modules = defaultdict(dict)
self.definitions = {}
self.definitions_in_db = set()
self.course_key = None
# TODO: This needs to track which branches have actually been modified/versioned,
# so that copying one branch to another doesn't update the original branch.
@property
def dirty_branches(self):
"""
Return a list of which branch version ids differ from what was stored
in the database at the beginning of this bulk operation.
"""
# If no course index has been set, then no branches have changed
if self.index is None:
return []
# If there was no index in the database to start with, then all branches
# are dirty by definition
if self.initial_index is None:
return self.index.get('versions', {}).keys()
# Return branches whose ids differ between self.index and self.initial_index
return [
branch
for branch, _id
in self.index.get('versions', {}).items()
if self.initial_index.get('versions', {}).get(branch) != _id
]
def structure_for_branch(self, branch):
return self.structures.get(self.index.get('versions', {}).get(branch))
def set_structure_for_branch(self, branch, structure):
if self.index is not None:
self.index.setdefault('versions', {})[branch] = structure['_id']
self.structures[structure['_id']] = structure
def __repr__(self):
return u"SplitBulkWriteRecord<{!r}, {!r}, {!r}, {!r}, {!r}>".format(
self._active_count,
self.initial_index,
self.index,
self.structures,
self.structures_in_db,
)
class SplitBulkWriteMixin(BulkOperationsMixin):
"""
This implements the :meth:`bulk_operations` modulestore semantics for the :class:`SplitMongoModuleStore`.
In particular, it implements :meth:`_begin_bulk_operation` and
:meth:`_end_bulk_operation` to provide the external interface, and then exposes a set of methods
for interacting with course_indexes and structures that can be used by :class:`SplitMongoModuleStore`.
Internally, this mixin records the set of all active bulk operations (keyed on the active course),
and only writes those values to ``self.mongo_connection`` when :meth:`_end_bulk_operation` is called.
If a bulk write operation isn't active, then the changes are immediately written to the underlying
mongo_connection.
"""
_bulk_ops_record_type = SplitBulkWriteRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.SplitBulkWriteRecord` for this course.
"""
# handle split specific things and defer to super otherwise
if course_key is None:
return self._bulk_ops_record_type()
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError(u'{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
# handle version_guid based retrieval locally
if course_key.org is None or course_key.course is None or course_key.run is None:
return self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
# handle ignore case and general use
return super(SplitBulkWriteMixin, self)._get_bulk_ops_record(
course_key.replace(branch=None, version_guid=None), ignore_case
)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError('{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
if course_key.org and course_key.course and course_key.run:
del self._active_bulk_ops.records[course_key.replace(branch=None, version_guid=None)]
else:
del self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
def _start_outermost_bulk_operation(self, bulk_write_record, course_key, ignore_case=False):
"""
Begin a bulk write operation on course_key.
"""
bulk_write_record.initial_index = self.db_connection.get_course_index(course_key, ignore_case=ignore_case)
# Ensure that any edits to the index don't pollute the initial_index
bulk_write_record.index = copy.deepcopy(bulk_write_record.initial_index)
bulk_write_record.course_key = course_key
def _end_outermost_bulk_operation(self, bulk_write_record, structure_key):
"""
End the active bulk write operation on structure_key (course or library key).
"""
dirty = False
# If the content is dirty, then update the database
for _id in bulk_write_record.structures.viewkeys() - bulk_write_record.structures_in_db:
dirty = True
try:
self.db_connection.insert_structure(bulk_write_record.structures[_id], bulk_write_record.course_key)
except DuplicateKeyError:
# We may not have looked up this structure inside this bulk operation, and thus
# didn't realize that it was already in the database. That's OK, the store is
# append only, so if it's already been written, we can just keep going.
log.debug("Attempted to insert duplicate structure %s", _id)
for _id in bulk_write_record.definitions.viewkeys() - bulk_write_record.definitions_in_db:
dirty = True
try:
self.db_connection.insert_definition(bulk_write_record.definitions[_id], bulk_write_record.course_key)
except DuplicateKeyError:
# We may not have looked up this definition inside this bulk operation, and thus
# didn't realize that it was already in the database. That's OK, the store is
# append only, so if it's already been written, we can just keep going.
log.debug("Attempted to insert duplicate definition %s", _id)
if bulk_write_record.index is not None and bulk_write_record.index != bulk_write_record.initial_index:
dirty = True
if bulk_write_record.initial_index is None:
self.db_connection.insert_course_index(bulk_write_record.index, bulk_write_record.course_key)
else:
self.db_connection.update_course_index(
bulk_write_record.index,
from_index=bulk_write_record.initial_index,
course_context=bulk_write_record.course_key
)
return dirty
def get_course_index(self, course_key, ignore_case=False):
"""
Return the index for course_key.
"""
if self._is_in_bulk_operation(course_key, ignore_case):
return self._get_bulk_ops_record(course_key, ignore_case).index
else:
return self.db_connection.get_course_index(course_key, ignore_case)
def delete_course_index(self, course_key):
"""
Delete the course index from cache and the db
"""
if self._is_in_bulk_operation(course_key, False):
self._clear_bulk_ops_record(course_key)
self.db_connection.delete_course_index(course_key)
def insert_course_index(self, course_key, index_entry):
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.index = index_entry
else:
self.db_connection.insert_course_index(index_entry, course_key)
def update_course_index(self, course_key, updated_index_entry):
"""
Change the given course's index entry.
Note, this operation can be dangerous and break running courses.
Does not return anything useful.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.index = updated_index_entry
else:
self.db_connection.update_course_index(updated_index_entry, course_context=course_key)
def get_structure(self, course_key, version_guid):
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
structure = bulk_write_record.structures.get(version_guid)
# The structure hasn't been loaded from the db yet, so load it
if structure is None:
structure = self.db_connection.get_structure(version_guid, course_key)
bulk_write_record.structures[version_guid] = structure
if structure is not None:
bulk_write_record.structures_in_db.add(version_guid)
return structure
else:
# cast string to ObjectId if necessary
version_guid = course_key.as_object_id(version_guid)
return self.db_connection.get_structure(version_guid, course_key)
def update_structure(self, course_key, structure):
"""
Update a course structure, respecting the current bulk operation status
(no data will be written to the database if a bulk operation is active.)
"""
self._clear_cache(structure['_id'])
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.structures[structure['_id']] = structure
else:
self.db_connection.insert_structure(structure, course_key)
def get_cached_block(self, course_key, version_guid, block_id):
"""
If there's an active bulk_operation, see if it's cached this module and just return it
Don't do any extra work to get the ones which are not cached. Make the caller do the work & cache them.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
return bulk_write_record.modules[version_guid].get(block_id, None)
else:
return None
def cache_block(self, course_key, version_guid, block_key, block):
"""
The counterpart to :meth:`get_cached_block`; this one caches a block.
Returns nothing.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.modules[version_guid][block_key] = block
def decache_block(self, course_key, version_guid, block_key):
"""
Write operations which don't write from blocks must remove the target blocks from the cache.
Returns nothing.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
try:
del bulk_write_record.modules[version_guid][block_key]
except KeyError:
pass
def get_definition(self, course_key, definition_guid):
"""
Retrieve a single definition by id, respecting the active bulk operation
on course_key.
Args:
course_key (:class:`.CourseKey`): The course being operated on
definition_guid (str or ObjectID): The id of the definition to load
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
definition = bulk_write_record.definitions.get(definition_guid)
# The definition hasn't been loaded from the db yet, so load it
if definition is None:
definition = self.db_connection.get_definition(definition_guid, course_key)
bulk_write_record.definitions[definition_guid] = definition
if definition is not None:
bulk_write_record.definitions_in_db.add(definition_guid)
return definition
else:
# cast string to ObjectId if necessary
definition_guid = course_key.as_object_id(definition_guid)
return self.db_connection.get_definition(definition_guid, course_key)
def get_definitions(self, course_key, ids):
"""
Return all definitions specified in ``ids``.
If a definition with the same id is in both the cache and the database,
the cached version will be preferred.
Arguments:
course_key (:class:`.CourseKey`): The course that these definitions are being loaded
for (to respect bulk operations).
ids (list): A list of definition ids
"""
definitions = []
ids = set(ids)
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
# Only query for the definitions that aren't already cached.
for definition in bulk_write_record.definitions.values():
definition_id = definition.get('_id')
if definition_id in ids:
ids.remove(definition_id)
definitions.append(definition)
if len(ids):
# Query the db for the definitions.
defs_from_db = list(self.db_connection.get_definitions(list(ids), course_key))
defs_dict = {d.get('_id'): d for d in defs_from_db}
# Add the retrieved definitions to the cache.
bulk_write_record.definitions_in_db.update(defs_dict.iterkeys())
bulk_write_record.definitions.update(defs_dict)
definitions.extend(defs_from_db)
return definitions
def update_definition(self, course_key, definition):
"""
Update a definition, respecting the current bulk operation status
(no data will be written to the database if a bulk operation is active.)
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.definitions[definition['_id']] = definition
else:
self.db_connection.insert_definition(definition, course_key)
def version_structure(self, course_key, structure, user_id):
"""
Copy the structure and update the history info (edited_by, edited_on, previous_version)
"""
if course_key.branch is None:
raise InsufficientSpecificationError(course_key)
bulk_write_record = self._get_bulk_ops_record(course_key)
# If we have an active bulk write, and it's already been edited, then just use that structure
if bulk_write_record.active and course_key.branch in bulk_write_record.dirty_branches:
return bulk_write_record.structure_for_branch(course_key.branch)
# Otherwise, make a new structure
new_structure = copy.deepcopy(structure)
new_structure['_id'] = ObjectId()
new_structure['previous_version'] = structure['_id']
new_structure['edited_by'] = user_id
new_structure['edited_on'] = datetime.datetime.now(UTC)
new_structure['schema_version'] = self.SCHEMA_VERSION
# If we're in a bulk write, update the structure used there, and mark it as dirty
if bulk_write_record.active:
bulk_write_record.set_structure_for_branch(course_key.branch, new_structure)
return new_structure
def version_block(self, block_data, user_id, update_version):
"""
Update the block_data object based on it having been edited.
"""
if block_data.edit_info.update_version == update_version:
return
original_usage = block_data.edit_info.original_usage
original_usage_version = block_data.edit_info.original_usage_version
block_data.edit_info.edited_on = datetime.datetime.now(UTC)
block_data.edit_info.edited_by = user_id
block_data.edit_info.previous_version = block_data.edit_info.update_version
block_data.edit_info.update_version = update_version
if original_usage:
block_data.edit_info.original_usage = original_usage
block_data.edit_info.original_usage_version = original_usage_version
def find_matching_course_indexes(self, branch=None, search_targets=None, org_target=None, course_keys=None):
"""
Find the course_indexes which have the specified branch and search_targets. An optional org_target
can be specified to apply an ORG filter to return only the courses that are part of
that ORG.
Returns:
a Cursor if there are no changes in flight or a list if some have changed in current bulk op
"""
indexes = self.db_connection.find_matching_course_indexes(
branch,
search_targets,
org_target,
course_keys=course_keys)
indexes = self._add_indexes_from_active_records(
indexes,
branch,
search_targets,
org_target,
course_keys=course_keys
)
return indexes
def _add_indexes_from_active_records(
self,
course_indexes,
branch=None,
search_targets=None,
org_target=None,
course_keys=None
):
"""
Add any course indexes that are being built but not yet persisted, or are in the process of being updated
"""
def _replace_or_append_index(altered_index):
"""
If the index is already in indexes, replace it. Otherwise, append it.
"""
for index, existing in enumerate(course_indexes):
if all(existing[attr] == altered_index[attr] for attr in ['org', 'course', 'run']):
course_indexes[index] = altered_index
return
course_indexes.append(altered_index)
for _, record in self._active_records:
if branch and branch not in record.index.get('versions', {}):
continue
if search_targets:
if any(
'search_targets' not in record.index or
field not in record.index['search_targets'] or
record.index['search_targets'][field] != value
for field, value in search_targets.iteritems()
):
continue
# if we've specified a filter by org,
# make sure we've honored that filter when
# integrating in-transit records
if org_target:
if record.index['org'] != org_target:
continue
if course_keys:
index_exists_in_active_records = False
for course_key in course_keys:
if all(record.index[key_attr] == getattr(course_key, key_attr)
for key_attr in ['org', 'course', 'run']):
index_exists_in_active_records = True
break
if not index_exists_in_active_records:
continue
if not hasattr(course_indexes, 'append'): # Just in time conversion to list from cursor
course_indexes = list(course_indexes)
_replace_or_append_index(record.index)
return course_indexes
def find_courselike_blocks_by_id(self, ids, block_type):
"""
Find all structures specified in ``ids``. Return blocks matching ``block_type``.
Arguments:
ids (list): A list of structure ids
block_type: type of block to return
"""
ids = set(ids)
return self.db_connection.find_courselike_blocks_by_id(list(ids), block_type)
def find_structures_by_id(self, ids):
"""
Return all structures specified in ``ids``.
If a structure with the same id is in both the cache and the database,
the cached version will be preferred.
Arguments:
ids (list): A list of structure ids
"""
structures = []
ids = set(ids)
for _, record in self._active_records:
for structure in record.structures.values():
structure_id = structure.get('_id')
if structure_id in ids:
ids.remove(structure_id)
structures.append(structure)
structures.extend(self.db_connection.find_structures_by_id(list(ids)))
return structures
def find_structures_derived_from(self, ids):
"""
Return all structures that were immediately derived from a structure listed in ``ids``.
Arguments:
ids (list): A list of structure ids
"""
found_structure_ids = set()
structures = []
for _, record in self._active_records:
for structure in record.structures.values():
if structure.get('previous_version') in ids:
structures.append(structure)
if '_id' in structure:
found_structure_ids.add(structure['_id'])
structures.extend(
structure
for structure in self.db_connection.find_structures_derived_from(ids)
if structure['_id'] not in found_structure_ids
)
return structures
def find_ancestor_structures(self, original_version, block_key):
"""
Find all structures that originated from ``original_version`` that contain ``block_key``.
Any structure found in the cache will be preferred to a structure with the same id from the database.
Arguments:
original_version (str or ObjectID): The id of a structure
block_key (BlockKey): The id of the block in question
"""
found_structure_ids = set()
structures = []
for _, record in self._active_records:
for structure in record.structures.values():
if 'original_version' not in structure:
continue
if structure['original_version'] != original_version:
continue
if block_key not in structure.get('blocks', {}):
continue
if 'update_version' not in structure['blocks'][block_key].get('edit_info', {}):
continue
structures.append(structure)
found_structure_ids.add(structure['_id'])
structures.extend(
structure
for structure in self.db_connection.find_ancestor_structures(original_version, block_key)
if structure['_id'] not in found_structure_ids
)
return structures
class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
"""
A Mongodb backed ModuleStore supporting versions, inheritance,
and sharing.
"""
SCHEMA_VERSION = 1
# a list of field names to store in course index search_targets. Note, this will
# only record one value per key. If branches disagree, the last one set wins.
# It won't recompute the value on operations such as update_course_index (e.g., to revert to a prev
# version) but those functions will have an optional arg for setting these.
SEARCH_TARGET_DICT = ['wiki_slug']
DEFAULT_ROOT_LIBRARY_BLOCK_TYPE = 'library'
DEFAULT_ROOT_COURSE_BLOCK_TYPE = 'course'
def __init__(self, contentstore, doc_store_config, fs_root, render_template,
default_class=None,
error_tracker=null_error_tracker,
i18n_service=None, fs_service=None, user_service=None,
services=None, signal_handler=None, **kwargs):
"""
:param doc_store_config: must have a host, db, and collection entries. Other common entries: port, tz_aware.
"""
super(SplitMongoModuleStore, self).__init__(contentstore, **kwargs)
self.db_connection = MongoConnection(**doc_store_config)
if default_class is not None:
module_path, __, class_name = default_class.rpartition('.')
class_ = getattr(import_module(module_path), class_name)
self.default_class = class_
else:
self.default_class = None
self.fs_root = path(fs_root)
self.error_tracker = error_tracker
self.render_template = render_template
self.services = services or {}
if i18n_service is not None:
self.services["i18n"] = i18n_service
if fs_service is not None:
self.services["fs"] = fs_service
if user_service is not None:
self.services["user"] = user_service
if self.request_cache is not None:
self.services["request_cache"] = self.request_cache
self.signal_handler = signal_handler
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
self.db_connection.close_connections()
def mongo_wire_version(self):
"""
Returns the wire version for mongo. Only used by unit tests which instrument the connection.
"""
return self.db_connection.mongo_wire_version
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
# drop the assets
super(SplitMongoModuleStore, self)._drop_database(database, collections, connections)
self.db_connection._drop_database(database, collections, connections) # pylint: disable=protected-access
def cache_items(self, system, base_block_ids, course_key, depth=0, lazy=True):
"""
Handles caching of items once inheritance and any other one time
per course per fetch operations are done.
Arguments:
system: a CachingDescriptorSystem
base_block_ids: list of BlockIds to fetch
course_key: the destination course providing the context
depth: how deep below these to prefetch
lazy: whether to load definitions now or later
"""
with self.bulk_operations(course_key, emit_signals=False):
new_module_data = {}
for block_id in base_block_ids:
new_module_data = self.descendants(
system.course_entry.structure['blocks'],
block_id,
depth,
new_module_data
)
            # This method supports lazy loading, where the descendant definitions aren't loaded
# until they're actually needed.
if not lazy:
# Non-lazy loading: Load all descendants by id.
descendent_definitions = self.get_definitions(
course_key,
[
block.definition
for block in new_module_data.itervalues()
]
)
# Turn definitions into a map.
definitions = {definition['_id']: definition
for definition in descendent_definitions}
for block in new_module_data.itervalues():
if block.definition in definitions:
definition = definitions[block.definition]
# convert_fields gets done later in the runtime's xblock_from_json
block.fields.update(definition.get('fields'))
block.definition_loaded = True
system.module_data.update(new_module_data)
return system.module_data
@contract(course_entry=CourseEnvelope, block_keys="list(BlockKey)", depth="int | None")
def _load_items(self, course_entry, block_keys, depth=0, **kwargs):
"""
Load & cache the given blocks from the course. May return the blocks in any order.
Load the definitions into each block if lazy is in kwargs and is False;
otherwise, do not load the definitions - they'll be loaded later when needed.
"""
lazy = kwargs.pop('lazy', True)
should_cache_items = not lazy
runtime = self._get_cache(course_entry.structure['_id'])
if runtime is None:
runtime = self.create_runtime(course_entry, lazy)
self._add_cache(course_entry.structure['_id'], runtime)
should_cache_items = True
if should_cache_items:
self.cache_items(runtime, block_keys, course_entry.course_key, depth, lazy)
with self.bulk_operations(course_entry.course_key, emit_signals=False):
return [runtime.load_item(block_key, course_entry, **kwargs) for block_key in block_keys]
def _get_cache(self, course_version_guid):
"""
Find the descriptor cache for this course if it exists
:param course_version_guid:
"""
if self.request_cache is None:
return None
return self.request_cache.data.setdefault('course_cache', {}).get(course_version_guid)
def _add_cache(self, course_version_guid, system):
"""
Save this cache for subsequent access
:param course_version_guid:
:param system:
"""
if self.request_cache is not None:
self.request_cache.data.setdefault('course_cache', {})[course_version_guid] = system
return system
def _clear_cache(self, course_version_guid=None):
"""
Should only be used by testing or something which implements transactional boundary semantics.
:param course_version_guid: if provided, clear only this entry
"""
if self.request_cache is None:
return
if course_version_guid:
try:
del self.request_cache.data.setdefault('course_cache', {})[course_version_guid]
except KeyError:
pass
else:
self.request_cache.data['course_cache'] = {}
def _lookup_course(self, course_key, head_validation=True):
"""
Decode the locator into the right series of db access. Does not
return the CourseDescriptor! It returns the actual db json from
structures.
Semantics: if course id and branch given, then it will get that branch. If
also give a version_guid, it will see if the current head of that branch == that guid. If not
it raises VersionConflictError (the version now differs from what it was when you got your
reference) unless you specify head_validation = False, in which case it will return the
revision (if specified) by the course_key.
:param course_key: any subclass of CourseLocator
"""
if not course_key.version_guid:
head_validation = True
if head_validation and course_key.org and course_key.course and course_key.run:
if course_key.branch is None:
raise InsufficientSpecificationError(course_key)
# use the course id
index = self.get_course_index(course_key)
if index is None:
raise ItemNotFoundError(course_key)
if course_key.branch not in index['versions']:
raise ItemNotFoundError(course_key)
version_guid = index['versions'][course_key.branch]
if course_key.version_guid is not None and version_guid != course_key.version_guid:
# This may be a bit too touchy but it's hard to infer intent
raise VersionConflictError(course_key, version_guid)
elif course_key.version_guid is None:
raise InsufficientSpecificationError(course_key)
else:
# TODO should this raise an exception if branch was provided?
version_guid = course_key.version_guid
entry = self.get_structure(course_key, version_guid)
if entry is None:
raise ItemNotFoundError('Structure: {}'.format(version_guid))
# b/c more than one course can use same structure, the 'org', 'course',
# 'run', and 'branch' are not intrinsic to structure
# and the one assoc'd w/ it by another fetch may not be the one relevant to this fetch; so,
# add it in the envelope for the structure.
return CourseEnvelope(course_key.replace(version_guid=version_guid), entry)
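    # Illustrative sketch of the semantics above (not executed; ``store`` and the
    # locator values are hypothetical):
    #
    #     key = CourseLocator(org='Org', course='CS1', run='2014', branch='draft')
    #     env = store._lookup_course(key)          # fetches the head of the 'draft' branch
    #     pinned = key.replace(version_guid=env.structure['_id'])
    #     store._lookup_course(pinned)             # fine while the head is unchanged; raises
    #                                              # VersionConflictError once the head moves,
    #                                              # unless head_validation=False is passed.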
def _get_courselike_blocks_for_branch(self, branch, **kwargs):
"""
        Internal generator for fetching lists of courselike objects without loading them.
"""
version_guids, id_version_map = self.collect_ids_from_matching_indexes(branch, **kwargs)
if not version_guids:
return
block_type = SplitMongoModuleStore.DEFAULT_ROOT_LIBRARY_BLOCK_TYPE \
if branch == 'library' else SplitMongoModuleStore.DEFAULT_ROOT_COURSE_BLOCK_TYPE
for entry in self.find_courselike_blocks_by_id(version_guids, block_type):
for course_index in id_version_map[entry['_id']]:
yield entry, course_index
def _get_structures_for_branch(self, branch, **kwargs):
"""
Internal generator for fetching lists of courses, libraries, etc.
"""
version_guids, id_version_map = self.collect_ids_from_matching_indexes(branch, **kwargs)
if not version_guids:
return
for entry in self.find_structures_by_id(version_guids):
for course_index in id_version_map[entry['_id']]:
yield entry, course_index
def collect_ids_from_matching_indexes(self, branch, **kwargs):
"""
Find the course_indexes which have the specified branch. Extract `version_guids`
from the course_indexes.
"""
matching_indexes = self.find_matching_course_indexes(
branch,
search_targets=None,
org_target=kwargs.get('org'),
course_keys=kwargs.get('course_keys')
)
# collect ids and then query for those
version_guids = []
id_version_map = defaultdict(list)
for course_index in matching_indexes:
version_guid = course_index['versions'][branch]
version_guids.append(version_guid)
id_version_map[version_guid].append(course_index)
return version_guids, id_version_map
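    # Sketch of the return shape (ObjectId values are hypothetical):
    #
    #     version_guids, id_version_map = store.collect_ids_from_matching_indexes('draft')
    #     # version_guids  -> [ObjectId('...a1'), ObjectId('...b2')]
    #     # id_version_map -> {ObjectId('...a1'): [<course_index dict>, ...], ...}
    #
    # The map lets one structure fetch fan back out to every course index whose
    # branch head points at that structure.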
def _get_structures_for_branch_and_locator(self, branch, locator_factory, **kwargs):
"""
Internal generator for fetching lists of courses, libraries, etc.
:param str branch: Branch to fetch structures from
:param type locator_factory: Factory to create locator from structure info and branch
"""
result = []
for entry, structure_info in self._get_structures_for_branch(branch, **kwargs):
locator = locator_factory(structure_info, branch)
envelope = CourseEnvelope(locator, entry)
root = entry['root']
structures_list = self._load_items(envelope, [root], depth=0, **kwargs)
if not isinstance(structures_list[0], ErrorDescriptor):
result.append(structures_list[0])
return result
def _create_course_locator(self, course_info, branch):
"""
Creates course locator using course_info dict and branch
"""
return CourseLocator(
org=course_info['org'],
course=course_info['course'],
run=course_info['run'],
branch=branch,
)
def _create_library_locator(self, library_info, branch):
"""
Creates library locator using library_info dict and branch
"""
return LibraryLocator(
org=library_info['org'],
library=library_info['course'],
branch=branch,
)
@autoretry_read()
def get_courses(self, branch, **kwargs):
"""
Returns a list of course descriptors matching any given qualifiers.
qualifiers should be a dict of keywords matching the db fields or any
legal query for mongo to use against the active_versions collection.
Note, this is to find the current head of the named branch type.
To get specific versions via guid use get_course.
:param branch: the branch for which to return courses.
"""
# get the blocks for each course index (s/b the root)
return self._get_structures_for_branch_and_locator(branch, self._create_course_locator, **kwargs)
@autoretry_read()
def get_course_summaries(self, branch, **kwargs):
"""
        Returns a list of `CourseSummary` objects matching any given qualifiers.
qualifiers should be a dict of keywords matching the db fields or any
legal query for mongo to use against the active_versions collection.
Note, this is to find the current head of the named branch type.
To get specific versions via guid use get_course.
:param branch: the branch for which to return courses.
"""
def extract_course_summary(course):
"""
Extract course information from the course block for split.
"""
return {
field: course.fields[field]
for field in CourseSummary.course_info_fields
if field in course.fields
}
courses_summaries = []
for entry, structure_info in self._get_courselike_blocks_for_branch(branch, **kwargs):
course_locator = self._create_course_locator(structure_info, branch=None)
course_block = [
block_data
for block_key, block_data in entry['blocks'].items()
if block_key.type == "course"
]
if not course_block:
raise ItemNotFoundError
if len(course_block) > 1:
raise MultipleCourseBlocksFound(
"Expected 1 course block to be found in the course, but found {0}".format(len(course_block))
)
course_summary = extract_course_summary(course_block[0])
courses_summaries.append(
CourseSummary(course_locator, **course_summary)
)
return courses_summaries
@autoretry_read()
def get_library_summaries(self, **kwargs):
"""
Returns a list of `LibrarySummary` objects.
kwargs can be valid db fields to match against active_versions
collection e.g org='example_org'.
"""
branch = 'library'
libraries_summaries = []
for entry, structure_info in self._get_courselike_blocks_for_branch(branch, **kwargs):
library_locator = self._create_library_locator(structure_info, branch=None)
library_block = [
block_data
for block_key, block_data in entry['blocks'].items()
if block_key.type == "library"
]
if not library_block:
raise ItemNotFoundError
if len(library_block) > 1:
raise MultipleLibraryBlocksFound(
"Expected 1 library block, but found {0}".format(len(library_block))
)
library_block_fields = library_block[0].fields
display_name = ''
if 'display_name' in library_block_fields:
display_name = library_block_fields['display_name']
libraries_summaries.append(
LibrarySummary(library_locator, display_name)
)
return libraries_summaries
def get_libraries(self, branch="library", **kwargs):
"""
Returns a list of "library" root blocks matching any given qualifiers.
TODO: better way of identifying library index entry vs. course index entry.
"""
return self._get_structures_for_branch_and_locator(branch, self._create_library_locator, **kwargs)
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
return CourseLocator(org, course, run)
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for this modulestore
that matches the supplied course_key.
"""
locator_cls = CCXBlockUsageLocator if isinstance(course_key, CCXLocator) else BlockUsageLocator
return locator_cls(course_key, 'course', 'course')
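    # Example (illustrative; the org/course/run values are made up):
    #
    #     course_key = store.make_course_key('edX', 'DemoX', '2014_T1')
    #     usage_key = store.make_course_usage_key(course_key)
    #     # usage_key addresses the root block: both block_type and block_id
    #     # are the literal string 'course'.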
def _get_structure(self, structure_id, depth, head_validation=True, **kwargs):
"""
Gets Course or Library by locator
"""
structure_entry = self._lookup_course(structure_id, head_validation=head_validation)
root = structure_entry.structure['root']
result = self._load_items(structure_entry, [root], depth, **kwargs)
return result[0]
def get_course(self, course_id, depth=0, **kwargs):
"""
Gets the course descriptor for the course identified by the locator
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_id)
return self._get_structure(course_id, depth, **kwargs)
def get_library(self, library_id, depth=0, head_validation=True, **kwargs):
"""
Gets the 'library' root block for the library identified by the locator
"""
if not isinstance(library_id, LibraryLocator):
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(library_id)
return self._get_structure(library_id, depth, head_validation=head_validation, **kwargs)
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
        Does this course exist in this modulestore? This method does not verify that the branch &/or
version in the course_id exists. Use get_course_index_info to check that.
Returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
            return None
        course_index = self.get_course_index(course_id, ignore_case)
        if course_index is None:
            return None
        return CourseLocator(course_index['org'], course_index['course'], course_index['run'], course_id.branch)
def has_library(self, library_id, ignore_case=False, **kwargs):
"""
        Does this library exist in this modulestore? This method does not verify that the branch &/or
        version in the library_id exists.
        Returns the library_id of the library if it was found, else None.
"""
if not isinstance(library_id, LibraryLocator):
return None
index = self.get_course_index(library_id, ignore_case)
if index:
return LibraryLocator(index['org'], index['course'], library_id.branch)
return None
def has_item(self, usage_key):
"""
        Returns True if usage_key exists in its course. Returns False if
        the course or the block w/in the course do not exist for the given version.
        raises InsufficientSpecificationError if the usage_key does not identify a block
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
if usage_key.block_id is None:
raise InsufficientSpecificationError(usage_key)
try:
course_structure = self._lookup_course(usage_key.course_key).structure
except ItemNotFoundError:
# this error only occurs if the course does not exist
return False
return self._get_block_from_structure(course_structure, BlockKey.from_usage_key(usage_key)) is not None
@contract(returns='XBlock')
def get_item(self, usage_key, depth=0, **kwargs):
"""
depth (int): An argument that some module stores may use to prefetch
descendants of the queried modules for more efficient results later
in the request. The depth is counted in the number of
calls to get_children() to cache. None indicates to cache all
descendants.
raises InsufficientSpecificationError or ItemNotFoundError
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(usage_key)
with self.bulk_operations(usage_key.course_key):
course = self._lookup_course(usage_key.course_key)
items = self._load_items(course, [BlockKey.from_usage_key(usage_key)], depth, **kwargs)
if len(items) == 0:
raise ItemNotFoundError(usage_key)
elif len(items) > 1:
log.debug("Found more than one item for '{}'".format(usage_key))
return items[0]
def get_items(self, course_locator, settings=None, content=None, qualifiers=None, include_orphans=True, **kwargs):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_locator
NOTE: don't use this to look for courses as the course_locator is required. Use get_courses.
Args:
course_locator (CourseLocator): the course identifier
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as qualifiers below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as qualifiers below.
qualifiers (dict): what to look for within the course.
                Common qualifiers are ``category`` or any field name. If the target field is a list,
                then it searches for the given value in the list, not for list equivalence.
For substring matching pass a regex object.
For split,
you can search by ``edited_by``, ``edited_on`` providing a function testing limits.
include_orphans (boolean): Returns all items in a course, including orphans if present.
                True - This would return all items irrespective of in-tree checks; it may fetch orphans
                if present in the course.
                False - returns only those items which are in the course tree. This ensures no orphans are
                fetched.
"""
if not isinstance(course_locator, CourseKey) or course_locator.deprecated:
# The supplied courselike key is of the wrong type, so it can't possibly be stored in this modulestore.
return []
course = self._lookup_course(course_locator)
items = []
qualifiers = qualifiers.copy() if qualifiers else {} # copy the qualifiers (destructively manipulated here)
def _block_matches_all(block_data):
"""
Check that the block matches all the criteria
"""
# do the checks which don't require loading any additional data
if ( # pylint: disable=bad-continuation
self._block_matches(block_data, qualifiers) and
self._block_matches(block_data.fields, settings)
):
if content:
definition_block = self.get_definition(course_locator, block_data.definition)
return self._block_matches(definition_block['fields'], content)
else:
return True
if settings is None:
settings = {}
if 'name' in qualifiers:
            # odd case where we don't search, just confirm
block_name = qualifiers.pop('name')
block_ids = []
for block_id, block in course.structure['blocks'].iteritems():
# Don't do an in comparison blindly; first check to make sure
# that the name qualifier we're looking at isn't a plain string;
# if it is a string, then it should match exactly. If it's other
# than a string, we check whether it contains the block ID; this
# is so a list or other iterable can be passed with multiple
# valid qualifiers.
if isinstance(block_name, six.string_types):
name_matches = block_id.id == block_name
else:
name_matches = block_id.id in block_name
if name_matches and _block_matches_all(block):
block_ids.append(block_id)
return self._load_items(course, block_ids, **kwargs)
if 'category' in qualifiers:
qualifiers['block_type'] = qualifiers.pop('category')
# don't expect caller to know that children are in fields
if 'children' in qualifiers:
settings['children'] = qualifiers.pop('children')
# No need of these caches unless include_orphans is set to False
path_cache = None
parents_cache = None
if not include_orphans:
path_cache = {}
parents_cache = self.build_block_key_to_parents_mapping(course.structure)
for block_id, value in course.structure['blocks'].iteritems():
if _block_matches_all(value):
if not include_orphans:
if ( # pylint: disable=bad-continuation
block_id.type in DETACHED_XBLOCK_TYPES or
self.has_path_to_root(block_id, course, path_cache, parents_cache)
):
items.append(block_id)
else:
items.append(block_id)
if len(items) > 0:
return self._load_items(course, items, depth=0, **kwargs)
else:
return []
def build_block_key_to_parents_mapping(self, structure):
"""
Given a structure, builds block_key to parents mapping for all block keys in structure
and returns it
:param structure: db json of course structure
:return dict: a dictionary containing mapping of block_keys against their parents.
"""
children_to_parents = defaultdict(list)
for parent_key, value in structure['blocks'].iteritems():
for child_key in value.fields.get('children', []):
children_to_parents[child_key].append(parent_key)
return children_to_parents
def has_path_to_root(self, block_key, course, path_cache=None, parents_cache=None):
"""
Check recursively if an xblock has a path to the course root
:param block_key: BlockKey of the component whose path is to be checked
:param course: actual db json of course from structures
:param path_cache: a dictionary that records which modules have a path to the root so that we don't have to
double count modules if we're computing this for a list of modules in a course.
:param parents_cache: a dictionary containing mapping of block_key to list of its parents. Optionally, this
should be built for course structure to make this method faster.
:return Bool: whether or not component has path to the root
"""
if path_cache and block_key in path_cache:
return path_cache[block_key]
if parents_cache is None:
xblock_parents = self._get_parents_from_structure(block_key, course.structure)
else:
xblock_parents = parents_cache[block_key]
if len(xblock_parents) == 0 and block_key.type in ["course", "library"]:
# Found, xblock has the path to the root
if path_cache is not None:
path_cache[block_key] = True
return True
has_path = any(
self.has_path_to_root(xblock_parent, course, path_cache, parents_cache)
for xblock_parent in xblock_parents
)
if path_cache is not None:
path_cache[block_key] = has_path
return has_path
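    # With both caches supplied this is a memoized reverse traversal. Rough
    # sketch of checking every block in a course (names hypothetical):
    #
    #     course = store._lookup_course(course_key)
    #     path_cache = {}
    #     parents = store.build_block_key_to_parents_mapping(course.structure)
    #     reachable = [
    #         bk for bk in course.structure['blocks']
    #         if store.has_path_to_root(bk, course, path_cache, parents)
    #     ]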
def get_parent_location(self, locator, **kwargs):
"""
Return the location (Locators w/ block_ids) for the parent of this location in this
course. Could use get_items(location, {'children': block_id}) but this is slightly faster.
NOTE: the locator must contain the block_id, and this code does not actually ensure block_id exists
:param locator: BlockUsageLocator restricting search scope
"""
if not isinstance(locator, BlockUsageLocator) or locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(locator)
course = self._lookup_course(locator.course_key)
all_parent_ids = self._get_parents_from_structure(BlockKey.from_usage_key(locator), course.structure)
        # Check and verify the found parent_ids are not orphans; remove parents which have no valid path
        # to the course root
parent_ids = [
valid_parent
for valid_parent in all_parent_ids
if self.has_path_to_root(valid_parent, course)
]
if len(parent_ids) == 0:
return None
# find alphabetically least
parent_ids.sort(key=lambda parent: (parent.type, parent.id))
return BlockUsageLocator.make_relative(
locator,
block_type=parent_ids[0].type,
block_id=parent_ids[0].id,
)
def get_orphans(self, course_key, **kwargs):
"""
Return an array of all of the orphans in the course.
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")]
course = self._lookup_course(course_key)
items = set(course.structure['blocks'].keys())
items.remove(course.structure['root'])
blocks = course.structure['blocks']
for block_id, block_data in blocks.iteritems():
items.difference_update(BlockKey(*child) for child in block_data.fields.get('children', []))
if block_data.block_type in detached_categories:
items.discard(block_id)
return [
course_key.make_usage_key(block_type=block_id.type, block_id=block_id.id)
for block_id in items
]
def get_course_index_info(self, course_key):
"""
The index records the initial creation of the indexed course and tracks the current version
heads. This function is primarily for test verification but may serve some
more general purpose.
:param course_key: must have a org, course, and run set
:return {'org': string,
versions: {'draft': the head draft version id,
'published': the head published version id if any,
},
'edited_by': who created the course originally (named edited for consistency),
'edited_on': when the course was originally created
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
if not (course_key.course and course_key.run and course_key.org):
return None
index = self.get_course_index(course_key)
return index
# TODO figure out a way to make this info accessible from the course descriptor
def get_course_history_info(self, course_key):
"""
        Because xblocks doesn't provide a means to separate the course structure's meta information from
the course xblock's, this method will get that info for the structure as a whole.
:param course_key:
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
course = self._lookup_course(course_key).structure
return {
'original_version': course['original_version'],
'previous_version': course['previous_version'],
'edited_by': course['edited_by'],
'edited_on': course['edited_on']
}
def get_definition_history_info(self, definition_locator, course_context=None):
"""
        Because xblocks doesn't provide a means to separate the definition's meta information from
the usage xblock's, this method will get that info for the definition
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(definition_locator, DefinitionLocator) or definition_locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(definition_locator)
definition = self.db_connection.get_definition(definition_locator.definition_id, course_context)
if definition is None:
return None
return definition['edit_info']
def get_course_successors(self, course_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this course. Return as a VersionTree
Mostly makes sense when course_locator uses a version_guid, but because it finds all relevant
next versions, these do include those created for other courses.
:param course_locator:
"""
if not isinstance(course_locator, CourseLocator) or course_locator.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_locator)
if version_history_depth < 1:
return None
if course_locator.version_guid is None:
course = self._lookup_course(course_locator)
version_guid = course.structure['_id']
course_locator = course_locator.for_version(version_guid)
else:
version_guid = course_locator.version_guid
# TODO if depth is significant, it may make sense to get all that have the same original_version
# and reconstruct the subtree from version_guid
next_entries = self.find_structures_derived_from([version_guid])
        # must only scan the cursor once
next_versions = [struct for struct in next_entries]
result = {version_guid: [CourseLocator(version_guid=struct['_id']) for struct in next_versions]}
depth = 1
while depth < version_history_depth and len(next_versions) > 0:
depth += 1
next_entries = self.find_structures_derived_from([struct['_id'] for struct in next_versions])
next_versions = [struct for struct in next_entries]
            for course_structure in next_versions:
                result.setdefault(course_structure['previous_version'], []).append(
                    CourseLocator(version_guid=course_structure['_id']))
return VersionTree(course_locator, result)
def get_block_generations(self, block_locator):
"""
Find the history of this block. Return as a VersionTree of each place the block changed (except
deletion).
The block's history tracks its explicit changes but not the changes in its children starting
from when the block was created.
"""
# course_agnostic means we don't care if the head and version don't align, trust the version
course_struct = self._lookup_course(block_locator.course_key.course_agnostic()).structure
block_key = BlockKey.from_usage_key(block_locator)
all_versions_with_block = self.find_ancestor_structures(
original_version=course_struct['original_version'],
block_key=block_key
)
# find (all) root versions and build map {previous: {successors}..}
possible_roots = []
result = {}
for version in all_versions_with_block:
block_payload = self._get_block_from_structure(version, block_key)
if version['_id'] == block_payload.edit_info.update_version:
if block_payload.edit_info.previous_version is None:
# this was when this block was created
possible_roots.append(block_payload.edit_info.update_version)
else: # map previous to {update..}
result.setdefault(block_payload.edit_info.previous_version, set()).add(
block_payload.edit_info.update_version)
# more than one possible_root means usage was added and deleted > 1x.
if len(possible_roots) > 1:
# find the history segment including block_locator's version
element_to_find = self._get_block_from_structure(course_struct, block_key).edit_info.update_version
if element_to_find in possible_roots:
possible_roots = [element_to_find]
for possibility in possible_roots:
if self._find_local_root(element_to_find, possibility, result):
possible_roots = [possibility]
break
elif len(possible_roots) == 0:
return None
# convert the results value sets to locators
for k, versions in result.iteritems():
result[k] = [
block_locator.for_version(version)
for version in versions
]
return VersionTree(
block_locator.for_version(possible_roots[0]),
result
)
def get_definition_successors(self, definition_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this definition. Return as a VersionTree
"""
# TODO implement
pass
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator and version from
which the copy was inherited.
Returns usage_key, version if the data is available, otherwise returns (None, None)
"""
blocks = self._lookup_course(usage_key.course_key).structure['blocks']
block = blocks.get(BlockKey.from_usage_key(usage_key))
if block and block.edit_info.original_usage is not None:
usage_key = BlockUsageLocator.from_string(block.edit_info.original_usage)
return usage_key, block.edit_info.original_usage_version
return None, None
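    # Sketch (keys hypothetical): after copy_from_template copies a library block
    # into a course, the copy remembers where it came from:
    #
    #     orig_key, orig_version = store.get_block_original_usage(copied_usage_key)
    #     # orig_key     -> BlockUsageLocator of the library block, or None
    #     # orig_version -> version of the library structure copied from, or None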
def create_definition_from_data(self, course_key, new_def_data, category, user_id):
"""
Pull the definition fields out of descriptor and save to the db as a new definition
w/o a predecessor and return the new id.
:param user_id: request.user object
"""
new_def_data = self._serialize_fields(category, new_def_data)
new_id = ObjectId()
document = {
'_id': new_id,
"block_type": category,
"fields": new_def_data,
"edit_info": {
"edited_by": user_id,
"edited_on": datetime.datetime.now(UTC),
"previous_version": None,
"original_version": new_id,
},
'schema_version': self.SCHEMA_VERSION,
}
self.update_definition(course_key, document)
definition_locator = DefinitionLocator(category, new_id)
return definition_locator
def update_definition_from_data(self, course_key, definition_locator, new_def_data, user_id):
"""
See if new_def_data differs from the persisted version. If so, update
the persisted version and return the new id.
:param user_id: request.user
"""
        def needs_saved():
            """
            Return True if new_def_data differs from the persisted definition.
            """
            for key, value in new_def_data.iteritems():
                if key not in old_definition['fields'] or value != old_definition['fields'][key]:
                    return True
            for key, value in old_definition.get('fields', {}).iteritems():
                if key not in new_def_data:
                    return True
            return False
# if this looks in cache rather than fresh fetches, then it will probably not detect
# actual change b/c the descriptor and cache probably point to the same objects
old_definition = self.get_definition(course_key, definition_locator.definition_id)
if old_definition is None:
raise ItemNotFoundError(definition_locator)
new_def_data = self._serialize_fields(old_definition['block_type'], new_def_data)
if needs_saved():
definition_locator = self._update_definition_from_data(course_key, old_definition, new_def_data, user_id)
return definition_locator, True
else:
return definition_locator, False
def _update_definition_from_data(self, course_key, old_definition, new_def_data, user_id):
"""
Update the persisted version of the given definition and return the
locator of the new definition. Does not check if data differs from the
previous version.
"""
new_definition = copy.deepcopy(old_definition)
new_definition['_id'] = ObjectId()
new_definition['fields'] = new_def_data
new_definition['edit_info']['edited_by'] = user_id
new_definition['edit_info']['edited_on'] = datetime.datetime.now(UTC)
# previous version id
new_definition['edit_info']['previous_version'] = old_definition['_id']
new_definition['schema_version'] = self.SCHEMA_VERSION
self.update_definition(course_key, new_definition)
return DefinitionLocator(new_definition['block_type'], new_definition['_id'])
def _generate_block_key(self, course_blocks, category):
"""
Generate a somewhat readable block id unique w/in this course using the category
:param course_blocks: the current list of blocks.
:param category:
"""
# NOTE: a potential bug is that a block is deleted and another created which gets the old
# block's id. a possible fix is to cache the last serial in a dict in the structure
# {category: last_serial...}
# A potential confusion is if the name incorporates the parent's name, then if the child
# moves, its id won't change and will be confusing
serial = 1
while True:
potential_key = BlockKey(category, "{}{}".format(category, serial))
if potential_key not in course_blocks:
return potential_key
serial += 1
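    # Example: if 'html1' is already taken in course_blocks, the next free serial
    # is used:
    #
    #     self._generate_block_key(course_blocks, 'html')
    #     # -> BlockKey(type='html', id='html2')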
@contract(returns='XBlock')
def create_item(self, user_id, course_key, block_type, block_id=None, definition_locator=None, fields=None,
asides=None, force=False, **kwargs):
"""
Add a descriptor to persistence as an element
of the course. Return the resulting post saved version with populated locators.
:param course_key: If it has a version_guid and a course org + course + run + branch, this
method ensures that the version is the head of the given course branch before making the change.
raises InsufficientSpecificationError if there is no course locator.
raises VersionConflictError if the version_guid of the course_or_parent_locator is not the head
            of its course unless force is true.
        :param force: fork the structure and don't update the course draftVersion if the version
            conflict described above would otherwise be raised.
:param continue_revision: for multistep transactions, continue revising the given version rather than creating
a new version. Setting force to True conflicts with setting this to True and will cause a VersionConflictError
:param definition_locator: should either be None to indicate this is a brand new definition or
a pointer to the existing definition to which this block should point or from which this was derived
or a LocalId to indicate that it's new.
If fields does not contain any Scope.content, then definition_locator must have a value meaning that this
block points
to the existing definition. If fields contains Scope.content and definition_locator is not None, then
the Scope.content fields are assumed to be a new payload for definition_locator.
:param block_id: if provided, must not already exist in the structure. Provides the block id for the
new item in this structure. Otherwise, one is computed using the category appended w/ a few digits.
This method creates a new version of the course structure unless the course has a bulk_write operation
active.
It creates and inserts the new block, makes the block point
        to the definition, which may be new, a new version of an existing definition, or an existing one unchanged.
Rules for course locator:
        * If the course locator specifies an org and course and run and either it doesn't
          specify version_guid or the one it specifies == the current head of the branch,
          it progresses the course to point
          to the new head and sets the active version to point to the new head
        * If the locator has an org and course and run but its version_guid != current head, it raises VersionConflictError.
NOTE: using a version_guid will end up creating a new version of the course. Your new item won't be in
the course id'd by version_guid but instead in one w/ a new version_guid. Ensure in this case that you get
the new version_guid from the locator in the returned object!
"""
with self.bulk_operations(course_key):
# split handles all the fields in one dict not separated by scope
fields = fields or {}
fields.update(kwargs.pop('metadata', {}) or {})
definition_data = kwargs.pop('definition_data', {})
if definition_data:
if not isinstance(definition_data, dict):
definition_data = {'data': definition_data} # backward compatibility to mongo's hack
fields.update(definition_data)
# find course_index entry if applicable and structures entry
index_entry = self._get_index_if_valid(course_key, force)
structure = self._lookup_course(course_key).structure
partitioned_fields = self.partition_fields_by_scope(block_type, fields)
new_def_data = partitioned_fields.get(Scope.content, {})
# persist the definition if persisted != passed
if definition_locator is None or isinstance(definition_locator.definition_id, LocalId):
definition_locator = self.create_definition_from_data(course_key, new_def_data, block_type, user_id)
elif new_def_data:
definition_locator, _ = self.update_definition_from_data(course_key, definition_locator, new_def_data, user_id)
# copy the structure and modify the new one
new_structure = self.version_structure(course_key, structure, user_id)
new_id = new_structure['_id']
# generate usage id
if block_id is not None:
block_key = BlockKey(block_type, block_id)
if block_key in new_structure['blocks']:
raise DuplicateItemError(block_id, self, 'structures')
else:
block_key = self._generate_block_key(new_structure['blocks'], block_type)
block_fields = partitioned_fields.get(Scope.settings, {})
if Scope.children in partitioned_fields:
block_fields.update(partitioned_fields[Scope.children])
self._update_block_in_structure(new_structure, block_key, self._new_block(
user_id,
block_type,
block_fields,
definition_locator.definition_id,
new_id,
asides=asides
))
self.update_structure(course_key, new_structure)
# update the index entry if appropriate
if index_entry is not None:
# see if any search targets changed
if fields is not None:
self._update_search_targets(index_entry, fields)
self._update_head(course_key, index_entry, course_key.branch, new_id)
item_loc = BlockUsageLocator(
course_key.version_agnostic(),
block_type=block_type,
block_id=block_key.id,
)
else:
item_loc = BlockUsageLocator(
CourseLocator(version_guid=new_id),
block_type=block_type,
block_id=block_key.id,
)
if isinstance(course_key, LibraryLocator):
self._flag_library_updated_event(course_key)
# reconstruct the new_item from the cache
return self.get_item(item_loc)
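    # Typical call (sketch; the user id, locator, and field values are hypothetical):
    #
    #     item = store.create_item(
    #         user_id=42,
    #         course_key=CourseLocator('Org', 'CS1', '2014', branch='draft'),
    #         block_type='problem',
    #         fields={'display_name': 'Quiz 1', 'data': '<problem/>'},
    #     )
    #     # when called with a version_guid-only key, the returned item's location
    #     # carries the *new* version guid (see the note in the docstring above).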
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, asides=None, **kwargs):
"""
        Creates and saves a new xblock as a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
block that this item should be parented under
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
asides (dict): A dictionary specifying initial values for some or all aside fields
in the newly created block
"""
with self.bulk_operations(parent_usage_key.course_key):
xblock = self.create_item(
user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields, asides=asides,
**kwargs)
# skip attach to parent if xblock has 'detached' tag
if 'detached' in xblock._class_tags: # pylint: disable=protected-access
return xblock
# don't version the structure as create_item handled that already.
new_structure = self._lookup_course(xblock.location.course_key).structure
# add new block as child and update parent's version
block_id = BlockKey.from_usage_key(parent_usage_key)
if block_id not in new_structure['blocks']:
raise ItemNotFoundError(parent_usage_key)
parent = new_structure['blocks'][block_id]
# Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS'))
if kwargs.get('position') is None:
parent.fields.setdefault('children', []).append(BlockKey.from_usage_key(xblock.location))
else:
parent.fields.setdefault('children', []).insert(
kwargs.get('position'),
BlockKey.from_usage_key(xblock.location)
)
if parent.edit_info.update_version != new_structure['_id']:
# if the parent hadn't been previously changed in this bulk transaction, indicate that it's
# part of the bulk transaction
self.version_block(parent, user_id, new_structure['_id'])
self.decache_block(parent_usage_key.course_key, new_structure['_id'], block_id)
# db update
self.update_structure(parent_usage_key.course_key, new_structure)
# don't need to update the index b/c create_item did it for this version
return xblock
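    # Unlike create_item, this also wires the new block into the parent's
    # 'children' list. Sketch (``parent_key`` is a hypothetical BlockUsageLocator):
    #
    #     unit = store.create_child(42, parent_key, 'vertical',
    #                               fields={'display_name': 'Unit 1'})
    #     # 'detached' block types (e.g. static_tab) are created but never
    #     # appended to the parent's children.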
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
See :meth: `.ModuleStoreWrite.clone_course` for documentation.
In split, other than copying the assets, this is cheap as it merely creates a new version of the
existing course.
"""
source_index = self.get_course_index_info(source_course_id)
if source_index is None:
raise ItemNotFoundError("Cannot find a course at {0}. Aborting".format(source_course_id))
with self.bulk_operations(dest_course_id):
new_course = self.create_course(
dest_course_id.org, dest_course_id.course, dest_course_id.run,
user_id,
fields=fields,
versions_dict=source_index['versions'],
search_targets=source_index['search_targets'],
skip_auto_publish=True,
**kwargs
)
# don't copy assets until we create the course in case something's awry
super(SplitMongoModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
return new_course
DEFAULT_ROOT_COURSE_BLOCK_ID = 'course'
DEFAULT_ROOT_LIBRARY_BLOCK_ID = 'library'
def create_course(
self, org, course, run, user_id, master_branch=None, fields=None,
versions_dict=None, search_targets=None, root_category='course',
root_block_id=None, **kwargs
):
"""
Create a new entry in the active courses index which points to an existing or new structure. Returns
the course root of the resulting entry (the location has the course id)
Arguments:
org (str): the organization that owns the course
course (str): the course number of the course
run (str): the particular run of the course (e.g. 2013_T1)
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
course + run: If there are duplicates, this method will raise DuplicateCourseError
        fields: if scope.settings fields are provided, sets those fields on the root course object in
            the new course. If both settings fields and a starting version are provided (via
            versions_dict), it will generate a successor version to the given version and update the
            settings fields with any provided values (via update, not setting).
        fields (content): if scope.content fields are provided, updates the fields of the new course
            xblock definition to this. Like settings fields, if provided, this will cause a new version
            of any given version as well as a new version of the definition (which will point to the
            existing one if given a version). If not provided and given a versions_dict, it will reuse
            the same definition as that version's course (obvious since it's reusing the course). If
            not provided and no versions_dict is given, it will be empty and get the field defaults
            when loaded.
master_branch: the tag (key) for the version name in the dict which is the DRAFT version. Not the actual
version guid, but what to call it.
search_targets: a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
versions_dict: the starting version ids where the keys are the tags such as DRAFT and PUBLISHED
and the values are structure guids. If provided, the new course will reuse this version (unless you also
provide any fields overrides, see above). if not provided, will create a mostly empty course
structure with just a category course root xblock.
"""
# either need to assert this or have a default
assert master_branch is not None
# check course and run's uniqueness
locator = CourseLocator(org=org, course=course, run=run, branch=master_branch)
return self._create_courselike(
locator, user_id, master_branch, fields, versions_dict,
search_targets, root_category, root_block_id, **kwargs
)
def _create_courselike(
self, locator, user_id, master_branch, fields=None,
versions_dict=None, search_targets=None, root_category='course',
root_block_id=None, **kwargs
):
"""
Internal code for creating a course or library
"""
index = self.get_course_index(locator, ignore_case=True)
if index is not None:
raise DuplicateCourseError(locator, index)
partitioned_fields = self.partition_fields_by_scope(root_category, fields)
block_fields = partitioned_fields[Scope.settings]
if Scope.children in partitioned_fields:
block_fields.update(partitioned_fields[Scope.children])
definition_fields = self._serialize_fields(root_category, partitioned_fields.get(Scope.content, {}))
# build from inside out: definition, structure, index entry
# if building a wholly new structure
if versions_dict is None or master_branch not in versions_dict:
# create new definition and structure
definition_id = self.create_definition_from_data(locator, definition_fields, root_category, user_id).definition_id
draft_structure = self._new_structure(
user_id,
BlockKey(
root_category,
root_block_id or SplitMongoModuleStore.DEFAULT_ROOT_COURSE_BLOCK_ID,
),
block_fields,
definition_id
)
new_id = draft_structure['_id']
if versions_dict is None:
versions_dict = {master_branch: new_id}
else:
versions_dict[master_branch] = new_id
elif block_fields or definition_fields: # pointing to existing course w/ some overrides
# just get the draft_version structure
draft_version = CourseLocator(version_guid=versions_dict[master_branch])
draft_structure = self._lookup_course(draft_version).structure
draft_structure = self.version_structure(locator, draft_structure, user_id)
new_id = draft_structure['_id']
root_block = draft_structure['blocks'][draft_structure['root']]
if block_fields is not None:
root_block.fields.update(self._serialize_fields(root_category, block_fields))
if definition_fields is not None:
old_def = self.get_definition(locator, root_block.definition)
new_fields = old_def['fields']
new_fields.update(definition_fields)
definition_id = self._update_definition_from_data(locator, old_def, new_fields, user_id).definition_id
root_block.definition = definition_id
root_block.edit_info.edited_on = datetime.datetime.now(UTC)
root_block.edit_info.edited_by = user_id
root_block.edit_info.previous_version = root_block.edit_info.update_version
root_block.edit_info.update_version = new_id
versions_dict[master_branch] = new_id
else: # Pointing to an existing course structure
new_id = versions_dict[master_branch]
draft_version = CourseLocator(version_guid=new_id)
draft_structure = self._lookup_course(draft_version).structure
locator = locator.replace(version_guid=new_id)
with self.bulk_operations(locator):
self.update_structure(locator, draft_structure)
index_entry = {
'_id': ObjectId(),
'org': locator.org,
'course': locator.course,
'run': locator.run,
'edited_by': user_id,
'edited_on': datetime.datetime.now(UTC),
'versions': versions_dict,
'schema_version': self.SCHEMA_VERSION,
'search_targets': search_targets or {},
}
if fields is not None:
self._update_search_targets(index_entry, fields)
self.insert_course_index(locator, index_entry)
# expensive hack to persist default field values set in __init__ method (e.g., wiki_slug)
if isinstance(locator, LibraryLocator):
course = self.get_library(locator, **kwargs)
else:
course = self.get_course(locator, **kwargs)
return self.update_item(course, user_id, **kwargs)
def create_library(self, org, library, user_id, fields, **kwargs):
"""
Create a new library. Arguments are similar to create_course().
"""
kwargs["fields"] = fields
kwargs["master_branch"] = kwargs.get("master_branch", ModuleStoreEnum.BranchName.library)
kwargs["root_category"] = kwargs.get("root_category", "library")
kwargs["root_block_id"] = kwargs.get("root_block_id", "library")
locator = LibraryLocator(org=org, library=library, branch=kwargs["master_branch"])
return self._create_courselike(locator, user_id, **kwargs)
def update_item(self, descriptor, user_id, allow_not_found=False, force=False, **kwargs):
"""
        Save the descriptor's fields. It doesn't descend the course dag to save the children.
        Return the new descriptor (updated location).
        raises ItemNotFoundError if the location does not exist.
        Creates a new course version. If the descriptor's location has an org and course and run, it moves the course head
        pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
        change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
        the course but leaves the head pointer where it is (this change will not be in the course head).
        The implementation tries to detect which changes, if any, actually need to be saved and thus won't version
        the definition, structure, or course if they didn't change.
"""
partitioned_fields = self.partition_xblock_fields_by_scope(descriptor)
return self._update_item_from_fields(
user_id, descriptor.location.course_key, BlockKey.from_usage_key(descriptor.location),
partitioned_fields, descriptor.definition_locator, allow_not_found, force, **kwargs
) or descriptor
def _update_item_from_fields(self, user_id, course_key, block_key, partitioned_fields, # pylint: disable=too-many-statements
definition_locator, allow_not_found, force, asides=None, **kwargs):
"""
Broke out guts of update_item for short-circuited internal use only
"""
with self.bulk_operations(course_key):
if allow_not_found and isinstance(block_key.id, (LocalId, NoneType)):
fields = {}
for subfields in partitioned_fields.itervalues():
fields.update(subfields)
return self.create_item(
user_id, course_key, block_key.type, fields=fields, asides=asides, force=force
)
original_structure = self._lookup_course(course_key).structure
index_entry = self._get_index_if_valid(course_key, force)
original_entry = self._get_block_from_structure(original_structure, block_key)
if original_entry is None:
if allow_not_found:
fields = {}
for subfields in partitioned_fields.itervalues():
fields.update(subfields)
return self.create_item(user_id, course_key, block_key.type, block_id=block_key.id, fields=fields,
asides=asides, force=force)
else:
raise ItemNotFoundError(course_key.make_usage_key(block_key.type, block_key.id))
is_updated = False
definition_fields = partitioned_fields[Scope.content]
if definition_locator is None:
definition_locator = DefinitionLocator(original_entry.block_type, original_entry.definition)
if definition_fields:
definition_locator, is_updated = self.update_definition_from_data(
course_key, definition_locator, definition_fields, user_id
)
# check metadata
settings = partitioned_fields[Scope.settings]
settings = self._serialize_fields(block_key.type, settings)
if not is_updated:
is_updated = self._compare_settings(settings, original_entry.fields)
# check children
if partitioned_fields.get(Scope.children, {}): # purposely not 'is not None'
serialized_children = [BlockKey.from_usage_key(child) for child in partitioned_fields[Scope.children]['children']]
is_updated = is_updated or original_entry.fields.get('children', []) != serialized_children
if is_updated:
settings['children'] = serialized_children
asides_data_to_update = None
if asides:
asides_data_to_update, asides_updated = self._get_asides_to_update_from_structure(original_structure,
block_key, asides)
else:
asides_updated = False
# if updated, rev the structure
if is_updated or asides_updated:
new_structure = self.version_structure(course_key, original_structure, user_id)
block_data = self._get_block_from_structure(new_structure, block_key)
block_data.definition = definition_locator.definition_id
block_data.fields = settings
if asides_updated:
block_data.asides = asides_data_to_update
new_id = new_structure['_id']
# source_version records which revision a block was copied from. In this method, we're updating
# the block, so it's no longer a direct copy, and we can remove the source_version reference.
block_data.edit_info.source_version = None
self.version_block(block_data, user_id, new_id)
self.update_structure(course_key, new_structure)
# update the index entry if appropriate
if index_entry is not None:
self._update_search_targets(index_entry, definition_fields)
self._update_search_targets(index_entry, settings)
if isinstance(course_key, LibraryLocator):
course_key = LibraryLocator(
org=index_entry['org'],
library=index_entry['course'],
branch=course_key.branch,
version_guid=new_id
)
else:
course_key = CourseLocator(
org=index_entry['org'],
course=index_entry['course'],
run=index_entry['run'],
branch=course_key.branch,
version_guid=new_id
)
self._update_head(course_key, index_entry, course_key.branch, new_id)
elif isinstance(course_key, LibraryLocator):
course_key = LibraryLocator(version_guid=new_id)
else:
course_key = CourseLocator(version_guid=new_id)
if isinstance(course_key, LibraryLocator):
self._flag_library_updated_event(course_key)
# fetch and return the new item--fetching is unnecessary but a good qc step
new_locator = course_key.make_usage_key(block_key.type, block_key.id)
return self.get_item(new_locator, **kwargs)
else:
return None
def create_xblock(
self, runtime, course_key, block_type, block_id=None, fields=None,
definition_id=None, parent_xblock=None, **kwargs
):
"""
This method instantiates the correct subclass of XModuleDescriptor based
on the contents of json_data. It does not persist it and can create one which
has no usage id.
parent_xblock is used to compute inherited metadata as well as to append the new xblock.
json_data:
- 'block_type': the xmodule block_type
        - 'fields': a dict of locally set fields (not inherited) in json format, not pythonic typed format!
- 'definition': the object id of the existing definition
"""
assert runtime is not None
xblock_class = runtime.load_block_type(block_type)
json_data = {
'block_type': block_type,
'fields': {},
}
if definition_id is not None:
json_data['definition'] = definition_id
if parent_xblock is None:
# If no parent, then nothing to inherit.
inherited_settings = {}
else:
inherited_settings = parent_xblock.xblock_kvs.inherited_settings.copy()
if fields is not None:
for field_name in inheritance.InheritanceMixin.fields:
if field_name in fields:
inherited_settings[field_name] = fields[field_name]
new_block = runtime.xblock_from_json(
xblock_class,
course_key,
BlockKey(block_type, block_id) if block_id else None,
BlockData(**json_data),
**kwargs
)
for field_name, value in (fields or {}).iteritems():
setattr(new_block, field_name, value)
if parent_xblock is not None:
parent_xblock.children.append(new_block.scope_ids.usage_id)
# decache pending children field settings
parent_xblock.save()
return new_block
def persist_xblock_dag(self, xblock, user_id, force=False):
"""
        Create or update the xblock and all of its children. The xblock's location must specify a course.
If it doesn't specify a usage_id, then it's presumed to be new and need creation. This function
descends the children performing the same operation for any that are xblocks. Any children which
are block_ids just update the children pointer.
All updates go into the same course version (bulk updater).
Updates the objects which came in w/ updated location and definition_location info.
returns the post-persisted version of the incoming xblock. Note that its children will be ids not
objects.
:param xblock: the head of the dag
:param user_id: who's doing the change
"""
# find course_index entry if applicable and structures entry
course_key = xblock.location.course_key
with self.bulk_operations(course_key):
index_entry = self._get_index_if_valid(course_key, force)
structure = self._lookup_course(course_key).structure
new_structure = self.version_structure(course_key, structure, user_id)
new_id = new_structure['_id']
is_updated = self._persist_subdag(course_key, xblock, user_id, new_structure['blocks'], new_id)
if is_updated:
self.update_structure(course_key, new_structure)
# update the index entry if appropriate
if index_entry is not None:
self._update_head(course_key, index_entry, xblock.location.branch, new_id)
# fetch and return the new item--fetching is unnecessary but a good qc step
return self.get_item(xblock.location.for_version(new_id))
else:
return xblock
def _persist_subdag(self, course_key, xblock, user_id, structure_blocks, new_id):
# persist the definition if persisted != passed
partitioned_fields = self.partition_xblock_fields_by_scope(xblock)
new_def_data = self._serialize_fields(xblock.category, partitioned_fields[Scope.content])
is_updated = False
if xblock.definition_locator is None or isinstance(xblock.definition_locator.definition_id, LocalId):
xblock.definition_locator = self.create_definition_from_data(
course_key, new_def_data, xblock.category, user_id
)
is_updated = True
elif new_def_data:
xblock.definition_locator, is_updated = self.update_definition_from_data(
course_key, xblock.definition_locator, new_def_data, user_id
)
if isinstance(xblock.scope_ids.usage_id.block_id, LocalId):
# generate an id
is_new = True
is_updated = True
block_id = getattr(xblock.scope_ids.usage_id.block_id, 'block_id', None)
if block_id is None:
block_key = self._generate_block_key(structure_blocks, xblock.scope_ids.block_type)
else:
block_key = BlockKey(xblock.scope_ids.block_type, block_id)
new_usage_id = xblock.scope_ids.usage_id.replace(block_id=block_key.id)
xblock.scope_ids = xblock.scope_ids._replace(usage_id=new_usage_id)
else:
is_new = False
block_key = BlockKey(xblock.scope_ids.block_type, xblock.scope_ids.usage_id.block_id)
children = []
if xblock.has_children:
for child in xblock.children:
if isinstance(child.block_id, LocalId):
child_block = xblock.system.get_block(child)
is_updated = self._persist_subdag(course_key, child_block, user_id, structure_blocks, new_id) or is_updated
children.append(BlockKey.from_usage_key(child_block.location))
else:
children.append(BlockKey.from_usage_key(child))
is_updated = is_updated or structure_blocks[block_key].fields['children'] != children
block_fields = partitioned_fields[Scope.settings]
block_fields = self._serialize_fields(xblock.category, block_fields)
if not is_new and not is_updated:
is_updated = self._compare_settings(block_fields, structure_blocks[block_key].fields)
if children:
block_fields['children'] = children
if is_updated:
if is_new:
block_info = self._new_block(
user_id,
xblock.category,
block_fields,
xblock.definition_locator.definition_id,
new_id,
raw=True
)
else:
block_info = structure_blocks[block_key]
block_info.fields = block_fields
block_info.definition = xblock.definition_locator.definition_id
self.version_block(block_info, user_id, new_id)
structure_blocks[block_key] = block_info
return is_updated
def _compare_settings(self, settings, original_fields):
"""
Return True if the settings are not == to the original fields (ignoring 'children'), else False.
:param settings: the new settings-scoped fields
:param original_fields: the persisted fields to compare them against
"""
original_keys = original_fields.keys()
if 'children' in original_keys:
original_keys.remove('children')
if len(settings) != len(original_keys):
return True
else:
new_keys = settings.keys()
for key in original_keys:
if key not in new_keys or original_fields[key] != settings[key]:
return True
return False
def copy(self, user_id, source_course, destination_course, subtree_list=None, blacklist=None):
"""
Copies each xblock in subtree_list and those blocks descendants excluding blacklist
from source_course to destination_course.
To delete a block in the destination_course, copy its parent and blacklist the other
sibs to keep them from being copied. You can also just call delete_item on the destination.
Ensures that each subtree occurs in the same place in destination as it does in source. If any
of the source's subtree parents are missing from destination, it raises ItemNotFound([parent_ids]).
To determine the same relative order vis-a-vis published siblings,
publishing may involve changing the order of previously published siblings. For example,
if publishing `[c, d]` and source parent has children `[a, b, c, d, e]` and destination parent
currently has children `[e, b]`, there's no obviously correct resulting order; thus, publish will
reorder destination to `[b, c, d, e]` to make it conform with the source.
:param source_course: a CourseLocator (can be a version or course w/ branch)
:param destination_course: a CourseLocator which must be an existing course but branch doesn't have
to exist yet. (The course must exist b/c Locator doesn't have everything necessary to create it).
Note, if the branch doesn't exist, then the source_course structure's root must be in subtree_list;
otherwise, the publish will violate the parents must exist rule.
:param subtree_list: a list of usage keys whose subtrees to publish.
:param blacklist: a list of usage keys to not change in the destination: i.e., don't add
if not there, don't update if there.
Raises:
ItemNotFoundError: if it cannot find the course, or if the request is to publish a
subtree but the ancestors up to and including the course root are not published.
"""
# get the destination's index, and source and destination structures.
with self.bulk_operations(source_course):
source_structure = self._lookup_course(source_course).structure
with self.bulk_operations(destination_course):
index_entry = self.get_course_index(destination_course)
if index_entry is None:
# brand new course
raise ItemNotFoundError(destination_course)
if destination_course.branch not in index_entry['versions']:
# must be copying the dag root if there's no current dag
root_block_key = source_structure['root']
if not any(root_block_key == BlockKey.from_usage_key(subtree) for subtree in subtree_list):
raise ItemNotFoundError(u'Must publish course root {}'.format(root_block_key))
root_source = source_structure['blocks'][root_block_key]
# create branch
destination_structure = self._new_structure(
user_id, root_block_key,
# leave off the fields b/c the children must be filtered
definition_id=root_source.definition,
)
else:
destination_structure = self._lookup_course(destination_course).structure
destination_structure = self.version_structure(destination_course, destination_structure, user_id)
if blacklist != EXCLUDE_ALL:
blacklist = [BlockKey.from_usage_key(shunned) for shunned in blacklist or []]
# iterate over subtree list filtering out blacklist.
orphans = set()
destination_blocks = destination_structure['blocks']
for subtree_root in subtree_list:
if BlockKey.from_usage_key(subtree_root) != source_structure['root']:
# find the parents and put root in the right sequence
parents = self._get_parents_from_structure(BlockKey.from_usage_key(subtree_root), source_structure)
parent_found = False
for parent in parents:
# If a parent isn't found in the destination_blocks, it's possible it was renamed
# in the course export. Continue and only throw an exception if *no* parents are found.
if parent in destination_blocks:
parent_found = True
orphans.update(
self._sync_children(
source_structure['blocks'][parent],
destination_blocks[parent],
BlockKey.from_usage_key(subtree_root)
)
)
if len(parents) and not parent_found:
raise ItemNotFoundError(parents)
# update/create the subtree and its children in destination (skipping blacklist)
orphans.update(
self._copy_subdag(
user_id, destination_structure['_id'],
BlockKey.from_usage_key(subtree_root),
source_structure['blocks'],
destination_blocks,
blacklist
)
)
# remove any remaining orphans
for orphan in orphans:
# orphans will include moved as well as deleted xblocks. Only delete the deleted ones.
self._delete_if_true_orphan(orphan, destination_structure)
# update the db
self.update_structure(destination_course, destination_structure)
self._update_head(destination_course, index_entry, destination_course.branch, destination_structure['_id'])
@contract(source_keys="list(BlockUsageLocator)", dest_usage=BlockUsageLocator)
def copy_from_template(self, source_keys, dest_usage, user_id, head_validation=True):
"""
Flexible mechanism for inheriting content from an external course/library/etc.
Will copy all of the XBlocks whose keys are passed as `source_keys` so that they become
children of the XBlock whose key is `dest_usage`. Any previously existing children of
`dest_usage` that haven't been replaced/updated by this copy_from_template operation will
be deleted.
Unlike `copy()`, this does not care whether the resulting blocks are positioned similarly
in their new course/library. However, the resulting blocks will be in the same relative
order as `source_keys`.
If any of the blocks specified already exist as children of the destination block, they
will be updated rather than duplicated or replaced. If they have Scope.settings field values
overriding inherited default values, those overrides will be preserved.
IMPORTANT: This method does not preserve block_id - in other words, every block that is
copied will be assigned a new block_id. This is because we assume that the same source block
may be copied into one course in multiple places. However, it *is* guaranteed that every
time this method is called for the same source block and dest_usage, the same resulting
block id will be generated.
:param source_keys: a list of BlockUsageLocators. Order is preserved.
:param dest_usage: The BlockUsageLocator that will become the parent of an inherited copy
of all the xblocks passed in `source_keys`.
:param user_id: The user who will get credit for making this change.
"""
# Preload the block structures for all source courses/libraries/etc.
# so that we can access descendant information quickly
source_structures = {}
for key in source_keys:
course_key = key.course_key
if course_key.branch is None:
raise ItemNotFoundError("branch is required for all source keys when using copy_from_template")
if course_key not in source_structures:
with self.bulk_operations(course_key):
source_structures[course_key] = self._lookup_course(
course_key, head_validation=head_validation
).structure
destination_course = dest_usage.course_key
with self.bulk_operations(destination_course):
index_entry = self.get_course_index(destination_course)
if index_entry is None:
raise ItemNotFoundError(destination_course)
dest_structure = self._lookup_course(destination_course).structure
old_dest_structure_version = dest_structure['_id']
dest_structure = self.version_structure(destination_course, dest_structure, user_id)
# Set of all descendent block IDs of dest_usage that are to be replaced:
block_key = BlockKey(dest_usage.block_type, dest_usage.block_id)
orig_descendants = set(self.descendants(dest_structure['blocks'], block_key, depth=None, descendent_map={}))
# The descendants() method used above adds the block itself, which we don't consider a descendant.
orig_descendants.remove(block_key)
new_descendants = self._copy_from_template(
source_structures, source_keys, dest_structure, block_key, user_id, head_validation
)
# Update the edit_info:
dest_info = dest_structure['blocks'][block_key]
dest_info.edit_info.previous_version = dest_info.edit_info.update_version
dest_info.edit_info.update_version = old_dest_structure_version
dest_info.edit_info.edited_by = user_id
dest_info.edit_info.edited_on = datetime.datetime.now(UTC)
orphans = orig_descendants - new_descendants
for orphan in orphans:
del dest_structure['blocks'][orphan]
self.update_structure(destination_course, dest_structure)
self._update_head(destination_course, index_entry, destination_course.branch, dest_structure['_id'])
# Return usage locators for all the new children:
return [
destination_course.make_usage_key(*k)
for k in dest_structure['blocks'][block_key].fields['children']
]
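# Illustrative note (not part of the original source): because each new block id
# is a sha1 of (source course id, source block id, new parent block id), calling
# copy_from_template again with the same source_keys and dest_usage maps onto the
# same destination blocks and updates them in place rather than duplicating them.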
def _copy_from_template(
self, source_structures, source_keys, dest_structure, new_parent_block_key, user_id, head_validation
):
"""
Internal recursive implementation of copy_from_template()
Returns the new set of BlockKeys that are the new descendants of the block with key 'block_key'
"""
new_blocks = set()
new_children = list() # ordered list of the new children of new_parent_block_key
for usage_key in source_keys:
src_course_key = usage_key.course_key
hashable_source_id = src_course_key.for_version(None)
block_key = BlockKey(usage_key.block_type, usage_key.block_id)
source_structure = source_structures[src_course_key]
if block_key not in source_structure['blocks']:
raise ItemNotFoundError(usage_key)
source_block_info = source_structure['blocks'][block_key]
# Compute a new block ID. This new block ID must be consistent when this
# method is called with the same (source_key, dest_structure) pair
unique_data = "{}:{}:{}".format(
unicode(hashable_source_id).encode("utf-8"),
block_key.id,
new_parent_block_key.id,
)
new_block_id = hashlib.sha1(unique_data).hexdigest()[:20]
new_block_key = BlockKey(block_key.type, new_block_id)
# Now clone block_key to new_block_key:
new_block_info = copy.deepcopy(source_block_info)
# Note that new_block_info now points to the same definition ID entry as source_block_info did
existing_block_info = dest_structure['blocks'].get(new_block_key, BlockData())
# Inherit the Scope.settings values from 'fields' to 'defaults'
new_block_info.defaults = new_block_info.fields
# <workaround>
# CAPA modules store their 'markdown' value (an alternate representation of their content)
# in Scope.settings rather than Scope.content :-/
# markdown is a field that really should not be overridable - it fundamentally changes the content.
# capa modules also use a custom editor that always saves their markdown field to the metadata,
# even if it hasn't changed, which breaks our override system.
# So until capa modules are fixed, we special-case them and remove their markdown fields,
# forcing the inherited version to use XML only.
if usage_key.block_type == 'problem' and 'markdown' in new_block_info.defaults:
del new_block_info.defaults['markdown']
# </workaround>
# Preserve any existing overrides
new_block_info.fields = existing_block_info.fields
if 'children' in new_block_info.defaults:
del new_block_info.defaults['children'] # Will be set later
new_block_info.edit_info = existing_block_info.edit_info
new_block_info.edit_info.previous_version = new_block_info.edit_info.update_version
new_block_info.edit_info.update_version = dest_structure['_id']
# Note we do not set 'source_version' - it's only used for copying identical blocks
# from draft to published as part of publishing workflow.
# Setting it to the source_block_info structure version here breaks split_draft's has_changes() method.
new_block_info.edit_info.edited_by = user_id
new_block_info.edit_info.edited_on = datetime.datetime.now(UTC)
new_block_info.edit_info.original_usage = unicode(usage_key.replace(branch=None, version_guid=None))
new_block_info.edit_info.original_usage_version = source_block_info.edit_info.update_version
dest_structure['blocks'][new_block_key] = new_block_info
children = source_block_info.fields.get('children')
if children:
children = [src_course_key.make_usage_key(child.type, child.id) for child in children]
new_blocks |= self._copy_from_template(
source_structures, children, dest_structure, new_block_key, user_id, head_validation
)
new_blocks.add(new_block_key)
# And add new_block_key to the list of new_parent_block_key's new children:
new_children.append(new_block_key)
# Update the children of new_parent_block_key
dest_structure['blocks'][new_parent_block_key].fields['children'] = new_children
return new_blocks
def delete_item(self, usage_locator, user_id, force=False):
"""
Delete the tree rooted at the given block and any references w/in the course to the block
from a new version of the course structure.
returns CourseLocator for new version
raises ItemNotFoundError if the location does not exist.
raises ValueError if usage_locator points to the structure root
Creates a new course version. If the descriptor's location has an org, a course, and a run, it moves the course head
pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
the course but leaves the head pointer where it is (this change will not be in the course head).
"""
if not isinstance(usage_locator, BlockUsageLocator) or usage_locator.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(usage_locator)
with self.bulk_operations(usage_locator.course_key):
original_structure = self._lookup_course(usage_locator.course_key).structure
block_key = BlockKey.from_usage_key(usage_locator)
if original_structure['root'] == block_key:
raise ValueError("Cannot delete the root of a course")
if block_key not in original_structure['blocks']:
raise ValueError("Cannot delete block_key {} from course {}, because that block does not exist.".format(
block_key,
usage_locator,
))
index_entry = self._get_index_if_valid(usage_locator.course_key, force)
new_structure = self.version_structure(usage_locator.course_key, original_structure, user_id)
new_blocks = new_structure['blocks']
new_id = new_structure['_id']
parent_block_keys = self._get_parents_from_structure(block_key, original_structure)
for parent_block_key in parent_block_keys:
parent_block = new_blocks[parent_block_key]
parent_block.fields['children'].remove(block_key)
parent_block.edit_info.edited_on = datetime.datetime.now(UTC)
parent_block.edit_info.edited_by = user_id
parent_block.edit_info.previous_version = parent_block.edit_info.update_version
parent_block.edit_info.update_version = new_id
# remove the source_version reference
parent_block.edit_info.source_version = None
self.decache_block(usage_locator.course_key, new_id, parent_block_key)
self._remove_subtree(BlockKey.from_usage_key(usage_locator), new_blocks)
# update index if appropriate and structures
self.update_structure(usage_locator.course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(usage_locator.course_key, index_entry, usage_locator.branch, new_id)
result = usage_locator.course_key.for_version(new_id)
else:
result = CourseLocator(version_guid=new_id)
if isinstance(usage_locator.course_key, LibraryLocator):
self._flag_library_updated_event(usage_locator.course_key)
self._emit_item_deleted_signal(usage_locator, user_id)
return result
@contract(root_block_key=BlockKey, blocks='dict(BlockKey: BlockData)')
def _remove_subtree(self, root_block_key, blocks):
"""
Remove the subtree rooted at root_block_key
We do this breadth-first to make sure that we don't remove
any children that may have parents that we don't want to delete.
"""
# create mapping from each child's key to its parents' keys
child_parent_map = defaultdict(set)
for block_key, block_data in blocks.iteritems():
for child in block_data.fields.get('children', []):
child_parent_map[BlockKey(*child)].add(block_key)
to_delete = {root_block_key}
tier = {root_block_key}
while tier:
next_tier = set()
for block_key in tier:
for child in blocks[block_key].fields.get('children', []):
child_block_key = BlockKey(*child)
parents = child_parent_map[child_block_key]
# Make sure we want to delete all of the child's parents
# before slating it for deletion
if parents.issubset(to_delete):
next_tier.add(child_block_key)
tier = next_tier
to_delete.update(tier)
for block_key in to_delete:
del blocks[block_key]
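# Illustrative note (not part of the original source): given children
#   A -> [B, C], B -> [D], C -> [D]
# _remove_subtree(B, blocks) deletes only B, since D's other parent C is not
# slated for deletion, while _remove_subtree(A, blocks) deletes all four blocks.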
def delete_course(self, course_key, user_id):
"""
Remove the given course from the course index.
Only removes the course from the index. The data remains. You can use create_course
with a versions hash to restore the course; however, the edited_on and
edited_by won't reflect the originals, of course.
"""
# this is the only real delete in the system. should it do something else?
log.info(u"deleting course from split-mongo: %s", course_key)
self.delete_course_index(course_key)
# We do NOT call the super class here since we need to keep the assets
# in case the course is later restored.
# super(SplitMongoModuleStore, self).delete_course(course_key, user_id)
self._emit_course_deleted_signal(course_key)
@contract(block_map="dict(BlockKey: dict)", block_key=BlockKey)
def inherit_settings(
self, block_map, block_key, inherited_settings_map, inheriting_settings=None, inherited_from=None
):
"""
Updates block_data with any inheritable setting set by an ancestor and recurses to children.
"""
if block_key not in block_map:
return
block_data = block_map[block_key]
if inheriting_settings is None:
inheriting_settings = {}
if inherited_from is None:
inherited_from = []
# the currently passed down values take precedence over any previously cached ones
# NOTE: this should show the values which all fields would have if inherited: i.e.,
# not set to the locally defined value but to value set by nearest ancestor who sets it
inherited_settings_map.setdefault(block_key, {}).update(inheriting_settings)
# update the inheriting w/ what should pass to children
inheriting_settings = inherited_settings_map[block_key].copy()
block_fields = block_data.fields
for field_name in inheritance.InheritanceMixin.fields:
if field_name in block_fields:
inheriting_settings[field_name] = block_fields[field_name]
for child in block_fields.get('children', []):
try:
if child in inherited_from:
raise Exception(u'Infinite loop detected when inheriting to {}, having already inherited from {}'.format(child, inherited_from))
self.inherit_settings(
block_map,
BlockKey(*child),
inherited_settings_map,
inheriting_settings,
inherited_from + [child]
)
except KeyError:
# here's where we need logic for looking up in other structures when we allow cross pointers
# but it's also getting this during course creation if creating top down w/ children set or
# migration where the old mongo published had pointers to privates
pass
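# Illustrative note (not part of the original source): if a course sets
# graceperiod and one chapter overrides it, inherited_settings_map[chapter] still
# records the course's value (what the chapter *would* inherit), while the
# chapter's children see the chapter's override in their own map entries.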
def descendants(self, block_map, block_id, depth, descendent_map):
"""
adds block and its descendants out to depth to descendent_map
Depth specifies the number of levels of descendants to return
(0 => this usage only, 1 => this usage and its children, etc...)
A depth of None returns all descendants
"""
if block_id not in block_map:
return descendent_map
if block_id not in descendent_map:
descendent_map[block_id] = block_map[block_id]
if depth is None or depth > 0:
depth = depth - 1 if depth is not None else None
for child in descendent_map[block_id].fields.get('children', []):
descendent_map = self.descendants(block_map, child, depth, descendent_map)
return descendent_map
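# e.g., self.descendants(block_map, block_key, 1, {}) returns a map holding the
# block plus its immediate children; depth=None descends the entire subtree.
# (Illustrative note, not part of the original source.)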
def get_modulestore_type(self, course_key=None):
"""
Returns an enumeration-like type reflecting the type of this modulestore, per ModuleStoreEnum.Type.
Args:
course_key: just for signature compatibility
"""
return ModuleStoreEnum.Type.split
def _find_course_assets(self, course_key):
"""
Split specific lookup
"""
try:
course_assets = self._lookup_course(course_key).structure.get('assets', {})
except (InsufficientSpecificationError, VersionConflictError) as err:
log.warning(u'Error finding assets for org "%s" course "%s" on asset '
u'request. Either version of course_key is None or invalid.',
course_key.org, course_key.course)
return {}
return course_assets
def _update_course_assets(self, user_id, asset_key, update_function):
"""
A wrapper for functions wanting to manipulate assets. Gets and versions the structure,
passes the mutable array for either 'assets' or 'thumbnails' as well as the idx to the function for it to
update, then persists the changed data back into the course.
The update function can raise an exception if it doesn't want to actually do the commit. The
surrounding method probably should catch that exception.
"""
with self.bulk_operations(asset_key.course_key):
original_structure = self._lookup_course(asset_key.course_key).structure
index_entry = self._get_index_if_valid(asset_key.course_key)
new_structure = self.version_structure(asset_key.course_key, original_structure, user_id)
course_assets = new_structure.setdefault('assets', {})
asset_type = asset_key.asset_type
all_assets = SortedAssetList(iterable=[])
# Assets should be pre-sorted, so add them efficiently without sorting.
# extend() will raise a ValueError if the passed-in list is not sorted.
all_assets.extend(course_assets.setdefault(asset_type, []))
asset_idx = all_assets.find(asset_key)
all_assets_updated = update_function(all_assets, asset_idx)
new_structure['assets'][asset_type] = all_assets_updated.as_list()
# update index if appropriate and structures
self.update_structure(asset_key.course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(asset_key.course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves a list of AssetMetadata to the modulestore. The list can be composed of multiple
asset types. This method is optimized for multiple inserts at once - it only re-saves the structure
at the end of all saves/updates.
"""
# Determine course key to use in bulk operation. Use the first asset assuming that
# all assets will be for the same course.
asset_key = asset_metadata_list[0].asset_id
course_key = asset_key.course_key
with self.bulk_operations(course_key):
original_structure = self._lookup_course(course_key).structure
index_entry = self._get_index_if_valid(course_key)
new_structure = self.version_structure(course_key, original_structure, user_id)
course_assets = new_structure.setdefault('assets', {})
assets_by_type = self._save_assets_by_type(
course_key, asset_metadata_list, course_assets, user_id, import_only
)
for asset_type, assets in assets_by_type.iteritems():
new_structure['assets'][asset_type] = assets.as_list()
# update index if appropriate and structures
self.update_structure(course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves or updates a single asset. Simply makes it a list and calls the list save above.
"""
return self.save_asset_metadata_list([asset_metadata, ], user_id, import_only)
@contract(asset_key='AssetKey', attr_dict=dict)
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute: value pairs to set
Raises:
ItemNotFoundError if no such item exists
AttributeError if attr is one of the built-in attrs.
"""
def _internal_method(all_assets, asset_idx):
"""
Update the found item
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
# Form an AssetMetadata.
mdata = AssetMetadata(asset_key, asset_key.path)
mdata.from_storable(all_assets[asset_idx])
mdata.update(attr_dict)
# Generate a Mongo doc from the metadata and update the course asset info.
all_assets[asset_idx] = mdata.to_storable()
return all_assets
self._update_course_assets(user_id, asset_key, _internal_method)
@contract(asset_key='AssetKey')
def delete_asset_metadata(self, asset_key, user_id):
"""
Internal; deletes a single asset's metadata.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
def _internal_method(all_asset_info, asset_idx):
"""
Remove the item if it was found
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
all_asset_info.pop(asset_idx)
return all_asset_info
try:
self._update_course_assets(user_id, asset_key, _internal_method)
return 1
except ItemNotFoundError:
return 0
@contract(source_course_key='CourseKey', dest_course_key='CourseKey')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
"""
source_structure = self._lookup_course(source_course_key).structure
with self.bulk_operations(dest_course_key):
original_structure = self._lookup_course(dest_course_key).structure
index_entry = self._get_index_if_valid(dest_course_key)
new_structure = self.version_structure(dest_course_key, original_structure, user_id)
new_structure['assets'] = source_structure.get('assets', {})
new_structure['thumbnails'] = source_structure.get('thumbnails', [])
# update index if appropriate and structures
self.update_structure(dest_course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(dest_course_key, index_entry, dest_course_key.branch, new_structure['_id'])
def fix_not_found(self, course_locator, user_id):
"""
Only intended for rather low level methods to use. Goes through the children attrs of
each block removing any whose block_id is not a member of the course.
:param course_locator: the course to clean
"""
original_structure = self._lookup_course(course_locator).structure
index_entry = self._get_index_if_valid(course_locator)
new_structure = self.version_structure(course_locator, original_structure, user_id)
for block in new_structure['blocks'].itervalues():
if 'children' in block.fields:
block.fields['children'] = [
block_id for block_id in block.fields['children']
if block_id in new_structure['blocks']
]
self.update_structure(course_locator, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(course_locator, index_entry, course_locator.branch, new_structure['_id'])
def convert_references_to_keys(self, course_key, xblock_class, jsonfields, blocks):
"""
Convert the given serialized fields to the deserialized values by finding all references
and converting them.
:param jsonfields: the serialized copy of the xblock's fields
"""
@contract(block_key="BlockUsageLocator | seq[2]")
def robust_usage_key(block_key):
"""
create a course_key relative usage key for the block_key. If the block_key is in blocks,
use its correct category; otherwise, use 'unknown'.
The purpose for this is that some operations add pointers as they build up the
structure without worrying about order of creation. Because the category of the
usage_key is for the most part inert, it's better to hack a value than to work
out a dependency graph algorithm for those functions which may prereference blocks.
"""
# if this was taken from cache, then its fields are already converted
if isinstance(block_key, BlockUsageLocator):
return block_key.map_into_course(course_key)
elif not isinstance(block_key, BlockKey):
block_key = BlockKey(*block_key)
try:
return course_key.make_usage_key(
block_key.type, block_key.id
)
except KeyError:
return course_key.make_usage_key('unknown', block_key.id)
xblock_class = self.mixologist.mix(xblock_class)
# Make a shallow copy, so that we aren't manipulating a cached field dictionary
output_fields = dict(jsonfields)
for field_name, value in output_fields.iteritems():
if value:
try:
field = xblock_class.fields.get(field_name)
except AttributeError:
continue
if isinstance(field, Reference):
output_fields[field_name] = robust_usage_key(value)
elif isinstance(field, ReferenceList):
output_fields[field_name] = [robust_usage_key(ele) for ele in value]
elif isinstance(field, ReferenceValueDict):
for key, subvalue in value.iteritems():
value[key] = robust_usage_key(subvalue)
return output_fields
def _get_index_if_valid(self, course_key, force=False):
"""
If the course_key identifies a course and points to its draft (or plausibly its draft),
then return the index entry.
raises VersionConflictError if not the right version
:param course_key: a CourseLocator
:param force: if false, raises VersionConflictError if the current head of the course != the one identified
by course_key
"""
if course_key.org is None or course_key.course is None or course_key.run is None or course_key.branch is None:
return None
else:
index_entry = self.get_course_index(course_key)
is_head = (
course_key.version_guid is None or
index_entry['versions'][course_key.branch] == course_key.version_guid
)
if is_head or force:
return index_entry
else:
raise VersionConflictError(
course_key,
index_entry['versions'][course_key.branch]
)
def _find_local_root(self, element_to_find, possibility, tree):
"""
Return True if element_to_find occurs in the subtree of tree rooted at possibility,
where tree maps each node to its list of children.
"""
if possibility not in tree:
return False
if element_to_find in tree[possibility]:
return True
for subtree in tree[possibility]:
if self._find_local_root(element_to_find, subtree, tree):
return True
return False
def _update_search_targets(self, index_entry, fields):
"""
Update the index entry if any of the given fields are in SEARCH_TARGET_DICT. (doesn't save
the changes, just changes them in the entry dict)
:param index_entry:
:param fields: a dictionary of fields and values usually only those explicitly set and already
ready for persisting (e.g., references converted to block_ids)
"""
for field_name, field_value in fields.iteritems():
if field_name in self.SEARCH_TARGET_DICT:
index_entry.setdefault('search_targets', {})[field_name] = field_value
def _update_head(self, course_key, index_entry, branch, new_id):
"""
Update the active index for the given course's branch to point to new_id
:param course_key: the course whose index entry should change
:param index_entry: the index entry to update and persist
:param branch: the branch whose head pointer should move
:param new_id: the structure version id to set as the new head
"""
if not isinstance(new_id, ObjectId):
raise TypeError('new_id must be an ObjectId, but is {!r}'.format(new_id))
index_entry['versions'][branch] = new_id
self.update_course_index(course_key, index_entry)
def partition_xblock_fields_by_scope(self, xblock):
"""
Return a dictionary of scopes mapped to this xblock's explicitly set fields w/o any conversions
"""
# explicitly_set_fields_by_scope converts to json; so, avoiding it
# the existing partition_fields_by_scope works on a dict not an xblock
result = defaultdict(dict)
for field in xblock.fields.itervalues():
if field.is_set_on(xblock):
result[field.scope][field.name] = field.read_from(xblock)
return result
def _serialize_fields(self, category, fields):
"""
Convert any references to their serialized form. Handle some references already being unicode strings
because the client passed them that way and nothing above this layer did the necessary deserialization.
Remove any fields which split or its kvs computes or adds but does not want persisted.
:param fields: a dict of fields
"""
assert isinstance(fields, dict)
xblock_class = XBlock.load_class(category, self.default_class)
xblock_class = self.mixologist.mix(xblock_class)
def reference_block_id(reference):
"""
Handle client possibly setting field to strings rather than keys to get the block_id
"""
# perhaps replace by fixing the views or Field Reference*.from_json to return a Key
if isinstance(reference, basestring):
reference = BlockUsageLocator.from_string(reference)
elif isinstance(reference, BlockKey):
return reference
return BlockKey.from_usage_key(reference)
for field_name, value in fields.iteritems():
if value is not None:
if isinstance(xblock_class.fields[field_name], Reference):
fields[field_name] = reference_block_id(value)
elif isinstance(xblock_class.fields[field_name], ReferenceList):
fields[field_name] = [
reference_block_id(ele) for ele in value
]
elif isinstance(xblock_class.fields[field_name], ReferenceValueDict):
for key, subvalue in value.iteritems():
value[key] = reference_block_id(subvalue)
# should this recurse down dicts and lists just in case they contain datetime?
elif not isinstance(value, datetime.datetime): # don't convert datetimes!
fields[field_name] = xblock_class.fields[field_name].to_json(value)
return fields
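# Illustrative example (not part of the original source): a Reference field a
# client set to the string u'block-v1:Org+Course+Run+type@html+block@intro' is
# parsed back into a usage key and persisted as BlockKey(type='html', id='intro').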
def _new_structure(self, user_id, root_block_key, block_fields=None, definition_id=None):
"""
Internal function: create a structure element with no previous version. Must provide the root id
but not necessarily the info needed to create it (for the use case of publishing). If providing
root_block_key, must also provide block_fields and definition_id
"""
new_id = ObjectId()
if root_block_key is not None:
if block_fields is None:
block_fields = {}
blocks = {
root_block_key: self._new_block(
user_id, root_block_key.type, block_fields, definition_id, new_id
)
}
else:
blocks = {}
return {
'_id': new_id,
'root': root_block_key,
'previous_version': None,
'original_version': new_id,
'edited_by': user_id,
'edited_on': datetime.datetime.now(UTC),
'blocks': blocks,
'schema_version': self.SCHEMA_VERSION,
}
@contract(block_key=BlockKey)
def _get_parents_from_structure(self, block_key, structure):
"""
Given a structure, find block_key's parents in that structure. Note: returns
the parents in the encoded (BlockKey) format.
"""
return [
parent_block_key
for parent_block_key, value in structure['blocks'].iteritems()
if block_key in value.fields.get('children', [])
]
def _sync_children(self, source_parent, destination_parent, new_child):
"""
Reorder destination's children to the same as source's and remove any no longer in source.
Return the removed ones as orphans (a set).
"""
destination_reordered = []
destination_children = set(destination_parent.fields['children'])
source_children = source_parent.fields['children']
orphans = destination_children - set(source_children)
for child in source_children:
if child == new_child or child in destination_children:
destination_reordered.append(child)
destination_parent.fields['children'] = destination_reordered
return orphans
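# Illustrative note (not part of the original source): with source children
# [a, b, c], destination children [c, x], and new_child b, the destination
# becomes [b, c] (source order, restricted to blocks already present or being
# copied) and x is returned as an orphan.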
@contract(
block_key=BlockKey,
source_blocks="dict(BlockKey: *)",
destination_blocks="dict(BlockKey: *)",
blacklist="list(BlockKey) | str",
)
def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):
"""
Update destination_blocks for the sub-dag rooted at block_key to be like the one in
source_blocks excluding blacklist.
Return any newly discovered orphans (as a set)
"""
orphans = set()
destination_block = destination_blocks.get(block_key)
new_block = source_blocks[block_key]
if destination_block:
# reorder children to correspond to whatever order holds for source.
# remove any which source no longer claims (put into orphans)
# add any which are being copied
source_children = new_block.fields.get('children', [])
existing_children = destination_block.fields.get('children', [])
destination_reordered = SparseList()
for child in existing_children:
try:
index = source_children.index(child)
destination_reordered[index] = child
except ValueError:
orphans.add(BlockKey(*child))
if blacklist != EXCLUDE_ALL:
for index, child in enumerate(source_children):
if child not in blacklist:
destination_reordered[index] = child
# the history of the published leaps between publications and only points to
# previously published versions.
previous_version = destination_block.edit_info.update_version
destination_block = copy.deepcopy(new_block)
destination_block.fields['children'] = destination_reordered.compact_list()
destination_block.edit_info.previous_version = previous_version
destination_block.edit_info.update_version = destination_version
destination_block.edit_info.edited_by = user_id
destination_block.edit_info.edited_on = datetime.datetime.now(UTC)
else:
destination_block = self._new_block(
user_id, new_block.block_type,
self._filter_blacklist(copy.copy(new_block.fields), blacklist),
new_block.definition,
destination_version,
raw=True,
asides=new_block.asides,
block_defaults=new_block.defaults
)
# Extend the block's new edit_info with any extra edit_info fields from the source (e.g. original_usage):
for key, val in new_block.edit_info.to_storable().iteritems():
if getattr(destination_block.edit_info, key) is None:
setattr(destination_block.edit_info, key, val)
# If the block we are copying from was itself a copy, then just
# reference the original source, rather than the copy.
destination_block.edit_info.source_version = (
new_block.edit_info.source_version or new_block.edit_info.update_version
)
if blacklist != EXCLUDE_ALL:
for child in destination_block.fields.get('children', []):
if child not in blacklist:
orphans.update(
self._copy_subdag(
user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist
)
)
destination_blocks[block_key] = destination_block
return orphans
@contract(blacklist='list(BlockKey) | str')
def _filter_blacklist(self, fields, blacklist):
"""
Filter out blacklist from the children field in fields. Will construct a new list for children;
so, no need to worry about copying the children field, but it will modify fields.
"""
if blacklist == EXCLUDE_ALL:
fields['children'] = []
else:
fields['children'] = [child for child in fields.get('children', []) if BlockKey(*child) not in blacklist]
return fields
@contract(orphan=BlockKey)
def _delete_if_true_orphan(self, orphan, structure):
"""
Delete the orphan and any of its descendants which no longer have parents.
"""
if len(self._get_parents_from_structure(orphan, structure)) == 0:
orphan_data = structure['blocks'].pop(orphan)
for child in orphan_data.fields.get('children', []):
self._delete_if_true_orphan(BlockKey(*child), structure)
@contract(returns=BlockData)
def _new_block(self, user_id, category, block_fields, definition_id, new_id, raw=False,
asides=None, block_defaults=None):
"""
Create the core document structure for a block.
:param block_fields: the settings and children scoped fields as a dict or SON
:param definition_id: the pointer to the content scoped fields
:param new_id: the structure's version id
:param raw: true if this block already has all references serialized
:param asides: dict information related to the connected xblock asides
"""
if not raw:
block_fields = self._serialize_fields(category, block_fields)
if not asides:
asides = {}
document = {
'block_type': category,
'definition': definition_id,
'fields': block_fields,
'asides': asides,
'edit_info': {
'edited_on': datetime.datetime.now(UTC),
'edited_by': user_id,
'previous_version': None,
'update_version': new_id
}
}
if block_defaults:
document['defaults'] = block_defaults
return BlockData(**document)
@contract(block_key=BlockKey, returns='BlockData | None')
def _get_block_from_structure(self, structure, block_key):
"""
Encodes the block key before retrieving it from the structure to ensure it can
be a json dict key.
"""
return structure['blocks'].get(block_key)
@contract(block_key=BlockKey)
def _get_asides_to_update_from_structure(self, structure, block_key, asides):
"""
Get list of aside fields that should be updated/inserted
"""
block = self._get_block_from_structure(structure, block_key)
if asides:
updated = False
tmp_new_asides_data = {}
for asd in asides:
aside_type = asd['aside_type']
tmp_new_asides_data[aside_type] = asd
result_list = []
for i, aside in enumerate(block.asides):
if aside['aside_type'] in tmp_new_asides_data:
result_list.append(tmp_new_asides_data.pop(aside['aside_type']))
updated = True
else:
result_list.append(aside)
if tmp_new_asides_data:
for _, asd in tmp_new_asides_data.iteritems():
result_list.append(asd)
updated = True
return result_list, updated
else:
return block.asides, False
@contract(block_key=BlockKey, content=BlockData)
def _update_block_in_structure(self, structure, block_key, content):
"""
Encodes the block key before accessing it in the structure to ensure it can
be a json dict key.
"""
structure['blocks'][block_key] = content
@autoretry_read()
def find_courses_by_search_target(self, field_name, field_value):
"""
Find all the courses which cached that they have the given field with the given value.
Returns: list of branch-agnostic course_keys
"""
entries = self.find_matching_course_indexes(
search_targets={field_name: field_value}
)
return [
CourseLocator(entry['org'], entry['course'], entry['run']) # Branch agnostic
for entry in entries
]
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
return self.find_courses_by_search_target('wiki_slug', wiki_slug)
def heartbeat(self):
"""
Check that the db is reachable.
"""
return {ModuleStoreEnum.Type.split: self.db_connection.heartbeat()}
def create_runtime(self, course_entry, lazy):
"""
Create the proper runtime for this course
"""
services = self.services
services["partitions"] = PartitionService(course_entry.course_key)
return CachingDescriptorSystem(
modulestore=self,
course_entry=course_entry,
module_data={},
lazy=lazy,
default_class=self.default_class,
error_tracker=self.error_tracker,
render_template=self.render_template,
mixins=self.xblock_mixins,
select=self.xblock_select,
disabled_xblock_types=self.disabled_xblock_types,
services=services,
)
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
self.db_connection.ensure_indexes()
class SparseList(list):
"""
Enable inserting items into a list in arbitrary order and then retrieving them.
"""
# taken from http://stackoverflow.com/questions/1857780/sparse-assignment-list-in-python
def __setitem__(self, index, value):
"""
Add value to the list ensuring the list is long enough to accommodate it at the given index
"""
missing = index - len(self) + 1
if missing > 0:
self.extend([None] * missing)
list.__setitem__(self, index, value)
def compact_list(self):
"""
Return as a regular list w/ all Nones removed
"""
return [ele for ele in self if ele is not None]
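# Illustrative sketch (not part of the original source) of the sparse assignment
# _copy_subdag relies on:
#   sl = SparseList()
#   sl[3] = 'd'
#   sl[1] = 'b'
#   # sl is now [None, 'b', None, 'd']
#   assert sl.compact_list() == ['b', 'd']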
|
edx/course-discovery
|
refs/heads/master
|
course_discovery/apps/course_metadata/migrations/0136_drupalpublishuuidconfig.py
|
1
|
# Generated by Django 1.11.15 on 2018-12-05 21:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0135_remove_personwork'),
]
operations = [
migrations.CreateModel(
name='DrupalPublishUuidConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('course_run_ids', models.TextField(default=None, verbose_name='Course Run IDs')),
],
options={
'abstract': False,
},
),
]
|
FenceAtMHacks/flaskbackend
|
refs/heads/master
|
fence-api/flask/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py
|
203
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import logging
__version__ = '0.2.0'
class DistlibException(Exception):
pass
try:
from logging import NullHandler
except ImportError: # pragma: no cover
class NullHandler(logging.Handler):
def handle(self, record): pass
def emit(self, record): pass
def createLock(self): self.lock = None
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
|
QUSpilPrgm/glad
|
refs/heads/master
|
glad/lang/nim/loader/gl.py
|
3
|
from glad.lang.common.loader import BaseLoader
from glad.lang.nim.loader import LOAD_OPENGL_DLL
_OPENGL_LOADER = \
LOAD_OPENGL_DLL % {'pre':'private', 'init':'open_gl',
'proc':'get_proc', 'terminate':'close_gl'} + '''
bool gladLoadGL()
bool status = false
if(open_gl())
status = gladLoadGL(x => get_proc(x))
close_gl()
return status
'''
_OPENGL_HAS_EXT_LT3 = '''proc hasExt(extname: string): bool =
if extname == nil:
return false
var extensions = $cast[cstring](glGetString(GL_EXTENSIONS))
if extensions == nil:
return false
var
loc, terminatorLoc: int
terminator: char
while true:
loc = extensions.find(extname)
if loc < 0:
return false
terminatorLoc = loc + extname.len
terminator = extensions[terminatorLoc]
if (loc == 0 or extensions[loc - 1] == ' ') and
(terminator == ' ' or terminator == '\\0'):
return true
extensions = extensions[terminatorLoc..^1]
'''
_OPENGL_HAS_EXT_GTE3 = '''proc hasExt(extname: string): bool =
if extname == nil:
return false
if glVersionMajor < 3:
var extensions = $cast[cstring](glGetString(GL_EXTENSIONS))
if extensions == nil:
return false
var
loc, terminatorLoc: int
terminator: char
while true:
loc = extensions.find(extname)
if loc < 0:
return false
terminatorLoc = loc + extname.len
terminator = extensions[terminatorLoc]
if (loc == 0 or extensions[loc - 1] == ' ') and
(terminator == ' ' or terminator == '\\0'):
return true
extensions = extensions[terminatorLoc..^1]
else:
var
num: GLint
s: cstring
glGetIntegerv(GL_NUM_EXTENSIONS, num.addr)
for i in 0..num-1:
s = cast[cstring](glGetStringi(GL_EXTENSIONS, GLuint(i)))
if s == extname:
return true
'''
_FIND_VERSION = ''' # Thank you @elmindreda
# https://github.com/elmindreda/greg/blob/master/templates/greg.c.in#L176
# https://github.com/glfw/glfw/blob/master/src/context.c#L36
var prefixes = ["OpenGL ES-CM ", "OpenGL ES-CL ", "OpenGL ES "]
var version = glVersion
for p in prefixes:
if version.startsWith(p):
version = version.replace(p)
break
var major = ord(glVersion[0]) - ord('0')
var minor = ord(glVersion[2]) - ord('0')
glVersionMajor = major
glVersionMinor = minor
'''
_BEGIN_LOAD = ''' glGetString = cast[proc (name: GLenum): ptr GLubyte {.cdecl.}](load("glGetString"))
if glGetString == nil: return false
var glVersion = cast[cstring](glGetString(GL_VERSION))
if glVersion == nil: return false
'''
class OpenGLNimLoader(BaseLoader):
def write_header_end(self, fobj):
pass
def write_header(self, fobj):
pass
def write(self, fobj):
pass
# TODO
# if not self.disabled and 'gl' in self.apis:
# fobj.write(_OPENGL_LOADER)
def write_begin_load(self, fobj):
fobj.write(_BEGIN_LOAD)
def write_end_load(self, fobj):
fobj.write('\n return glVersionMajor != 0 or glVersionMinor != 0\n')
def write_find_core(self, fobj):
fobj.write(_FIND_VERSION)
def write_has_ext(self, fobj, apiversion):
if apiversion.major == 1 and apiversion.minor == 0:
return
if apiversion.major < 3:
fobj.write(_OPENGL_HAS_EXT_LT3)
else:
fobj.write(_OPENGL_HAS_EXT_GTE3)
|
glovebx/odoo
|
refs/heads/8.0
|
addons/website_project/controllers/__init__.py
|
7372
|
import main
|
vitor-alves/pixel-canvas-bot
|
refs/heads/master
|
packages/urllib3/util/__init__.py
|
204
|
from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
IS_PYOPENSSL,
IS_SECURETRANSPORT,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import (
current_time,
Timeout,
)
from .retry import Retry
from .url import (
get_host,
parse_url,
split_first,
Url,
)
from .wait import (
wait_for_read,
wait_for_write
)
__all__ = (
'HAS_SNI',
'IS_PYOPENSSL',
'IS_SECURETRANSPORT',
'SSLContext',
'Retry',
'Timeout',
'Url',
'assert_fingerprint',
'current_time',
'is_connection_dropped',
'is_fp_closed',
'get_host',
'parse_url',
'make_headers',
'resolve_cert_reqs',
'resolve_ssl_version',
'split_first',
'ssl_wrap_socket',
'wait_for_read',
'wait_for_write'
)
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/kazoo-2.8.0/kazoo/recipe/lease.py
|
3
|
"""Zookeeper lease implementations
:Maintainer: Lars Albertsson <lars.albertsson@gmail.com>
:Maintainer: Jyrki Pulliainen <jyrki@spotify.com>
:Status: Beta
"""
import datetime
import json
import socket
from kazoo.exceptions import CancelledError
class NonBlockingLease(object):
"""Exclusive lease that does not block.
An exclusive lease ensures that only one client at a time owns the lease.
The client may renew the lease without losing it by obtaining a new lease
with the same path and same identity. The lease object evaluates to True
if the lease was obtained.
A common use case is a situation where a task should only run on a single
host. In this case, the clients that did not obtain the lease should exit
without performing the protected task.
The lease stores time stamps using client clocks, and will therefore only
work if client clocks are roughly synchronised. It uses UTC, and works
across time zones and daylight savings.
Example usage: with a :class:`~kazoo.client.KazooClient` instance::
zk = KazooClient()
zk.start()
# Hold lease over an hour in order to keep job on same machine,
# with failover if it dies.
lease = zk.NonBlockingLease(
"/db_leases/hourly_cleanup", datetime.timedelta(minutes = 70),
identifier = "DB hourly cleanup on " + socket.gethostname())
if lease:
do_hourly_database_cleanup()
"""
# Bump when storage format changes
_version = 1
_date_format = "%Y-%m-%dT%H:%M:%S"
_byte_encoding = 'utf-8'
def __init__(self, client, path, duration, identifier=None,
utcnow=datetime.datetime.utcnow):
"""Create a non-blocking lease.
:param client: A :class:`~kazoo.client.KazooClient` instance.
:param path: The lease path to use.
:param duration: Duration during which the lease is reserved. A
:class:`~datetime.timedelta` instance.
:param identifier: Unique name to use for this lease holder. Reuse in
order to renew the lease. Defaults to
:meth:`socket.gethostname()`.
:param utcnow: Clock function, by default returning
:meth:`datetime.datetime.utcnow()`. Used for testing.
"""
ident = identifier or socket.gethostname()
self.obtained = False
self._attempt_obtaining(client, path, duration, ident, utcnow)
def _attempt_obtaining(self, client, path, duration, ident, utcnow):
client.ensure_path(path)
holder_path = path + "/lease_holder"
lock = client.Lock(path, ident)
try:
with lock:
now = utcnow()
if client.exists(holder_path):
raw, _ = client.get(holder_path)
data = self._decode(raw)
if data["version"] != self._version:
# We need an upgrade, let someone else take the lease
return
current_end = datetime.datetime.strptime(data['end'],
self._date_format)
if data['holder'] != ident and now < current_end:
# Another client is still holding the lease
return
client.delete(holder_path)
end_lease = (now + duration).strftime(self._date_format)
new_data = {'version': self._version, 'holder': ident,
'end': end_lease}
client.create(holder_path, self._encode(new_data))
self.obtained = True
except CancelledError:
pass
def _encode(self, data_dict):
return json.dumps(data_dict).encode(self._byte_encoding)
def _decode(self, raw):
return json.loads(raw.decode(self._byte_encoding))
# Python 2.x
def __nonzero__(self):
return self.obtained
# Python 3.x
def __bool__(self):
return self.obtained
class MultiNonBlockingLease(object):
"""Exclusive lease for multiple clients.
This type of lease is useful when a limited set of hosts should run a
particular task. It will attempt to obtain leases trying a sequence of
ZooKeeper lease paths.
:param client: A :class:`~kazoo.client.KazooClient` instance.
:param count: Number of host leases allowed.
:param path: ZooKeeper path under which lease files are stored.
:param duration: Duration during which the lease is reserved. A
:class:`~datetime.timedelta` instance.
:param identifier: Unique name to use for this lease holder. Reuse in order
to renew the lease.
Defaults to :meth:`socket.gethostname()`.
:param utcnow: Clock function, by default returning
:meth:`datetime.datetime.utcnow()`. Used for testing.
"""
def __init__(self, client, count, path, duration, identifier=None,
utcnow=datetime.datetime.utcnow):
self.obtained = False
for num in range(count):
ls = NonBlockingLease(client, '%s/%d' % (path, num), duration,
identifier=identifier, utcnow=utcnow)
if ls:
self.obtained = True
break
# Python 2.x
def __nonzero__(self):
return self.obtained
# Python 3.x
def __bool__(self):
return self.obtained
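# Illustrative usage sketch (not part of the original source; assumes a started
# KazooClient `zk` and an existing `run_task` callable):
#   lease = zk.MultiNonBlockingLease(
#       2, "/leases/nightly_batch", datetime.timedelta(minutes=30))
#   if lease:
#       run_task()  # at most 2 hosts obtain a lease per 30-minute window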
|
FRC-Team-3140/north-american-happiness
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/big5prober.py
|
206
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
|
salv-orlando/MyRepo
|
refs/heads/bp/xenapi-security-groups
|
nova/tests/api/openstack/test_common.py
|
1
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
from lxml import etree
import webob.exc
import xml.dom.minidom as minidom
from webob import Request
from nova import test
from nova.api.openstack import common
from nova.api.openstack import xmlutil
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
class LimiterTest(test.TestCase):
"""
Unit tests for the `nova.api.openstack.common.limited` method which takes
in a list of items and, depending on the 'offset' and 'limit' GET params,
returns a subset or complete set of the given items.
"""
def setUp(self):
""" Run before each test. """
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
self.large = range(10000)
def test_limiter_offset_zero(self):
""" Test offset key works with 0. """
req = Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
""" Test offset key works with a medium sized number. """
req = Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
self.assertEqual(common.limited(self.medium, req), self.medium[10:])
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
""" Test offset key works with a number over 1000 (max_limit). """
req = Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
self.assertEqual(common.limited(self.medium, req), [])
self.assertEqual(
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
""" Test offset key works with a blank offset. """
req = Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
""" Test offset key works with a BAD offset. """
req = Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
""" Test request with no offset or limit """
req = Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
""" Test limit of zero. """
req = Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
""" Test limit of 10. """
req = Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium[:10])
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
""" Test limit of 3000. """
req = Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
""" Test request with both limit and offset. """
items = range(2000)
req = Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
req = Request.blank('/?offset=3&limit=0')
self.assertEqual(common.limited(items, req), items[3:1003])
req = Request.blank('/?offset=3&limit=1500')
self.assertEqual(common.limited(items, req), items[3:1003])
req = Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
""" Test a max_limit other than 1000. """
items = range(2000)
req = Request.blank('/?offset=1&limit=3')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[1:4])
req = Request.blank('/?offset=3&limit=0')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3&limit=2500')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
""" Test a negative limit. """
req = Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
""" Test a negative offset. """
req = Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
class PaginationParamsTest(test.TestCase):
"""
Unit tests for the `nova.api.openstack.common.get_pagination_params`
method which takes in a request object and returns 'marker' and 'limit'
GET params.
"""
def test_no_params(self):
""" Test no params. """
req = Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
""" Test valid marker param. """
req = Request.blank('/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
""" Test valid limit param. """
req = Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
""" Test invalid limit param. """
req = Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
""" Test valid limit and marker parameters. """
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
{'marker': marker, 'limit': 20})
class MiscFunctionsTest(test.TestCase):
def test_remove_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_2(self):
fixture = 'http://www.testsite.com/v1.1/'
expected = 'http://www.testsite.com/'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_3(self):
fixture = 'http://www.testsite.com/v10.10'
expected = 'http://www.testsite.com'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_4(self):
fixture = 'http://www.testsite.com/v1.1/images/v10.5'
expected = 'http://www.testsite.com/images/v10.5'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_bad_request(self):
fixture = 'http://www.testsite.com/1.1/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_2(self):
fixture = 'http://www.testsite.com/v/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_3(self):
fixture = 'http://www.testsite.com/v1.1images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_get_id_from_href_with_int_url(self):
fixture = 'http://www.testsite.com/dir/45'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_int(self):
fixture = '45'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_int_url_query(self):
fixture = 'http://www.testsite.com/dir/45?asdf=jkl'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid_url(self):
fixture = 'http://www.testsite.com/dir/abc123'
actual = common.get_id_from_href(fixture)
expected = "abc123"
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid_url_query(self):
fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl'
actual = common.get_id_from_href(fixture)
expected = "abc123"
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid(self):
fixture = 'abc123'
actual = common.get_id_from_href(fixture)
expected = 'abc123'
self.assertEqual(actual, expected)
def test_get_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = '1.1'
actual = common.get_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_get_version_from_href_2(self):
fixture = 'http://www.testsite.com/v1.1'
expected = '1.1'
actual = common.get_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_get_version_from_href_default(self):
fixture = 'http://www.testsite.com/images'
expected = '1.0'
actual = common.get_version_from_href(fixture)
self.assertEqual(actual, expected)
class MetadataXMLDeserializationTest(test.TestCase):
deserializer = common.MetadataXMLDeserializer()
def test_create(self):
request_body = """
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key='123'>asdf</meta>
<meta key='567'>jkl;</meta>
</metadata>"""
output = self.deserializer.deserialize(request_body, 'create')
expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
        self.assertEqual(output, expected)
def test_create_empty(self):
request_body = """
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
output = self.deserializer.deserialize(request_body, 'create')
expected = {"body": {"metadata": {}}}
        self.assertEqual(output, expected)
def test_update_all(self):
request_body = """
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key='123'>asdf</meta>
<meta key='567'>jkl;</meta>
</metadata>"""
output = self.deserializer.deserialize(request_body, 'update_all')
expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
        self.assertEqual(output, expected)
def test_update(self):
request_body = """
<meta xmlns="http://docs.openstack.org/compute/api/v1.1"
key='123'>asdf</meta>"""
output = self.deserializer.deserialize(request_body, 'update')
expected = {"body": {"meta": {"123": "asdf"}}}
        self.assertEqual(output, expected)
class MetadataXMLSerializationTest(test.TestCase):
def test_xml_declaration(self):
serializer = common.MetadataXMLSerializer()
fixture = {
'metadata': {
'one': 'two',
'three': 'four',
},
}
output = serializer.serialize(fixture, 'index')
print output
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_index(self):
serializer = common.MetadataXMLSerializer()
fixture = {
'metadata': {
'one': 'two',
'three': 'four',
},
}
output = serializer.serialize(fixture, 'index')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
def test_index_null(self):
serializer = common.MetadataXMLSerializer()
fixture = {
'metadata': {
None: None,
},
}
output = serializer.serialize(fixture, 'index')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
def test_index_unicode(self):
serializer = common.MetadataXMLSerializer()
fixture = {
'metadata': {
u'three': u'Jos\xe9',
},
}
output = serializer.serialize(fixture, 'index')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(metadata_elem.text.strip(), meta_value)
def test_show(self):
serializer = common.MetadataXMLSerializer()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.serialize(fixture, 'show')
print output
root = etree.XML(output)
meta_dict = fixture['meta']
(meta_key, meta_value) = meta_dict.items()[0]
self.assertEqual(str(root.get('key')), str(meta_key))
self.assertEqual(root.text.strip(), meta_value)
def test_update_all(self):
serializer = common.MetadataXMLSerializer()
fixture = {
'metadata': {
'key6': 'value6',
'key4': 'value4',
},
}
output = serializer.serialize(fixture, 'update_all')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
def test_update_item(self):
serializer = common.MetadataXMLSerializer()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.serialize(fixture, 'update')
print output
root = etree.XML(output)
meta_dict = fixture['meta']
(meta_key, meta_value) = meta_dict.items()[0]
self.assertEqual(str(root.get('key')), str(meta_key))
self.assertEqual(root.text.strip(), meta_value)
def test_create(self):
serializer = common.MetadataXMLSerializer()
fixture = {
'metadata': {
'key9': 'value9',
'key2': 'value2',
'key1': 'value1',
},
}
output = serializer.serialize(fixture, 'create')
print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 3)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key="key2">value2</meta>
<meta key="key9">value9</meta>
<meta key="key1">value1</meta>
</metadata>
""".replace(" ", "").replace("\n", ""))
self.assertEqual(expected.toxml(), actual.toxml())
def test_delete(self):
serializer = common.MetadataXMLSerializer()
output = serializer.serialize(None, 'delete')
self.assertEqual(output, '')
|
sestrella/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/google/gcp_kms_crypto_key.py
|
13
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_kms_crypto_key
description:
- A `CryptoKey` represents a logical key that can be used for cryptographic operations.
short_description: Creates a GCP CryptoKey
version_added: '2.9'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
name:
description:
- The resource name for the CryptoKey.
required: true
type: str
labels:
description:
- Labels with user-defined metadata to apply to this resource.
required: false
type: dict
purpose:
description:
- Immutable purpose of CryptoKey. See U(https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose)
for inputs.
- 'Some valid choices include: "ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"'
required: false
default: ENCRYPT_DECRYPT
type: str
rotation_period:
description:
- Every time this period passes, generate a new CryptoKeyVersion and set it as
the primary.
- The first rotation will take place after the specified period. The rotation
period has the format of a decimal number with up to 9 fractional digits, followed
      by the letter `s` (seconds). It must be greater than a day (i.e., 86400 seconds).
required: false
type: str
version_template:
description:
- A template describing settings for new crypto key versions.
required: false
type: dict
suboptions:
algorithm:
description:
- The algorithm to use when creating a version based on this template.
- See the [algorithm reference](U(https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm))
for possible inputs.
required: true
type: str
protection_level:
description:
- The protection level to use when creating a version based on this template.
- 'Some valid choices include: "SOFTWARE", "HSM"'
required: false
type: str
key_ring:
description:
- The KeyRing that this key belongs to.
- 'Format: `''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''`.'
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys)'
- 'Creating a key: U(https://cloud.google.com/kms/docs/creating-keys#create_a_key)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a key ring
gcp_kms_key_ring:
name: key-key-ring
location: us-central1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: keyring
- name: create a crypto key
gcp_kms_crypto_key:
name: test_object
key_ring: projects/{{ gcp_project }}/locations/us-central1/keyRings/key-key-ring
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- The resource name for the CryptoKey.
returned: success
type: str
creationTime:
description:
- The time that this resource was created on the server.
- This is in RFC3339 text format.
returned: success
type: str
labels:
description:
- Labels with user-defined metadata to apply to this resource.
returned: success
type: dict
purpose:
description:
- Immutable purpose of CryptoKey. See U(https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose)
for inputs.
returned: success
type: str
rotationPeriod:
description:
- Every time this period passes, generate a new CryptoKeyVersion and set it as the
primary.
- The first rotation will take place after the specified period. The rotation period
has the format of a decimal number with up to 9 fractional digits, followed by
    the letter `s` (seconds). It must be greater than a day (i.e., 86400 seconds).
returned: success
type: str
versionTemplate:
description:
- A template describing settings for new crypto key versions.
returned: success
type: complex
contains:
algorithm:
description:
- The algorithm to use when creating a version based on this template.
- See the [algorithm reference](U(https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm))
for possible inputs.
returned: success
type: str
protectionLevel:
description:
- The protection level to use when creating a version based on this template.
returned: success
type: str
keyRing:
description:
- The KeyRing that this key belongs to.
- 'Format: `''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''`.'
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
labels=dict(type='dict'),
purpose=dict(default='ENCRYPT_DECRYPT', type='str'),
rotation_period=dict(type='str'),
version_template=dict(type='dict', options=dict(algorithm=dict(required=True, type='str'), protection_level=dict(type='str'))),
key_ring=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloudkms']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), fetch)
fetch = fetch_resource(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, create_link(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'kms')
return return_if_object(module, auth.post(link, resource_to_request(module)))
def update(module, link, fetch):
auth = GcpSession(module, 'kms')
params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))}
request = resource_to_request(module)
return return_if_object(module, auth.patch(link, request, params=params))
def updateMask(request, response):
update_mask = []
if request.get('labels') != response.get('labels'):
update_mask.append('labels')
if request.get('rotationPeriod') != response.get('rotationPeriod'):
update_mask.append('rotationPeriod')
if request.get('versionTemplate') != response.get('versionTemplate'):
update_mask.append('versionTemplate')
return ','.join(update_mask)
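# For illustration (editorial comment, not generated code): if only the
# labels differ between the desired request and the current response,
# updateMask(request, response) returns 'labels'; if rotationPeriod also
# differs, it returns 'labels,rotationPeriod'.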
def delete(module, link):
module.fail_json(msg="KeyRings cannot be deleted")
def resource_to_request(module):
request = {
u'labels': module.params.get('labels'),
u'purpose': module.params.get('purpose'),
u'rotationPeriod': module.params.get('rotation_period'),
u'versionTemplate': CryptoKeyVersiontemplate(module.params.get('version_template', {}), module).to_request(),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'kms')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
return "https://cloudkms.googleapis.com/v1/{key_ring}/cryptoKeys/{name}".format(**module.params)
def collection(module):
return "https://cloudkms.googleapis.com/v1/{key_ring}/cryptoKeys".format(**module.params)
def create_link(module):
return "https://cloudkms.googleapis.com/v1/{key_ring}/cryptoKeys?cryptoKeyId={name}".format(**module.params)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
result = decode_response(result, module)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
request = decode_response(request, module)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'name': module.params.get('name'),
u'creationTime': response.get(u'creationTime'),
u'labels': response.get(u'labels'),
u'purpose': module.params.get('purpose'),
u'rotationPeriod': response.get(u'rotationPeriod'),
u'versionTemplate': CryptoKeyVersiontemplate(response.get(u'versionTemplate', {}), module).from_response(),
}
def decode_response(response, module):
if 'name' in response:
response['name'] = response['name'].split('/')[-1]
return response
class CryptoKeyVersiontemplate(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'algorithm': self.request.get('algorithm'), u'protectionLevel': self.request.get('protection_level')})
def from_response(self):
return remove_nones_from_dict({u'algorithm': self.request.get(u'algorithm'), u'protectionLevel': self.module.params.get('protection_level')})
if __name__ == '__main__':
main()
|
vilorious/pyload
|
refs/heads/stable
|
module/plugins/hoster/ShragleCom.py
|
15
|
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class ShragleCom(DeadHoster):
__name__ = "ShragleCom"
__type__ = "hoster"
__version__ = "0.23"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?(cloudnator|shragle)\.com/files/(?P<ID>.+?)/'
__config__ = [] #@TODO: Remove in 0.4.10
__description__ = """Cloudnator.com (Shragle.com) hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "RaNaN@pyload.org"),
("zoidberg", "zoidberg@mujmail.cz")]
getInfo = create_getInfo(ShragleCom)
|
SnabbCo/neutron
|
refs/heads/master
|
neutron/plugins/vmware/vshield/edge_appliance_driver.py
|
3
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Kaiwei Fan, VMware, Inc.
# @author: Bo Link, VMware, Inc.
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.vshield.common import (
constants as vcns_const)
from neutron.plugins.vmware.vshield.common import constants as common_constants
from neutron.plugins.vmware.vshield.common import exceptions
from neutron.plugins.vmware.vshield.tasks import constants
from neutron.plugins.vmware.vshield.tasks import tasks
LOG = logging.getLogger(__name__)
class EdgeApplianceDriver(object):
def __init__(self):
# store the last task per edge that has the latest config
self.updated_task = {
'nat': {},
'route': {},
}
def _assemble_edge(self, name, appliance_size="compact",
deployment_container_id=None, datacenter_moid=None,
enable_aesni=True, hypervisor_assist=False,
enable_fips=False, remote_access=False):
edge = {
'name': name,
'fqdn': name,
'hypervisorAssist': hypervisor_assist,
'type': 'gatewayServices',
'enableAesni': enable_aesni,
'enableFips': enable_fips,
'cliSettings': {
'remoteAccess': remote_access
},
'appliances': {
'applianceSize': appliance_size
},
'vnics': {
'vnics': []
}
}
if deployment_container_id:
edge['appliances']['deploymentContainerId'] = (
deployment_container_id)
if datacenter_moid:
            edge['datacenterMoid'] = datacenter_moid
return edge
def _assemble_edge_appliance(self, resource_pool_id, datastore_id):
appliance = {}
if resource_pool_id:
appliance['resourcePoolId'] = resource_pool_id
if datastore_id:
appliance['datastoreId'] = datastore_id
return appliance
def _assemble_edge_vnic(self, name, index, portgroup_id,
primary_address=None, subnet_mask=None,
secondary=None,
type="internal",
enable_proxy_arp=False,
enable_send_redirects=True,
is_connected=True,
mtu=1500):
vnic = {
'index': index,
'name': name,
'type': type,
'portgroupId': portgroup_id,
'mtu': mtu,
'enableProxyArp': enable_proxy_arp,
'enableSendRedirects': enable_send_redirects,
'isConnected': is_connected
}
if primary_address and subnet_mask:
address_group = {
'primaryAddress': primary_address,
'subnetMask': subnet_mask
}
if secondary:
address_group['secondaryAddresses'] = {
'ipAddress': secondary,
'type': 'IpAddressesDto'
}
vnic['addressGroups'] = {
'addressGroups': [address_group]
}
return vnic
def _edge_status_to_level(self, status):
if status == 'GREEN':
status_level = common_constants.RouterStatus.ROUTER_STATUS_ACTIVE
elif status in ('GREY', 'YELLOW'):
status_level = common_constants.RouterStatus.ROUTER_STATUS_DOWN
else:
status_level = common_constants.RouterStatus.ROUTER_STATUS_ERROR
return status_level
def _enable_loadbalancer(self, edge):
if not edge.get('featureConfigs') or (
not edge['featureConfigs'].get('features')):
edge['featureConfigs'] = {'features': []}
edge['featureConfigs']['features'].append(
{'featureType': 'loadbalancer_4.0',
'enabled': True})
def get_edge_status(self, edge_id):
try:
response = self.vcns.get_edge_status(edge_id)[1]
status_level = self._edge_status_to_level(
response['edgeStatus'])
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to get edge status:\n%s"),
e.response)
status_level = common_constants.RouterStatus.ROUTER_STATUS_ERROR
try:
desc = jsonutils.loads(e.response)
if desc.get('errorCode') == (
vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
status_level = (
common_constants.RouterStatus.ROUTER_STATUS_DOWN)
except ValueError:
LOG.exception(e.response)
return status_level
def get_edges_statuses(self):
edges_status_level = {}
edges = self._get_edges()
for edge in edges['edgePage'].get('data', []):
edge_id = edge['id']
status = edge['edgeStatus']
edges_status_level[edge_id] = self._edge_status_to_level(status)
return edges_status_level
def _update_interface(self, task):
edge_id = task.userdata['edge_id']
config = task.userdata['config']
LOG.debug(_("VCNS: start updating vnic %s"), config)
try:
self.vcns.update_interface(edge_id, config)
except exceptions.VcnsApiException as e:
with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Failed to update vnic %(config)s:\n"
"%(response)s"), {
'config': config,
'response': e.response})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Failed to update vnic %d"),
config['index'])
return constants.TaskStatus.COMPLETED
def update_interface(self, router_id, edge_id, index, network,
address=None, netmask=None, secondary=None,
jobdata=None):
LOG.debug(_("VCNS: update vnic %(index)d: %(addr)s %(netmask)s"), {
'index': index, 'addr': address, 'netmask': netmask})
if index == vcns_const.EXTERNAL_VNIC_INDEX:
name = vcns_const.EXTERNAL_VNIC_NAME
intf_type = 'uplink'
elif index == vcns_const.INTERNAL_VNIC_INDEX:
name = vcns_const.INTERNAL_VNIC_NAME
intf_type = 'internal'
else:
msg = _("Vnic %d currently not supported") % index
raise exceptions.VcnsGeneralException(msg)
config = self._assemble_edge_vnic(
name, index, network, address, netmask, secondary, type=intf_type)
userdata = {
'edge_id': edge_id,
'config': config,
'jobdata': jobdata
}
task_name = "update-interface-%s-%d" % (edge_id, index)
task = tasks.Task(task_name, router_id,
self._update_interface, userdata=userdata)
task.add_result_monitor(self.callbacks.interface_update_result)
self.task_manager.add(task)
return task
def _deploy_edge(self, task):
userdata = task.userdata
name = userdata['router_name']
LOG.debug(_("VCNS: start deploying edge %s"), name)
request = userdata['request']
try:
header = self.vcns.deploy_edge(request)[0]
objuri = header['location']
job_id = objuri[objuri.rfind("/") + 1:]
response = self.vcns.get_edge_id(job_id)[1]
edge_id = response['edgeId']
LOG.debug(_("VCNS: deploying edge %s"), edge_id)
userdata['edge_id'] = edge_id
status = constants.TaskStatus.PENDING
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: deploy edge failed for router %s."),
name)
return status
def _status_edge(self, task):
edge_id = task.userdata['edge_id']
try:
response = self.vcns.get_edge_deploy_status(edge_id)[1]
task.userdata['retries'] = 0
system_status = response.get('systemStatus', None)
if system_status is None:
status = constants.TaskStatus.PENDING
elif system_status == 'good':
status = constants.TaskStatus.COMPLETED
else:
status = constants.TaskStatus.ERROR
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Edge %s status query failed."), edge_id)
except Exception:
retries = task.userdata.get('retries', 0) + 1
if retries < 3:
task.userdata['retries'] = retries
msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. "
"Retry %(retries)d.") % {
'edge_id': edge_id,
'retries': retries}
LOG.exception(msg)
status = constants.TaskStatus.PENDING
else:
msg = _("VCNS: Unable to retrieve edge %s status. "
"Abort.") % edge_id
LOG.exception(msg)
status = constants.TaskStatus.ERROR
LOG.debug(_("VCNS: Edge %s status"), edge_id)
return status
def _result_edge(self, task):
router_name = task.userdata['router_name']
edge_id = task.userdata.get('edge_id')
if task.status != constants.TaskStatus.COMPLETED:
LOG.error(_("VCNS: Failed to deploy edge %(edge_id)s "
"for %(name)s, status %(status)d"), {
'edge_id': edge_id,
'name': router_name,
'status': task.status
})
else:
LOG.debug(_("VCNS: Edge %(edge_id)s deployed for "
"router %(name)s"), {
'edge_id': edge_id, 'name': router_name
})
def _delete_edge(self, task):
edge_id = task.userdata['edge_id']
LOG.debug(_("VCNS: start destroying edge %s"), edge_id)
status = constants.TaskStatus.COMPLETED
if edge_id:
try:
self.vcns.delete_edge(edge_id)
except exceptions.ResourceNotFound:
pass
except exceptions.VcnsApiException as e:
msg = _("VCNS: Failed to delete %(edge_id)s:\n"
"%(response)s") % {
'edge_id': edge_id, 'response': e.response}
LOG.exception(msg)
status = constants.TaskStatus.ERROR
except Exception:
LOG.exception(_("VCNS: Failed to delete %s"), edge_id)
status = constants.TaskStatus.ERROR
return status
def _get_edges(self):
try:
return self.vcns.get_edges()[1]
except exceptions.VcnsApiException as e:
with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response)
def deploy_edge(self, router_id, name, internal_network, jobdata=None,
wait_for_exec=False, loadbalancer_enable=True):
task_name = 'deploying-%s' % name
edge_name = name
edge = self._assemble_edge(
edge_name, datacenter_moid=self.datacenter_moid,
deployment_container_id=self.deployment_container_id,
appliance_size='large', remote_access=True)
appliance = self._assemble_edge_appliance(self.resource_pool_id,
self.datastore_id)
if appliance:
edge['appliances']['appliances'] = [appliance]
vnic_external = self._assemble_edge_vnic(
vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX,
self.external_network, type="uplink")
edge['vnics']['vnics'].append(vnic_external)
vnic_inside = self._assemble_edge_vnic(
vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX,
internal_network,
vcns_const.INTEGRATION_EDGE_IPADDRESS,
vcns_const.INTEGRATION_SUBNET_NETMASK,
type="internal")
edge['vnics']['vnics'].append(vnic_inside)
if loadbalancer_enable:
self._enable_loadbalancer(edge)
userdata = {
'request': edge,
'router_name': name,
'jobdata': jobdata
}
task = tasks.Task(task_name, router_id,
self._deploy_edge,
status_callback=self._status_edge,
result_callback=self._result_edge,
userdata=userdata)
task.add_executed_monitor(self.callbacks.edge_deploy_started)
task.add_result_monitor(self.callbacks.edge_deploy_result)
self.task_manager.add(task)
if wait_for_exec:
# wait until the deploy task is executed so edge_id is available
task.wait(constants.TaskState.EXECUTED)
return task
def delete_edge(self, router_id, edge_id, jobdata=None):
task_name = 'delete-%s' % edge_id
userdata = {
'router_id': router_id,
'edge_id': edge_id,
'jobdata': jobdata
}
task = tasks.Task(task_name, router_id, self._delete_edge,
userdata=userdata)
task.add_result_monitor(self.callbacks.edge_delete_result)
self.task_manager.add(task)
return task
def _assemble_nat_rule(self, action, original_address,
translated_address,
vnic_index=vcns_const.EXTERNAL_VNIC_INDEX,
enabled=True):
nat_rule = {}
nat_rule['action'] = action
nat_rule['vnic'] = vnic_index
nat_rule['originalAddress'] = original_address
nat_rule['translatedAddress'] = translated_address
nat_rule['enabled'] = enabled
return nat_rule
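    # For illustration (editorial comment, not original source): with the
    # default arguments,
    #   self._assemble_nat_rule('snat', '10.0.0.0/24', '172.24.4.2')
    # returns {'action': 'snat', 'vnic': vcns_const.EXTERNAL_VNIC_INDEX,
    #          'originalAddress': '10.0.0.0/24',
    #          'translatedAddress': '172.24.4.2', 'enabled': True}.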
def get_nat_config(self, edge_id):
try:
return self.vcns.get_nat_config(edge_id)[1]
except exceptions.VcnsApiException as e:
with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Failed to get nat config:\n%s"),
e.response)
def _create_nat_rule(self, task):
# TODO(fank): use POST for optimization
# return rule_id for future reference
rule = task.userdata['rule']
LOG.debug(_("VCNS: start creating nat rules: %s"), rule)
edge_id = task.userdata['edge_id']
nat = self.get_nat_config(edge_id)
location = task.userdata['location']
del nat['version']
if location is None or location == vcns_const.APPEND:
nat['rules']['natRulesDtos'].append(rule)
else:
nat['rules']['natRulesDtos'].insert(location, rule)
try:
self.vcns.update_nat_config(edge_id, nat)
status = constants.TaskStatus.COMPLETED
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
e.response)
status = constants.TaskStatus.ERROR
return status
def create_snat_rule(self, router_id, edge_id, src, translated,
jobdata=None, location=None):
LOG.debug(_("VCNS: create snat rule %(src)s/%(translated)s"), {
'src': src, 'translated': translated})
snat_rule = self._assemble_nat_rule("snat", src, translated)
userdata = {
'router_id': router_id,
'edge_id': edge_id,
'rule': snat_rule,
'location': location,
'jobdata': jobdata
}
task_name = "create-snat-%s-%s-%s" % (edge_id, src, translated)
task = tasks.Task(task_name, router_id, self._create_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks.snat_create_result)
self.task_manager.add(task)
return task
def _delete_nat_rule(self, task):
# TODO(fank): pass in rule_id for optimization
# handle routes update for optimization
edge_id = task.userdata['edge_id']
address = task.userdata['address']
addrtype = task.userdata['addrtype']
LOG.debug(_("VCNS: start deleting %(type)s rules: %(addr)s"), {
'type': addrtype, 'addr': address})
nat = self.get_nat_config(edge_id)
del nat['version']
status = constants.TaskStatus.COMPLETED
for nat_rule in nat['rules']['natRulesDtos']:
if nat_rule[addrtype] == address:
rule_id = nat_rule['ruleId']
try:
self.vcns.delete_nat_rule(edge_id, rule_id)
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to delete snat rule:\n"
"%s"), e.response)
status = constants.TaskStatus.ERROR
return status
def delete_snat_rule(self, router_id, edge_id, src, jobdata=None):
LOG.debug(_("VCNS: delete snat rule %s"), src)
userdata = {
'edge_id': edge_id,
'address': src,
'addrtype': 'originalAddress',
'jobdata': jobdata
}
task_name = "delete-snat-%s-%s" % (edge_id, src)
task = tasks.Task(task_name, router_id, self._delete_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks.snat_delete_result)
self.task_manager.add(task)
return task
def create_dnat_rule(self, router_id, edge_id, dst, translated,
jobdata=None, location=None):
# TODO(fank): use POST for optimization
# return rule_id for future reference
LOG.debug(_("VCNS: create dnat rule %(dst)s/%(translated)s"), {
'dst': dst, 'translated': translated})
dnat_rule = self._assemble_nat_rule(
"dnat", dst, translated)
userdata = {
'router_id': router_id,
'edge_id': edge_id,
'rule': dnat_rule,
'location': location,
'jobdata': jobdata
}
task_name = "create-dnat-%s-%s-%s" % (edge_id, dst, translated)
task = tasks.Task(task_name, router_id, self._create_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks.dnat_create_result)
self.task_manager.add(task)
return task
def delete_dnat_rule(self, router_id, edge_id, translated,
jobdata=None):
# TODO(fank): pass in rule_id for optimization
LOG.debug(_("VCNS: delete dnat rule %s"), translated)
userdata = {
'edge_id': edge_id,
'address': translated,
'addrtype': 'translatedAddress',
'jobdata': jobdata
}
task_name = "delete-dnat-%s-%s" % (edge_id, translated)
task = tasks.Task(task_name, router_id, self._delete_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks.dnat_delete_result)
self.task_manager.add(task)
return task
def _update_nat_rule(self, task):
# TODO(fank): use POST for optimization
# return rule_id for future reference
edge_id = task.userdata['edge_id']
if task != self.updated_task['nat'][edge_id]:
# this task does not have the latest config, abort now
# for speedup
return constants.TaskStatus.ABORT
rules = task.userdata['rules']
LOG.debug(_("VCNS: start updating nat rules: %s"), rules)
nat = {
'featureType': 'nat',
'rules': {
'natRulesDtos': rules
}
}
try:
self.vcns.update_nat_config(edge_id, nat)
status = constants.TaskStatus.COMPLETED
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
e.response)
status = constants.TaskStatus.ERROR
return status
def update_nat_rules(self, router_id, edge_id, snats, dnats,
jobdata=None):
LOG.debug(_("VCNS: update nat rule\n"
"SNAT:%(snat)s\n"
"DNAT:%(dnat)s\n"), {
'snat': snats, 'dnat': dnats})
nat_rules = []
for dnat in dnats:
nat_rules.append(self._assemble_nat_rule(
'dnat', dnat['dst'], dnat['translated']))
nat_rules.append(self._assemble_nat_rule(
'snat', dnat['translated'], dnat['dst']))
for snat in snats:
nat_rules.append(self._assemble_nat_rule(
'snat', snat['src'], snat['translated']))
userdata = {
'edge_id': edge_id,
'rules': nat_rules,
'jobdata': jobdata,
}
task_name = "update-nat-%s" % edge_id
task = tasks.Task(task_name, router_id, self._update_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks.nat_update_result)
self.updated_task['nat'][edge_id] = task
self.task_manager.add(task)
return task
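    # For illustration (editorial comment): `snats` and `dnats` above are
    # lists of dicts, e.g.
    #   snats = [{'src': '10.0.0.0/24', 'translated': '172.24.4.2'}]
    #   dnats = [{'dst': '172.24.4.3', 'translated': '10.0.0.3'}]
    # and each dnat entry also yields a mirrored snat rule, as assembled in
    # update_nat_rules above.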
def _update_routes(self, task):
edge_id = task.userdata['edge_id']
if (task != self.updated_task['route'][edge_id] and
task.userdata.get('skippable', True)):
# this task does not have the latest config, abort now
# for speedup
return constants.TaskStatus.ABORT
gateway = task.userdata['gateway']
routes = task.userdata['routes']
LOG.debug(_("VCNS: start updating routes for %s"), edge_id)
static_routes = []
for route in routes:
static_routes.append({
"description": "",
"vnic": vcns_const.INTERNAL_VNIC_INDEX,
"network": route['cidr'],
"nextHop": route['nexthop']
})
request = {
"staticRoutes": {
"staticRoutes": static_routes
}
}
if gateway:
request["defaultRoute"] = {
"description": "default-gateway",
"gatewayAddress": gateway,
"vnic": vcns_const.EXTERNAL_VNIC_INDEX
}
try:
self.vcns.update_routes(edge_id, request)
status = constants.TaskStatus.COMPLETED
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to update routes:\n%s"),
e.response)
status = constants.TaskStatus.ERROR
return status
def update_routes(self, router_id, edge_id, gateway, routes,
skippable=True, jobdata=None):
if gateway:
gateway = gateway.split('/')[0]
userdata = {
'edge_id': edge_id,
'gateway': gateway,
'routes': routes,
'skippable': skippable,
'jobdata': jobdata
}
task_name = "update-routes-%s" % (edge_id)
task = tasks.Task(task_name, router_id, self._update_routes,
userdata=userdata)
task.add_result_monitor(self.callbacks.routes_update_result)
self.updated_task['route'][edge_id] = task
self.task_manager.add(task)
return task
def create_lswitch(self, name, tz_config, tags=None,
port_isolation=False, replication_mode="service"):
lsconfig = {
'display_name': utils.check_and_truncate(name),
"tags": tags or [],
"type": "LogicalSwitchConfig",
"_schema": "/ws.v1/schema/LogicalSwitchConfig",
"transport_zones": tz_config
}
        if isinstance(port_isolation, bool):
lsconfig["port_isolation_enabled"] = port_isolation
if replication_mode:
lsconfig["replication_mode"] = replication_mode
response = self.vcns.create_lswitch(lsconfig)[1]
return response
def delete_lswitch(self, lswitch_id):
self.vcns.delete_lswitch(lswitch_id)
def get_loadbalancer_config(self, edge_id):
try:
header, response = self.vcns.get_loadbalancer_config(
edge_id)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to get service config"))
return response
def enable_service_loadbalancer(self, edge_id):
config = self.get_loadbalancer_config(
edge_id)
if not config['enabled']:
config['enabled'] = True
try:
self.vcns.enable_service_loadbalancer(edge_id, config)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to enable loadbalancer "
"service config"))
|
mariosky/evo-drawings
|
refs/heads/master
|
venv/lib/python2.7/site-packages/numpy/lib/tests/test_arrayterator.py
|
223
|
from __future__ import division, absolute_import, print_function
from operator import mul
from functools import reduce
import numpy as np
from numpy.random import randint
from numpy.lib import Arrayterator
from numpy.testing import assert_
def test():
np.random.seed(np.arange(10))
# Create a random array
ndims = randint(5)+1
shape = tuple(randint(10)+1 for dim in range(ndims))
els = reduce(mul, shape)
a = np.arange(els)
a.shape = shape
buf_size = randint(2*els)
b = Arrayterator(a, buf_size)
# Check that each block has at most ``buf_size`` elements
for block in b:
assert_(len(block.flat) <= (buf_size or els))
# Check that all elements are iterated correctly
assert_(list(b.flat) == list(a.flat))
# Slice arrayterator
start = [randint(dim) for dim in shape]
stop = [randint(dim)+1 for dim in shape]
step = [randint(dim)+1 for dim in shape]
slice_ = tuple(slice(*t) for t in zip(start, stop, step))
c = b[slice_]
d = a[slice_]
# Check that each block has at most ``buf_size`` elements
for block in c:
assert_(len(block.flat) <= (buf_size or els))
# Check that the arrayterator is sliced correctly
assert_(np.all(c.__array__() == d))
# Check that all elements are iterated correctly
assert_(list(c.flat) == list(d.flat))
if __name__ == '__main__':
from numpy.testing import run_module_suite
run_module_suite()
|
guke001/QMarkdowner
|
refs/heads/master
|
dpkt/stun.py
|
15
|
# $Id: stun.py 47 2008-05-27 02:10:00Z jon.oberheide $
"""Simple Traversal of UDP through NAT."""
import struct
import dpkt
# STUN - RFC 3489
# http://tools.ietf.org/html/rfc3489
# Each packet has a 20 byte header followed by 0 or more attribute TLVs.
# Message Types
BINDING_REQUEST = 0x0001
BINDING_RESPONSE = 0x0101
BINDING_ERROR_RESPONSE = 0x0111
SHARED_SECRET_REQUEST = 0x0002
SHARED_SECRET_RESPONSE = 0x0102
SHARED_SECRET_ERROR_RESPONSE = 0x0112
# Message Attributes
MAPPED_ADDRESS = 0x0001
RESPONSE_ADDRESS = 0x0002
CHANGE_REQUEST = 0x0003
SOURCE_ADDRESS = 0x0004
CHANGED_ADDRESS = 0x0005
USERNAME = 0x0006
PASSWORD = 0x0007
MESSAGE_INTEGRITY = 0x0008
ERROR_CODE = 0x0009
UNKNOWN_ATTRIBUTES = 0x000a
REFLECTED_FROM = 0x000b
class STUN(dpkt.Packet):
__hdr__ = (
('type', 'H', 0),
('len', 'H', 0),
('xid', '16s', 0)
)
def tlv(buf):
    n = 4
    t, l = struct.unpack('>HH', buf[:n])
    v = buf[n:n + l]
    buf = buf[n + l:]
    return (t, l, v, buf)
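# A minimal parsing sketch (editorial addition; `pkt` is a hypothetical byte
# buffer holding one raw STUN message):
#
#   msg = STUN(pkt)
#   buf = msg.data
#   while buf:
#       t, l, v, buf = tlv(buf)
#       if t == MAPPED_ADDRESS:
#           pass  # v holds the raw attribute value bytes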
|
kordano/samba-ldb-mdb
|
refs/heads/master
|
examples/scripts/shares/python/modify_samba_config.py
|
90
|
#!/usr/bin/env python
######################################################################
##
## Simple add/delete/change share command script for Samba
##
## Copyright (C) Gerald Carter 2004.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
######################################################################
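##
## Example invocations (illustrative paths and share names):
##   modify_samba_config.py /etc/samba/smb.conf public /srv/public "Public files"   # add/update
##   modify_samba_config.py /etc/samba/smb.conf public                              # delete
##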
import sys, os
from SambaConfig import SambaConf
## ##
## check the command line args ##
## ##
delete_mode = False
if len(sys.argv) == 3:
delete_mode = True
print "Deleting share..."
elif len(sys.argv) == 5:
print "Adding/Updating share..."
else:
print "Usage: %s configfile share [path] [comments]" % sys.argv[0]
sys.exit(1)
## ##
## read and parse the config file ##
## ##
confFile = SambaConf()
confFile.ReadConfig( sys.argv[1] )
if not confFile.valid:
exit( 1 )
if delete_mode:
if not confFile.isService( sys.argv[2] ):
sys.stderr.write( "Asked to delete non-existent service! [%s]\n" % sys.argv[2] )
sys.exit( 1 )
confFile.DelService( sys.argv[2] )
else:
## make the path if it doesn't exist. Bail out if that fails
if ( not os.path.isdir(sys.argv[3]) ):
try:
os.makedirs( sys.argv[3] )
os.chmod( sys.argv[3], 0777 )
except os.error:
sys.exit( 1 )
## only add a new service -- if it already exists, then
## just set the options
if not confFile.isService( sys.argv[2] ):
confFile.AddService( sys.argv[2], ['##', '## Added by modify_samba_config.py', '##'] )
confFile.SetServiceOption( sys.argv[2], "path", sys.argv[3] )
confFile.SetServiceOption( sys.argv[2], "comment", sys.argv[4] )
confFile.SetServiceOption( sys.argv[2], "read only", "no" )
ret = confFile.Flush()
sys.exit( ret )
|
mainakibui/kobocat
|
refs/heads/master
|
onadata/apps/logger/management/commands/import_instances.py
|
13
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 coding=utf-8
import os
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _, ugettext_lazy
from onadata.libs.logger.import_tools import import_instances_from_zip,\
import_instances_from_path
class Command(BaseCommand):
args = 'username path'
help = ugettext_lazy("Import a zip file, a directory containing zip files "
"or a directory of ODK instances")
def _log_import(self, results):
total_count, success_count, errors = results
self.stdout.write(_(
"Total: %(total)d, Imported: %(imported)d, Errors: "
"%(errors)s\n------------------------------\n") % {
'total': total_count, 'imported': success_count,
'errors': errors})
def handle(self, *args, **kwargs):
if len(args) < 2:
raise CommandError(_("Usage: <command> username file/path."))
username = args[0]
path = args[1]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError(_(
"The specified user '%s' does not exist.") % username)
# make sure path exists
if not os.path.exists(path):
raise CommandError(_(
"The specified path '%s' does not exist.") % path)
for dir, subdirs, files in os.walk(path):
# check if the dir has an odk directory
if "odk" in subdirs:
                # don't walk further down this dir
                subdirs.remove("odk")
                self.stdout.write(_("Importing from dir %s..\n") % dir)
                results = import_instances_from_path(dir, user)
                self._log_import(results)
for file in files:
                filepath = os.path.join(dir, file)
if os.path.isfile(filepath) and\
os.path.splitext(filepath)[1].lower() == ".zip":
self.stdout.write(_(
"Importing from zip at %s..\n") % filepath)
results = import_instances_from_zip(filepath, user)
                    self._log_import(results)
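# Illustrative invocation (editorial comment; the username and path are
# hypothetical):
#   python manage.py import_instances bob /srv/odk_backups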
|
akosel/incubator-airflow
|
refs/heads/master
|
tests/contrib/operators/test_s3_copy_object_operator.py
|
11
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import unittest
import boto3
from moto import mock_s3
from airflow.contrib.operators.s3_copy_object_operator import S3CopyObjectOperator
class TestS3CopyObjectOperator(unittest.TestCase):
def setUp(self):
self.source_bucket = "bucket1"
self.source_key = "path1/data.txt"
self.dest_bucket = "bucket2"
self.dest_key = "path2/data_copy.txt"
@mock_s3
def test_s3_copy_object_arg_combination_1(self):
conn = boto3.client('s3')
conn.create_bucket(Bucket=self.source_bucket)
conn.create_bucket(Bucket=self.dest_bucket)
conn.upload_fileobj(Bucket=self.source_bucket,
Key=self.source_key,
Fileobj=io.BytesIO(b"input"))
# there should be nothing found before S3CopyObjectOperator is executed
self.assertFalse('Contents' in conn.list_objects(Bucket=self.dest_bucket,
Prefix=self.dest_key))
t = S3CopyObjectOperator(task_id="test_task_s3_copy_object",
source_bucket_key=self.source_key,
source_bucket_name=self.source_bucket,
dest_bucket_key=self.dest_key,
dest_bucket_name=self.dest_bucket)
t.execute(None)
objects_in_dest_bucket = conn.list_objects(Bucket=self.dest_bucket,
Prefix=self.dest_key)
        # exactly one object should be found
self.assertEqual(len(objects_in_dest_bucket['Contents']), 1)
# the object found should be consistent with dest_key specified earlier
self.assertEqual(objects_in_dest_bucket['Contents'][0]['Key'], self.dest_key)
@mock_s3
def test_s3_copy_object_arg_combination_2(self):
conn = boto3.client('s3')
conn.create_bucket(Bucket=self.source_bucket)
conn.create_bucket(Bucket=self.dest_bucket)
conn.upload_fileobj(Bucket=self.source_bucket,
Key=self.source_key,
Fileobj=io.BytesIO(b"input"))
# there should be nothing found before S3CopyObjectOperator is executed
self.assertFalse('Contents' in conn.list_objects(Bucket=self.dest_bucket,
Prefix=self.dest_key))
source_key_s3_url = "s3://{}/{}".format(self.source_bucket, self.source_key)
dest_key_s3_url = "s3://{}/{}".format(self.dest_bucket, self.dest_key)
t = S3CopyObjectOperator(task_id="test_task_s3_copy_object",
source_bucket_key=source_key_s3_url,
dest_bucket_key=dest_key_s3_url)
t.execute(None)
objects_in_dest_bucket = conn.list_objects(Bucket=self.dest_bucket,
Prefix=self.dest_key)
        # exactly one object should be found
self.assertEqual(len(objects_in_dest_bucket['Contents']), 1)
# the object found should be consistent with dest_key specified earlier
self.assertEqual(objects_in_dest_bucket['Contents'][0]['Key'], self.dest_key)
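# For reference, the two argument styles exercised above (a sketch; the bucket
# and key names are the placeholders used in these tests):
#
#   # 1. relative keys plus explicit bucket names
#   S3CopyObjectOperator(task_id='copy_relative',
#                        source_bucket_key='path1/data.txt',
#                        source_bucket_name='bucket1',
#                        dest_bucket_key='path2/data_copy.txt',
#                        dest_bucket_name='bucket2')
#
#   # 2. full s3:// URLs, with the bucket-name arguments omitted
#   S3CopyObjectOperator(task_id='copy_urls',
#                        source_bucket_key='s3://bucket1/path1/data.txt',
#                        dest_bucket_key='s3://bucket2/path2/data_copy.txt')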
|
aleksandra-tarkowska/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/migrated_app/migrations/__init__.py
|
12133432
| |
azurestandard/django
|
refs/heads/master
|
tests/regressiontests/i18n/other/__init__.py
|
12133432
| |
vivekgarhewal/med
|
refs/heads/master
|
demo-django/demo/__init__.py
|
12133432
| |
catacgc/ansible-modules-core
|
refs/heads/devel
|
system/__init__.py
|
12133432
| |
weiting-chen/manila
|
refs/heads/master
|
manila/tests/api/contrib/__init__.py
|
12133432
| |
davidzchen/tensorflow
|
refs/heads/master
|
tensorflow/python/training/monitored_session_test.py
|
7
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import sys
import threading
import time
import traceback
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
def latest_summaries(base_dir):
"""Parse summary events from latest event file in base_dir."""
file_paths = glob.glob(os.path.join(base_dir, 'events.*'))
file_path = sorted(file_paths)[-1] if file_paths else None
latest_events = summary_io.summary_iterator(file_path) if file_path else []
return [e for e in latest_events if e.HasField('summary')]
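# Typical use in the tests below (sketch): collect the summary tags written to
# a log directory.
#
#   tags = [e.summary.value[0].tag for e in latest_summaries(logdir)]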
class ScaffoldTest(test.TestCase):
"""Scaffold tests."""
def test_nothing_created_before_finalize(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
self.assertEqual(None, scaffold.init_op)
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertEqual(None, scaffold.ready_op)
self.assertEqual(None, scaffold.ready_for_local_init_op)
self.assertEqual(None, scaffold.local_init_op)
self.assertEqual(None, scaffold.saver)
def test_defaults_empty_graph(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
variables.VariableV1(1, name='my_var')
variables.VariableV1(
2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertEqual(None, scaffold.local_init_feed_dict)
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
with self.cached_session() as sess:
self.assertItemsEqual([b'my_var', b'my_local_var'],
sess.run(scaffold.ready_op))
self.assertItemsEqual([b'my_var'],
sess.run(scaffold.ready_for_local_init_op))
sess.run(scaffold.init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
sess.run(scaffold.local_init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_op)))
def test_defaults_no_variables(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
constant_op.constant(1, name='my_const')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertEqual(None, scaffold.local_init_feed_dict)
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
def test_caches_values(self):
with ops.Graph().as_default():
variables.VariableV1([1])
scaffold1 = monitored_session.Scaffold()
scaffold1.finalize()
scaffold2 = monitored_session.Scaffold()
scaffold2.finalize()
self.assertEqual(scaffold1.init_op, scaffold2.init_op)
self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
self.assertEqual(scaffold1.ready_for_local_init_op,
scaffold2.ready_for_local_init_op)
self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
self.assertEqual(scaffold1.saver, scaffold2.saver)
def test_raise_error_if_more_than_one_cached_item(self):
with ops.Graph().as_default():
variables.VariableV1([1])
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
with self.assertRaisesRegex(RuntimeError, 'More than one item'):
monitored_session.Scaffold().finalize()
def test_uses_passed_values(self):
with ops.Graph().as_default():
variables.VariableV1([1])
saver = saver_lib.Saver()
scaffold = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
local_init_feed_dict=8,
saver=saver)
scaffold.finalize()
self.assertEqual(2, scaffold.init_op)
self.assertEqual(3, scaffold.init_feed_dict)
self.assertTrue(callable(scaffold.init_fn))
self.assertEqual(5, scaffold.ready_op)
self.assertEqual(6, scaffold.ready_for_local_init_op)
self.assertEqual(7, scaffold.local_init_op)
self.assertEqual(8, scaffold.local_init_feed_dict)
self.assertEqual(saver, scaffold.saver)
def test_graph_is_finalized(self):
with ops.Graph().as_default():
variables.VariableV1([1])
monitored_session.Scaffold().finalize()
with self.assertRaisesRegex(RuntimeError,
'Graph is finalized and cannot be modified'):
constant_op.constant([0])
def test_new_scaffold_from_default_scaffold(self):
scaffold1 = monitored_session.Scaffold()
with ops.Graph().as_default():
variables.VariableV1([1])
saver = saver_lib.Saver()
scaffold2 = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
local_init_feed_dict=8,
saver=saver,
copy_from_scaffold=scaffold1)
scaffold2.finalize()
self.assertEqual(2, scaffold2.init_op)
self.assertEqual(3, scaffold2.init_feed_dict)
self.assertTrue(callable(scaffold2.init_fn))
self.assertEqual(5, scaffold2.ready_op)
self.assertEqual(6, scaffold2.ready_for_local_init_op)
self.assertEqual(7, scaffold2.local_init_op)
self.assertEqual(8, scaffold2.local_init_feed_dict)
self.assertEqual(saver, scaffold2.saver)
def test_new_scaffold_from_existing_scaffold(self):
with ops.Graph().as_default():
variables.VariableV1([1])
saver = saver_lib.Saver()
scaffold1 = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
local_init_feed_dict=8,
saver=saver)
scaffold2 = monitored_session.Scaffold(
init_op=4,
init_feed_dict=6,
init_fn=lambda scaffold, sess: 8,
ready_op=10,
ready_for_local_init_op=12,
local_init_op=14,
local_init_feed_dict=15,
saver=saver,
copy_from_scaffold=scaffold1)
scaffold2.finalize()
self.assertEqual(4, scaffold2.init_op)
self.assertEqual(6, scaffold2.init_feed_dict)
self.assertTrue(callable(scaffold2.init_fn))
self.assertEqual(10, scaffold2.ready_op)
self.assertEqual(12, scaffold2.ready_for_local_init_op)
self.assertEqual(14, scaffold2.local_init_op)
self.assertEqual(15, scaffold2.local_init_feed_dict)
self.assertEqual(saver, scaffold2.saver)
def test_copy_from_scaffold_is_scaffold(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(
TypeError, 'copy_from_scaffold is not a Scaffold instance'):
monitored_session.Scaffold(copy_from_scaffold=1)
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
def __init__(self):
self.should_stop = False
self.request = None
self.call_counter = collections.Counter()
self.last_run_context = None
self.last_run_values = None
def begin(self):
self.call_counter['begin'] += 1
def after_create_session(self, session, coord): # pylint: disable=unused-argument
self.call_counter['after_create_session'] += 1
def before_run(self, run_context):
self.call_counter['before_run'] += 1
self.last_run_context = run_context
return self.request
def after_run(self, run_context, run_values):
self.call_counter['after_run'] += 1
self.last_run_values = run_values
if self.should_stop:
run_context.request_stop()
def end(self, session):
self.call_counter['end'] += 1
class MonitoredTrainingSessionTest(test.TestCase):
"""Tests MonitoredTrainingSession."""
def test_saving_restoring_checkpoint(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(2, session.run(gstep))
def test_save_checkpoint_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_steps')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(100, session.run(gstep))
def test_save_checkpoint_secs(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_secs')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_secs=0.1,
log_step_count_steps=10) as session:
session.run(new_gstep)
time.sleep(0.2)
for _ in range(10):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(11, session.run(gstep))
def test_summaries_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
summaries = latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_summaries_secs(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_summaries_steps=None,
save_summaries_secs=0.1,
log_step_count_steps=10) as session:
session.run(new_gstep)
time.sleep(0.2)
for _ in range(101):
session.run(new_gstep)
summaries = latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_custom_saving(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
fake_hook = FakeHook()
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
chief_only_hooks=[fake_hook],
save_checkpoint_secs=0) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
        # Check that the custom hook was called.
self.assertEqual(1, fake_hook.call_counter['begin'])
# A restart will not find the checkpoint, since we didn't save.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
def test_save_graph_def(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_steps=1,
save_graph_def=True) as session:
self.assertIn('graph.pbtxt', os.listdir(logdir))
self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 1)
session.run(new_gstep)
self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 2)
def test_save_graph_def_false(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_steps=1,
save_graph_def=False) as session:
self.assertNotIn('graph.pbtxt', os.listdir(logdir))
self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta')))
session.run(new_gstep)
self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta')))
class MockExtended(object):
def __init__(self, between_graph, should_init, should_checkpoint,
should_save_summary):
self.experimental_between_graph = between_graph
self.experimental_should_init = should_init
self.should_checkpoint = should_checkpoint
self.should_save_summary = should_save_summary
class MockStrategy(object):
def __init__(self,
between_graph=False,
should_init=True,
should_checkpoint=None,
should_save_summary=None):
self.extended = MockExtended(between_graph, should_init, should_checkpoint,
should_save_summary)
class MonitoredTrainingSessionWithDistributeCoordinatorTest(test.TestCase):
"""Test distribute coordinator controls summary saving and checkpointing."""
def test_summary_hook_enabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_save_summary=True), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
summaries = latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_summary_hook_disabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_save_summary=False), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
# No summary is saved.
summaries = latest_summaries(logdir)
self.assertEqual(len(summaries), 0)
def test_checkpoint_hook_enabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_checkpoint=True), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(100, session.run(gstep))
def test_checkpoint_hook_disabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_checkpoint=False), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# No checkpoint is saved.
checkpoint = checkpoint_management.latest_checkpoint(logdir)
self.assertIsNone(checkpoint)
def test_checkpoint_hook_enable_on_non_chief_with_collective_ops(self):
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
strategy.extended._is_chief = False
context = distribute_coordinator._WorkerContext(strategy, None, 'worker', 1)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# No checkpoint is saved.
checkpoint = checkpoint_management.latest_checkpoint(logdir)
self.assertIsNone(checkpoint)
# But saved to a temporary directory.
checkpoint = checkpoint_management.latest_checkpoint(
os.path.join(logdir, 'tmp_worker_1'))
self.assertIsNotNone(checkpoint)
class StopAtNSession(monitored_session._WrappedSession):
"""A wrapped session that stops at the N-th call to _check_stop."""
def __init__(self, sess, n):
super(StopAtNSession, self).__init__(sess)
self._count = n
def _check_stop(self):
if self._count == 0:
return True
self._count -= 1
return False
class WrappedSessionTest(test.TestCase):
"""_WrappedSession tests."""
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(sess.graph, wrapped_sess.graph)
self.assertEqual(sess.sess_str, wrapped_sess.sess_str)
@test_util.run_deprecated_v1
def test_should_stop_on_close(self):
with self.cached_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertFalse(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_uses_check_stop(self):
with self.cached_session() as sess:
wrapped_sess = StopAtNSession(sess, 3)
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_delegates_to_wrapped_session(self):
with self.cached_session() as sess:
wrapped_sess0 = StopAtNSession(sess, 4)
wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertTrue(wrapped_sess1.should_stop())
@test_util.run_deprecated_v1
def test_close_twice(self):
with self.cached_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
while not coord.should_stop():
time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
"""_CoordinatedSession tests."""
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(sess.graph, coord_sess.graph)
self.assertEqual(sess.sess_str, coord_sess.sess_str)
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
@test_util.run_deprecated_v1
def test_should_stop_on_close(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_on_coord_stop(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_dont_request_stop_on_exception_in_main_thread(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegex(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertFalse(coord.should_stop())
self.assertFalse(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_stop_threads_on_close_after_exception(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegex(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_propagates_exception_trace(self):
assertion = control_flow_ops.Assert(False, ['This should fail.'])
with self.cached_session() as sess:
coord = coordinator.Coordinator(clean_stop_exception_types=())
coord_sess = monitored_session._CoordinatedSession(sess, coord)
try:
coord_sess.run([assertion])
self.fail('No exception was raised by assertion.')
except errors_impl.InvalidArgumentError:
# Extract the name of the file where the exception was first raised.
_, _, exc_traceback = sys.exc_info()
tb = traceback.extract_tb(exc_traceback)
exc_source_file = tb[-1][0]
exc_source_basename = os.path.basename(exc_source_file)
# If it's monitored_session.py then the original stack trace was not
# correctly propagated.
self.assertIn(
exc_source_basename, ['session.py', 'monitored_session.py'],
'The exception was raised from an unrecognized file. This unit '
'test probably needs to be updated. Traceback:\n%s\n' % tb)
self.assertEqual(
exc_source_basename, 'session.py',
'Original stack trace was not propagated by MonitoredSession. '
'Traceback:\n%s' % tb)
class AbortAtNSession(object):
"""A mock session that aborts at the N-th run call."""
def __init__(self, sess, n):
self._sess = sess
self._count = n
def close(self):
pass
def run(self, *args, **kwargs):
if self._count == 0:
raise errors_impl.AbortedError('Aborted at N', None, None)
self._count -= 1
return self._sess.run(*args, **kwargs)
class StopCoordinatorWithException(session_run_hook.SessionRunHook):
"""With this hook Coordinator throws an exception after N-runs."""
def __init__(self, calls_before_stopping, exception_to_raise=None):
self._started_the_side_thread_already = False
self._lock = threading.Lock()
self._stored_exception_event = threading.Event()
self._calls_before_stopping = calls_before_stopping
self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(
None, None, 'Aborted at N'))
def _maybe_stop_with_exception(self, coord):
while True:
with self._lock:
if self._calls_before_stopping == 0:
try:
raise self._exception_to_raise
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
self._stored_exception_event.set()
break
def after_create_session(self, session, coord):
if self._started_the_side_thread_already:
return
separate_thread = threading.Thread(
target=self._maybe_stop_with_exception, args=(coord,))
coord.register_thread(separate_thread)
separate_thread.start()
self._started_the_side_thread_already = True
# Coordinator will take care of joining `separate_thread`.
def after_run(self, run_context, run_values):
stopping_now = False
with self._lock:
self._calls_before_stopping -= 1
if self._calls_before_stopping == 0:
stopping_now = True
if stopping_now:
self._stored_exception_event.wait()
class FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):
"""With this hook training encounters an exception after N-runs."""
def __init__(self, calls_before_stopping):
StopCoordinatorWithException.__init__(self, calls_before_stopping)
self._coord = None
def after_create_session(self, session, coord):
self._coord = coord
return StopCoordinatorWithException.after_create_session(
self, session, coord)
def after_run(self, run_context, run_values):
StopCoordinatorWithException.after_run(self, run_context, run_values)
try:
# After a `run`, an exception could have been stored inside the
# coordinator.
self._coord.raise_requested_exception()
except errors_impl.AbortedError:
# In real world, the main thread may or may not know about the exception
# that stopped the coordinator. Because the coordinator has stopped, the
# main thread could have gotten stuck as well (for example, the
# coordinator was supposed to execute `FIFOQueue.enqueue` while the main
# thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck,
# the session is going to get garbage collected after some time with:
raise errors_impl.CancelledError(None, None,
'Session got garbage-collected.')
class CountingSessionCreator(object):
"""A creator that counts the number of created sessions."""
def __init__(self, session):
self._initial_session = session
# We only have one session per test case. We can't re-create it, thus
# it shouldn't be closed.
self._initial_session.close = lambda *args: None
self._create_session_calls = 0
@property
def number_of_sessions_created(self):
return self._create_session_calls
def create_session(self):
self._create_session_calls += 1
return self._initial_session
class RecoverableSessionTest(test.TestCase):
"""_RecoverableSession tests."""
class _SessionReturner(object):
def __init__(self, sess):
self._sess = sess
def create_session(self):
return self._sess
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(sess.graph, recoverable_sess.graph)
self.assertEqual(sess.sess_str, recoverable_sess.sess_str)
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
@test_util.run_deprecated_v1
def test_recovery(self):
with self.cached_session() as sess:
class StackSessionCreator(object):
def __init__(self, sess):
self.sessions_to_use = [
AbortAtNSession(sess, x + 1) for x in range(3)
]
def create_session(self):
return self.sessions_to_use.pop(0)
c = constant_op.constant(0)
v = array_ops.identity(c)
session_creator = StackSessionCreator(sess)
# List of 3 sessions to use for recovery. The first one aborts
# after 1 run() call, the second after 2 run calls, the third
# after 3 run calls.
self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session use these 3 sessions in sequence by
      # passing a factory that pops from the sessions_to_use list.
recoverable_sess = monitored_session._RecoverableSession(session_creator)
self.assertEqual(
2, len(session_creator.sessions_to_use)) # One session popped.
# Using first session.
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
self.assertEqual(
2, len(session_creator.sessions_to_use)) # Still 2 sessions available
# This will fail and recover by picking up the second session.
self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
# This will fail and recover by picking up the last session.
self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
self.assertEqual(
0, len(session_creator.sessions_to_use)) # All sessions used.
self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
# This will fail and throw a real error as the pop() will fail.
with self.assertRaisesRegex(IndexError, 'pop from empty list'):
recoverable_sess.run(v, feed_dict={c: -12})
@test_util.run_deprecated_v1
def test_recovery_from_coordinator_exception(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[StopCoordinatorWithException(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
# Even though the coordinator was asked to stop, the underlying session is
# recreated and is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
hook = StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.'))
session = monitored_session.MonitoredSession(session_creator, [hook])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
# The coordinator was asked to stop due to non-redeemable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# Training will not fail, since it's the call number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# Training will fail during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_step_fn_recovery_from_coordinator_exception_when_run_hooks(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[StopCoordinatorWithException(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# Even though the coordinator was asked to stop, the underlying session is
# recreated and is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator_when_run_hooks(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
hook = StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.'))
session = monitored_session.MonitoredSession(session_creator, [hook])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# The coordinator was asked to stop due to non-redeemable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck_when_run_hooks(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# Training will not fail, since it's the call number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# Training will fail during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
def create_raw_session_with_failing_coordinator(self, session_creator, hook):
"""Return MonitoredSession that triggers coordinator failures."""
session = monitored_session.MonitoredSession(session_creator, [hook])
# We would like to test a situation where during fetches through the
# raw session, the coordinator fails with an exception. To do that, we
# are going to use (raw_session + StopCoordinatorWithException) hook
# combination that is stored in
# `MonitoredSession._RecoverableSession._CoordinatedSession._sess`
# at this point:
session._tf_sess = lambda: session._sess._sess._sess
# `run()` on such a session is equivalent to `run()` on the raw session
# with separate coordinator threads independently stopping with an
# exception.
return session
@test_util.run_deprecated_v1
def test_step_fn_recovery_from_coordinator_exception_with_raw_session(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = self.create_raw_session_with_failing_coordinator(
session_creator,
StopCoordinatorWithException(calls_before_stopping=2))
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.session.run(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# Even though the coordinator was asked to stop, the underlying session is
# recreated and is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator_with_raw_session(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = self.create_raw_session_with_failing_coordinator(
session_creator,
StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.')))
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# The coordinator was asked to stop due to non-redeemable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck_with_raw_session(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = self.create_raw_session_with_failing_coordinator(
session_creator,
FailTrainingAfterCoordinatorStopped(calls_before_stopping=2))
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# Training will not fail, since it's the call number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# Training will fail during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
class FakeSession(monitored_session._WrappedSession):
def __init__(self, sess):
monitored_session._WrappedSession.__init__(self, sess)
self.args_called = {}
def run(self, fetches, **kwargs):
self.args_called = dict(kwargs)
# Call run only with fetches since we directly pass other arguments.
return monitored_session._WrappedSession.run(self, fetches)
class HookedSessionTest(test.TestCase):
"""Tests of _HookedSession."""
def testRunPassesAllArguments(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_run = FakeSession(sess)
mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
a_tensor = constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor,
feed_dict='a_feed',
options='an_option',
run_metadata='a_metadata')
self.assertEqual(output, [0])
self.assertEqual(mock_run.args_called, {
'feed_dict': 'a_feed',
'options': 'an_option',
'run_metadata': 'a_metadata'
})
def testCallsHooksBeginEnd(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
mon_sess.run(a_tensor)
for hook in [mock_hook, mock_hook2]:
self.assertEqual(
hook.last_run_values,
session_run_hook.SessionRunValues(
results=None,
options=config_pb2.RunOptions(),
run_metadata=config_pb2.RunMetadata()))
self.assertEqual(hook.last_run_context.original_args,
session_run_hook.SessionRunArgs(a_tensor))
self.assertEqual(hook.last_run_context.session, sess)
self.assertEqual(hook.call_counter['begin'], 0)
self.assertEqual(hook.call_counter['after_create_session'], 0)
self.assertEqual(hook.call_counter['before_run'], 1)
self.assertEqual(hook.call_counter['after_run'], 1)
def testShouldStop(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
mon_sess.run(fetches='a_tensor')
self.assertFalse(mon_sess.should_stop())
mock_hook.should_stop = True
mon_sess.run(fetches='a_tensor')
self.assertTrue(mon_sess.should_stop())
def testFetchesHookRequests(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
another_tensor = constant_op.constant([5], name='another_tensor')
third_tensor = constant_op.constant([10], name='third_tensor')
mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
self.evaluate(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_hook.last_run_values.results, [5])
self.assertEqual(mock_hook2.last_run_values.results, [10])
def testOnlyHooksHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
def testBothHooksAndUserHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
c_tensor = constant_op.constant([0], name='c_tensor')
add_tensor = a_tensor + b_tensor + c_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
feed_dict = {c_tensor: [20]}
self.assertEqual(
mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
# User feed_dict should not be changed
self.assertEqual(len(feed_dict), 1)
def testHooksFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor)
def testHooksAndUserFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
"""Hook that raises an Exception at step N."""
def __init__(self, n, ex):
self.n = n
self.ex = ex
self.raised = False
def before_run(self, run_context):
# Raise the first time we reach step N.
self.n -= 1
if 0 == self.n and not self.raised:
self.raised = True
raise self.ex
return None
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
"""A hook that observes & optionally modifies RunOptions and RunMetadata."""
def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
debug_tensor_watch, report_tensor_allocations_upon_oom):
self._trace_level = trace_level
self._timeout_in_ms = timeout_in_ms
self._output_partition_graphs = output_partition_graphs
self._debug_tensor_watch = debug_tensor_watch
self._report_tensor_allocations_upon_oom = (
report_tensor_allocations_upon_oom)
self.run_options_list = []
self.run_metadata_list = []
def before_run(self, run_context):
options = config_pb2.RunOptions(
trace_level=self._trace_level,
timeout_in_ms=self._timeout_in_ms,
output_partition_graphs=self._output_partition_graphs,
report_tensor_allocations_upon_oom=self
._report_tensor_allocations_upon_oom)
options.debug_options.debug_tensor_watch_opts.extend(
[self._debug_tensor_watch])
return session_run_hook.SessionRunArgs(None, None, options=options)
def after_run(self, run_context, run_values):
self.run_options_list.append(run_values.options)
self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with ops.Graph().as_default():
a_var = variables.VariableV1(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
with monitored_session.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = saver_lib._get_saver_or_default().save(
session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Run till step 5 and save.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
with monitored_session.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = saver_lib._get_saver_or_default().save(
session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
# This set of tests verifies the supervised session behavior when exceptions
# are raised close to the innermost session run() call.
@test_util.run_deprecated_v1
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
# Use a hook to save the model every step. It also saves it at
# the end.
hooks = [
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
# A restart given the checkpoint file path directly will also recover.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold,
checkpoint_filename_with_path=checkpoint_management.
latest_checkpoint(logdir))) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_initialization_on_aborted_error(self):
# Tests that we silently retry on abort during initialization.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
self.init_raised_aborted_error = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
if not self.init_raised_aborted_error:
self.init_raised_aborted_error = True
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(
init_fn=_init_fn))) as session:
self.assertFalse(session.should_stop())
self.assertEqual(0, session.run(gstep))
self.assertTrue(self.init_raised_aborted_error)
def _retry_test(self, ex):
# Tests that we silently retry on error. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, ex)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises the given exception. The
# MonitoredSession automatically retries and restarts from a freshly
# initialized session, so the step is back to 0 and running do_step
# moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_retry_on_aborted_error(self):
self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))
def test_retry_on_unavailable_error(self):
self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, errors_impl.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
# Tests that we stop cleanly when StopIteration is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = monitored_session.MonitoredSession(hooks=[hook])
with self.assertRaisesRegex(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# pass through a "run()" call within a "with MonitoredSession" block and
# put the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
session = monitored_session.MonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
# Tests that regular exceptions reported to the coordinator from a thread
# pass through returning from a "with MonitoredSession" block and
# put the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
session = monitored_session.MonitoredSession()
with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
self.assertTrue(session.should_stop())
# This set of tests verifies the session behavior when exceptions are raised
# from code inside a "with MonitoredSession:" context.
def test_stop_cleanly_when_no_exception_in_with_body(self):
# Tests that the session stops and closes cleanly when no exception is
# raised in the with body.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
# Tests that regular exceptions in "with body" are seen outside.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
# We should see that exception.
with self.assertRaisesRegex(RuntimeError, 'regular exception'):
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Will be visible outside the "with body".
raise RuntimeError('regular exception')
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.MonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_graph_finalized_during_run_unfinalized_after_exit(self):
with ops.Graph().as_default() as g:
a_var = variables.VariableV1(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertFalse(g.finalized)
def test_keep_finalized_graph_as_finalized(self):
with ops.Graph().as_default() as g:
a_var = variables.VariableV1(0)
monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertTrue(g.finalized)
def test_merge_run_options_from_hooks(self):
"""Test for rewriting RunOptions and observing RunMetadata with hooks."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
watch_a = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a, False)
watch_b = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b, True)
with monitored_session.MonitoredSession(
hooks=[hook_a, hook_b]) as session:
self.assertEqual(42, session.run(my_const))
# trace_level=3 should have overridden trace_level=2;
# timeout_in_ms=60000 should have overridden 30000;
# output_partition_graphs=True should have overridden False.
# The two debug tensor watches should have been merged.
self.assertEqual([
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[watch_a, watch_b]),
report_tensor_allocations_upon_oom=True),
], hook_b.run_options_list)
self.assertEqual(1, len(hook_b.run_metadata_list))
self.assertTrue(
isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
def test_merge_caller_and_hook_run_options(self):
"""Test that RunOptions from caller and hooks can be merged properly."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
hook_watch = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook = RunOptionsMetadataHook(2, 60000, False, hook_watch, False)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
caller_watch = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
caller_options = config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=30000,
output_partition_graphs=True,
report_tensor_allocations_upon_oom=True)
caller_options.debug_options.debug_tensor_watch_opts.extend(
[caller_watch])
self.assertEqual(42, session.run(my_const, options=caller_options))
# trace_level=3 from the caller should override 2 from the hook.
# timeout_in_ms=60000 from the hook should override 30000 from the caller.
# output_partition_graphs=True from the caller should override False
# from the hook.
# The two debug watches from the caller and the hook should be merged,
# in that order.
self.assertEqual([
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[caller_watch, hook_watch]),
report_tensor_allocations_upon_oom=True),
], hook.run_options_list)
self.assertEqual(1, len(hook.run_metadata_list))
self.assertTrue(
isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
@test_util.run_deprecated_v1
def test_with_statement_and_close(self):
# Test case for https://github.com/tensorflow/tensorflow/issues/12224
# where close() inside the with should have a better error message.
with self.assertRaisesRegex(RuntimeError, 'Session is already closed'):
with monitored_session.MonitoredSession() as session:
session.close()
def test_step_fn_example(self):
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
return value
with monitored_session.MonitoredSession() as session:
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
def test_step_function_stops(self):
with ops.Graph().as_default():
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession() as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
def test_step_request_stop_without_a_with_block(self):
with ops.Graph().as_default():
was_stop_iteration_raised = False
def step_fn(step_context):
step_context.request_stop()
session = monitored_session.MonitoredSession()
try:
self.assertEqual(None, session.run_step_fn(step_fn))
except StopIteration:
was_stop_iteration_raised = True
self.assertTrue(was_stop_iteration_raised)
self.assertFalse(session.should_stop())
def test_step_request_stop_in_a_loop(self):
with ops.Graph().as_default():
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession() as session:
while not session.should_stop():
_ = session.run_step_fn(step_fn)
self.fail('An exception should be raised on the line above.')
def test_step_request_stop_with_returning_a_type(self):
with ops.Graph().as_default():
def step_fn(step_context):
del step_context
return 'a type'
with monitored_session.MonitoredSession() as session:
self.assertEqual('a type', session.run_step_fn(step_fn))
def test_step_with_extra_arguments(self):
with ops.Graph().as_default():
def step_fn(step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegex(
ValueError,
'`step_fn` may either have one `step_context` argument'):
self.assertEqual(None, session.run_step_fn(step_fn))
def test_step_fn_belongs_to_a_class(self):
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
class Model(object):
def step_fn(self, step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
with monitored_session.MonitoredSession() as session:
model = Model()
self.assertNear(3.2, session.run_step_fn(model.step_fn), 0.1)
def test_step_fn_belongs_to_a_class_and_has_extra_methods(self):
with ops.Graph().as_default():
class Model(object):
def step_fn(self, step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegex(
ValueError,
'`step_fn` may either have one `step_context` argument'):
model = Model()
self.assertEqual(None, session.run_step_fn(model.step_fn))
def test_step_fn_with_hooks(self):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(0.0)
# This test highlights the interaction of hooks with
# `MonitoredSession.run_step_fn`. The order of execution of operations
# below is:
# 0. stage_0
# 1. stage_1_0 or stage_1_1 in an undefined order
# 2. stage_2
stage_0 = state_ops.assign_add(var, 0.3)
stage_1_0 = state_ops.assign_add(var, 0.7)
# The order of `stage_1_0` and `stage_1_1` is undefined by
# `MonitoredSession`, but we should be able to assert when both of them
# are complete. To obtain a consistent result of adding two different
# constants to `var`, we rely on a control dependency and
# `ResourceVariable`. Otherwise, it is possible that one of the
# additions overwrites the result of the other addition.
with ops.control_dependencies([stage_1_0]):
stage_1_1 = state_ops.assign_add(var, 0.5)
stage_2 = state_ops.assign_add(var, 1.1)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(fetches=stage_1_0)
def after_run(self, run_context, run_values):
self._testing.assertNear(0.3 + 0.5 + 0.7,
run_context.session.run(var), 0.1)
self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
run_context.session.run(stage_2), 0.1)
def step_fn(step_context):
self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
return step_context.run_with_hooks(fetches=stage_1_1)
with monitored_session.MonitoredSession(hooks=[Hook(self)]) as session:
self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_has_the_same_hooks_behavior_without_recovery(self):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(0.0)
stage_0 = state_ops.assign_add(var, 0.3)
stage_1_0 = state_ops.assign_add(var, 0.7)
with ops.control_dependencies([stage_1_0]):
stage_1_1 = state_ops.assign_add(var, 0.5)
stage_2 = state_ops.assign_add(var, 1.1)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(fetches=stage_1_0)
def after_run(self, run_context, run_values):
self._testing.assertNear(0.3 + 0.5 + 0.7,
run_context.session.run(var), 0.1)
self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
run_context.session.run(stage_2), 0.1)
def step_fn(step_context):
self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
return step_context.run_with_hooks(fetches=stage_1_1)
with monitored_session.SingularMonitoredSession(
hooks=[Hook(self)]) as session:
self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_with_hooks_and_request_stop(self):
with ops.Graph().as_default():
trace_the_hook = {'before_run': False, 'after_run': False}
class Hook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
trace_the_hook['before_run'] = True
def after_run(self, run_context, run_values):
trace_the_hook['after_run'] = True
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession(hooks=[Hook()]) as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
# `step_context.request_stop()` in a step_fn interrupts the flow of
# running the hooks.
self.assertFalse(trace_the_hook['before_run'])
self.assertFalse(trace_the_hook['after_run'])
def test_recovers_from_an_exception_in_step_fn(self):
trace_the_exception = {'run_already': False}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
with monitored_session.MonitoredSession() as session:
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.assertTrue(trace_the_exception['run_already'])
def test_recovers_from_an_exception_in_step_fn_after_hooks(self):
trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
graph_state = variables.VariableV1(0.0)
graph_side_effect = state_ops.assign_add(graph_state, 0.31)
def step_fn(step_context):
trace_the_exception['side_effect_counter'] += 1
step_context.session.run(graph_side_effect)
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return value
with self.cached_session() as test_session:
with monitored_session.MonitoredSession(
CountingSessionCreator(test_session)) as session:
session.run(variables.global_variables_initializer())
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.assertTrue(trace_the_exception['run_already'])
# Make sure the rest of the body of the step_fn is re-executed upon
# AbortedError:
self.assertEqual(2, trace_the_exception['side_effect_counter'])
self.assertNear(0.62, session.run(graph_state), 0.1)
def test_step_fn_doesnt_recover_when_it_wasnt_asked_to(self):
trace_the_exception = {'run_already': False}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
return value
with monitored_session.SingularMonitoredSession() as session:
with self.assertRaisesRegex(errors_impl.AbortedError, 'Abort'):
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.fail()
self.assertTrue(trace_the_exception['run_already'])
def test_step_fn_exception_from_before_run(self):
trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
vv = constant_op.constant(3.2)
graph_state = variables.VariableV1(0.0)
graph_side_effect = state_ops.assign_add(graph_state, 0.31)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return session_run_hook.SessionRunArgs(fetches=vv)
def after_run(self, run_context, run_values):
self._testing.assertNear(3.2, run_values.results, 0.1)
def step_fn(step_context):
trace_the_exception['side_effect_counter'] += 1
step_context.session.run(graph_side_effect)
return step_context.run_with_hooks(fetches=v, feed_dict={c: 1.3})
with self.cached_session() as test_session:
with monitored_session.MonitoredSession(
CountingSessionCreator(test_session),
hooks=[Hook(self)]) as session:
test_session.run(variables.global_variables_initializer())
self.assertNear(1.3, session.run_step_fn(step_fn), 0.1)
self.assertEqual(2, trace_the_exception['side_effect_counter'])
self.assertNear(0.62, session.run(graph_state), 0.1)
class SingularMonitoredSessionTest(test.TestCase):
"""Tests SingularMonitoredSession."""
def test_handles_initialization(self):
with ops.Graph().as_default():
a_var = variables.VariableV1(0)
with monitored_session.SingularMonitoredSession() as session:
# If it's not initialized, following statement raises an error.
self.assertEqual(0, session.run(a_var))
def test_do_not_handle_aborted_error(self):
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
class _RaiseAbortedHook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
with self.assertRaises(errors_impl.AbortedError):
self.assertEqual(0, session.run(gstep))
with self.assertRaises(errors_impl.AbortedError):
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
self.assertEqual(0, session.run(gstep))
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.SingularMonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# pass through a "run()" call within a "with SingularMonitoredSession" block
# and put the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
session = monitored_session.SingularMonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_stop_cleanly_when_no_exception_in_with_body(self):
# Tests that the session stops and closes cleanly when no exception is
# raised in the with body.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.SingularMonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertEqual(None, session.raw_session())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.SingularMonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_raw_session(self):
with ops.Graph().as_default():
with monitored_session.SingularMonitoredSession() as session:
self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
if __name__ == '__main__':
test.main()
|
HalCanary/skia-hc
|
refs/heads/master
|
infra/bots/recipes/perf_canvaskit.py
|
3
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe which runs the CanvasKit perf tests using Docker
DEPS = [
'checkout',
'docker',
'env',
'infra',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
'run',
'vars',
]
DOCKER_IMAGE = 'gcr.io/skia-public/perf-karma-chrome-tests:77.0.3865.120_v1'
INNER_KARMA_SCRIPT = 'skia/infra/canvaskit/perf_canvaskit.sh'
def RunSteps(api):
api.vars.setup()
checkout_root = api.path['start_dir']
out_dir = api.vars.swarming_out_dir
# The karma script is configured to look in ./canvaskit/bin/ for
# the test files to load, so we must copy them there (see Set up for docker).
copy_dest = checkout_root.join('skia', 'modules', 'canvaskit',
'canvaskit', 'bin')
base_dir = api.vars.build_dir
copies = {
base_dir.join('canvaskit.js'): copy_dest.join('canvaskit.js'),
base_dir.join('canvaskit.wasm'): copy_dest.join('canvaskit.wasm'),
}
recursive_read = [checkout_root.join('skia')]
args = [
'--builder', api.vars.builder_name,
'--git_hash', api.properties['revision'],
'--buildbucket_build_id', api.properties.get('buildbucket_build_id', ''),
'--bot_id', api.vars.swarming_bot_id,
'--task_id', api.vars.swarming_task_id,
'--browser', 'Chrome',
'--config', api.vars.configuration,
'--source_type', 'canvaskit',
]
if api.vars.is_trybot:
args.extend([
'--issue', api.vars.issue,
'--patchset', api.vars.patchset,
])
api.docker.run(
name='Performance tests of CanvasKit with Docker',
docker_image=DOCKER_IMAGE,
src_dir=checkout_root,
out_dir=out_dir,
script=checkout_root.join(INNER_KARMA_SCRIPT),
args=args,
docker_args=None,
copies=copies,
recursive_read=recursive_read,
attempts=3,
)
def GenTests(api):
yield (
api.test('Perf-Debian9-EMCC-GCE-CPU-AVX2-wasm-Release-All-CanvasKit') +
api.properties(buildername=('Perf-Debian9-EMCC-GCE-CPU-AVX2'
'-wasm-Release-All-CanvasKit'),
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]')
)
yield (
api.test('canvaskit_trybot') +
api.properties(buildername=('Perf-Debian9-EMCC-GCE-GPU-AVX2'
'-wasm-Release-All-CanvasKit'),
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]',
patch_ref='89/456789/12',
patch_repo='https://skia.googlesource.com/skia.git',
patch_storage='gerrit',
patch_set=7,
patch_issue=1234,
gerrit_project='skia',
gerrit_url='https://skia-review.googlesource.com/')
)
|
ray-zhong/github_trend_spider
|
refs/heads/master
|
ENV/Lib/site-packages/setuptools/dep_util.py
|
316
|
from distutils.dep_util import newer_group
# Yes, this was almost entirely copy-pasted from 'newer_pairwise()';
# it is just another convenience function.
def newer_pairwise_group(sources_groups, targets):
"""Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) containing only the pairs where the source group is newer than its
corresponding target, according to the semantics of 'newer_group()'.
"""
if len(sources_groups) != len(targets):
raise ValueError("'sources_groups' and 'targets' must be the same length")
# build a pair of lists (sources_groups, targets) where source is newer
n_sources = []
n_targets = []
for i in range(len(sources_groups)):
if newer_group(sources_groups[i], targets[i]):
n_sources.append(sources_groups[i])
n_targets.append(targets[i])
return n_sources, n_targets
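# A minimal usage sketch (file names are hypothetical):
#
#     sources_groups = [['a.c', 'a.h'], ['b.c']]
#     targets = ['a.o', 'b.o']
#     # Keeps only the (group, target) pairs whose sources are newer than
#     # their target, e.g. only (['a.c', 'a.h'], 'a.o') after editing a.c:
#     n_sources, n_targets = newer_pairwise_group(sources_groups, targets)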
|
geekboxzone/lollipop_external_chromium_org
|
refs/heads/geekbox
|
chrome/common/extensions/docs/server2/fake_url_fetcher.py
|
85
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import posixpath
from future import Future
from path_util import AssertIsDirectory, IsDirectory
class _Response(object):
def __init__(self, content=''):
self.content = content
self.headers = {'Content-Type': 'none'}
self.status_code = 200
class FakeUrlFetcher(object):
def __init__(self, base_path):
self._base_path = base_path
# Mock capabilities. Perhaps this class should be MockUrlFetcher.
self._sync_count = 0
self._async_count = 0
self._async_resolve_count = 0
def _ReadFile(self, filename):
# Fake DownloadError, the error that appengine usually raises.
class DownloadError(Exception): pass
try:
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
except IOError as e:
raise DownloadError(e)
def _ListDir(self, directory):
# In some tests, we need to test listing a directory from the HTML returned
# by SVN. This reads an HTML file that contains the directory's HTML listing.
if not os.path.isdir(os.path.join(self._base_path, directory)):
return self._ReadFile(directory[:-1])
files = os.listdir(os.path.join(self._base_path, directory))
html = '<html><title>Revision: 00000</title>\n'
for filename in files:
if filename.startswith('.'):
continue
if os.path.isdir(os.path.join(self._base_path, directory, filename)):
html += '<a>' + filename + '/</a>\n'
else:
html += '<a>' + filename + '</a>\n'
html += '</html>'
return html
def FetchAsync(self, url):
self._async_count += 1
url = url.rsplit('?', 1)[0]
def resolve():
self._async_resolve_count += 1
return self._DoFetch(url)
return Future(callback=resolve)
def Fetch(self, url):
self._sync_count += 1
return self._DoFetch(url)
def _DoFetch(self, url):
url = url.rsplit('?', 1)[0]
result = _Response()
if IsDirectory(url):
result.content = self._ListDir(url)
else:
result.content = self._ReadFile(url)
return result
def CheckAndReset(self, sync_count=0, async_count=0, async_resolve_count=0):
'''Returns a tuple (success, error). Use in tests like:
self.assertTrue(*fetcher.CheckAndReset(...))
'''
errors = []
for desc, expected, actual in (
('sync_count', sync_count, self._sync_count),
('async_count', async_count, self._async_count),
('async_resolve_count', async_resolve_count,
self._async_resolve_count)):
if actual != expected:
errors.append('%s: expected %s got %s' % (desc, expected, actual))
try:
return (len(errors) == 0, ', '.join(errors))
finally:
self.Reset()
def Reset(self):
self._sync_count = 0
self._async_count = 0
self._async_resolve_count = 0
class FakeURLFSFetcher(object):
'''Use a file_system to resolve fake fetches. Mimics the interface of Google
Appengine's urlfetch.
'''
def __init__(self, file_system, base_path):
AssertIsDirectory(base_path)
self._base_path = base_path
self._file_system = file_system
def FetchAsync(self, url, **kwargs):
return Future(value=self.Fetch(url))
def Fetch(self, url, **kwargs):
return _Response(self._file_system.ReadSingle(
posixpath.join(self._base_path, url)).Get())
def UpdateFS(self, file_system, base_path=None):
'''Replace the underlying FileSystem used to resolve URLs.
'''
self._file_system = file_system
self._base_path = base_path or self._base_path
class MockURLFetcher(object):
def __init__(self, fetcher):
self._fetcher = fetcher
self.Reset()
def Fetch(self, url, **kwargs):
self._fetch_count += 1
return self._fetcher.Fetch(url, **kwargs)
def FetchAsync(self, url, **kwargs):
self._fetch_async_count += 1
def next(result):
self._fetch_resolve_count += 1
return result
return self._fetcher.FetchAsync(url, **kwargs).Then(next)
def CheckAndReset(self,
fetch_count=0,
fetch_async_count=0,
fetch_resolve_count=0):
errors = []
for desc, expected, actual in (
('fetch_count', fetch_count, self._fetch_count),
('fetch_async_count', fetch_async_count, self._fetch_async_count),
('fetch_resolve_count', fetch_resolve_count,
self._fetch_resolve_count)):
if actual != expected:
errors.append('%s: expected %s got %s' % (desc, expected, actual))
try:
return (len(errors) == 0, ', '.join(errors))
finally:
self.Reset()
def Reset(self):
self._fetch_count = 0
self._fetch_async_count = 0
self._fetch_resolve_count = 0
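# A minimal sketch of composing these fakes in a test (hedged; `file_system`
# and the paths are hypothetical):
#
#     fetcher = MockURLFetcher(FakeURLFSFetcher(file_system, 'docs/'))
#     response = fetcher.Fetch('extensions/index.html')
#     self.assertTrue(*fetcher.CheckAndReset(fetch_count=1))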
|
ShySec/scrimmage-scoreboard
|
refs/heads/master
|
web2py/applications/scoreboard/languages/default.py
|
180
|
# coding: utf8
{
'!langcode!': 'en-us',
'!langname!': 'English (US)',
'%s %%(shop)': '%s %%(shop)',
'%s %%(shop[0])': '%s %%(shop[0])',
'%s %%{quark[0]}': '%s %%{quark[0]}',
'%s %%{shop[0]}': '%s %%{shop[0]}',
'%s %%{shop}': '%s %%{shop}',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'@markmin\x01**Hello World**': '**Hello World**',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Ajax Recipes': 'Ajax Recipes',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Buy this book': 'Buy this book',
'Cannot be empty': 'Cannot be empty',
'Check to delete': 'Check to delete',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Created By': 'Created By',
'Created On': 'Created On',
'customize me!': 'customize me!',
'Database': 'Database',
'DB Model': 'DB Model',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'Download': 'Download',
'E-mail': 'E-mail',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'enter date and time as %(format)s': 'enter date and time as %(format)s',
'Errors': 'Errors',
'FAQ': 'FAQ',
'First name': 'First name',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'Group ID',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Groups': 'Groups',
'Hello World': 'Hello World',
'Hello World ## comment': 'Hello World ',
'Hello World## comment': 'Hello World',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Is Active': 'Is Active',
'Last name': 'Last name',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Logged in': 'Logged in',
'Logged out': 'Logged out',
'Login': 'Login',
'Logout': 'Logout',
'Lost Password': 'Lost Password',
'Lost password?': 'Lost password?',
'Menu Model': 'Menu Model',
'Modified By': 'Modified By',
'Modified On': 'Modified On',
'My Sites': 'My Sites',
'Name': 'Name',
'Object or table name': 'Object or table name',
'Online examples': 'Online examples',
'Origin': 'Origin',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'please input your password again': 'please input your password again',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'Profile': 'Profile',
'Python': 'Python',
'Quick Examples': 'Quick Examples',
'Recipes': 'Recipes',
'Record ID': 'Record ID',
'Register': 'Register',
'Registration identifier': 'Registration identifier',
'Registration key': 'Registration key',
'Registration successful': 'Registration successful',
'Remember me (for 30 days)': 'Remember me (for 30 days)',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Semantic': 'Semantic',
'Services': 'Services',
'Stylesheet': 'Stylesheet',
'Support': 'Support',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'User ID',
'value already in database or empty': 'value already in database or empty',
'Verify Password': 'Verify Password',
'Videos': 'Videos',
'View': 'View',
'Welcome': 'Welcome',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
|
pmav99/praktoras
|
refs/heads/conmon-13
|
dogstatsd.py
|
1
|
#!/opt/conmon-agent/embedded/bin/python
# (C) Fractal Industries, Inc. 2016
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""
A Python Statsd implementation with some conmon special sauce.
"""
# set up logging before importing any other components
from config import initialize_logging # noqa
initialize_logging('dogstatsd')
from utils.proxy import set_no_proxy_settings # noqa
set_no_proxy_settings()
# stdlib
import logging
import optparse
import os
import select
import signal
import socket
import sys
import threading
from time import sleep, time
from urllib import urlencode
import zlib
# For pickle & PID files, see issue 293
os.umask(022)
# 3rd party
import requests
import simplejson as json
# project
from aggregator import get_formatter, MetricsBucketAggregator
from checks.check_status import DogstatsdStatus
from checks.metric_types import MetricTypes
from config import get_config, get_version
from daemon import AgentSupervisor, Daemon
from util import chunks, get_hostname, get_uuid, plural
from utils.pidfile import PidFile
# urllib3 logs a bunch of stuff at the info level
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.WARN)
requests_log.propagate = True
log = logging.getLogger('dogstatsd')
PID_NAME = "dogstatsd"
PID_DIR = None
# Dogstatsd constants in seconds
DOGSTATSD_FLUSH_INTERVAL = 10
DOGSTATSD_AGGREGATOR_BUCKET_SIZE = 10
WATCHDOG_TIMEOUT = 120
UDP_SOCKET_TIMEOUT = 5
# Since we call flush more often than the metrics aggregation interval, we should
# log a bunch of flushes in a row every so often.
FLUSH_LOGGING_PERIOD = 70
FLUSH_LOGGING_INITIAL = 10
FLUSH_LOGGING_COUNT = 5
EVENT_CHUNK_SIZE = 50
COMPRESS_THRESHOLD = 1024
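# A rough illustration of the cadence these constants produce in
# Reporter.flush() below (flush numbers are examples): the first
# FLUSH_LOGGING_INITIAL flushes are logged at info level, then every
# FLUSH_LOGGING_PERIOD-th flush and the FLUSH_LOGGING_COUNT flushes after it
# (e.g. #70-#75, #140-#145); all other flushes are logged at debug level.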
def add_serialization_status_metric(status, hostname):
"""
Add a metric to track the number of metric serializations,
tagged by their status.
"""
interval = 10.0
value = 1
return {
'tags': ["status:{0}".format(status)],
'metric': 'conmon.dogstatsd.serialization_status',
'interval': interval,
'device_name': None,
'host': hostname,
'points': [(time(), value / interval)],
'type': MetricTypes.RATE,
}
def unicode_metrics(metrics):
for i, metric in enumerate(metrics):
for key, value in metric.items():
if isinstance(value, basestring):
metric[key] = unicode(value, errors='replace')
elif isinstance(value, tuple) or isinstance(value, list):
value_list = list(value)
for j, value_element in enumerate(value_list):
if isinstance(value_element, basestring):
value_list[j] = unicode(value_element, errors='replace')
metric[key] = tuple(value_list)
metrics[i] = metric
return metrics
def serialize_metrics(metrics, hostname):
try:
metrics.append(add_serialization_status_metric("success", hostname))
serialized = json.dumps({"series": metrics})
except UnicodeDecodeError as e:
log.exception("Unable to serialize payload. Trying to replace bad characters. %s", e)
metrics.append(add_serialization_status_metric("failure", hostname))
try:
log.error(metrics)
serialized = json.dumps({"series": unicode_metrics(metrics)})
except Exception as e:
log.exception("Unable to serialize payload. Giving up. %s", e)
serialized = json.dumps({"series": [add_serialization_status_metric("permanent_failure", hostname)]})
if len(serialized) > COMPRESS_THRESHOLD:
headers = {'Content-Type': 'application/json',
'Content-Encoding': 'deflate'}
serialized = zlib.compress(serialized)
else:
headers = {'Content-Type': 'application/json'}
return serialized, headers
def serialize_event(event):
return json.dumps(event)
class Reporter(threading.Thread):
"""
The reporter periodically sends the aggregated metrics to the
server.
"""
def __init__(self, interval, metrics_aggregator, api_host, api_key=None,
use_watchdog=False, event_chunk_size=None):
threading.Thread.__init__(self)
self.interval = int(interval)
self.finished = threading.Event()
self.metrics_aggregator = metrics_aggregator
self.flush_count = 0
self.log_count = 0
self.hostname = get_hostname()
self.watchdog = None
if use_watchdog:
from util import Watchdog
self.watchdog = Watchdog(WATCHDOG_TIMEOUT)
self.api_key = api_key
self.api_host = api_host
self.event_chunk_size = event_chunk_size or EVENT_CHUNK_SIZE
def stop(self):
log.info("Stopping reporter")
self.finished.set()
def run(self):
log.info("Reporting to %s every %ss" % (self.api_host, self.interval))
log.debug("Watchdog enabled: %s" % bool(self.watchdog))
# Persist a start-up message.
DogstatsdStatus().persist()
while not self.finished.isSet(): # Use camel case isSet for 2.4 support.
self.finished.wait(self.interval)
self.metrics_aggregator.send_packet_count('conmon.dogstatsd.packet.count')
self.flush()
if self.watchdog:
self.watchdog.reset()
# Clean up the status messages.
log.debug("Stopped reporter")
DogstatsdStatus.remove_latest_status()
def flush(self):
try:
self.flush_count += 1
self.log_count += 1
packets_per_second = self.metrics_aggregator.packets_per_second(self.interval)
packet_count = self.metrics_aggregator.total_count
metrics = self.metrics_aggregator.flush()
count = len(metrics)
if self.flush_count % FLUSH_LOGGING_PERIOD == 0:
self.log_count = 0
if count:
self.submit(metrics)
events = self.metrics_aggregator.flush_events()
event_count = len(events)
if event_count:
self.submit_events(events)
service_checks = self.metrics_aggregator.flush_service_checks()
service_check_count = len(service_checks)
if service_check_count:
self.submit_service_checks(service_checks)
should_log = self.flush_count <= FLUSH_LOGGING_INITIAL or self.log_count <= FLUSH_LOGGING_COUNT
log_func = log.info
if not should_log:
log_func = log.debug
log_func("Flush #%s: flushed %s metric%s, %s event%s, and %s service check run%s" % (self.flush_count, count, plural(count), event_count, plural(event_count), service_check_count, plural(service_check_count)))
if self.flush_count == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, %s flushes will be logged every %s flushes." % (FLUSH_LOGGING_COUNT, FLUSH_LOGGING_PERIOD))
# Persist a status message.
packet_count = self.metrics_aggregator.total_count
DogstatsdStatus(
flush_count=self.flush_count,
packet_count=packet_count,
packets_per_second=packets_per_second,
metric_count=count,
event_count=event_count,
service_check_count=service_check_count,
).persist()
except Exception:
if self.finished.isSet():
log.debug("Couldn't flush metrics, but that's expected as we're stopping")
else:
log.exception("Error flushing metrics")
def submit(self, metrics):
body, headers = serialize_metrics(metrics, self.hostname)
params = {}
if self.api_key:
params['api_key'] = self.api_key
url = '%s/api/v1/series?%s' % (self.api_host, urlencode(params))
self.submit_http(url, body, headers)
def submit_events(self, events):
headers = {'Content-Type':'application/json'}
event_chunk_size = self.event_chunk_size
for chunk in chunks(events, event_chunk_size):
payload = {
'apiKey': self.api_key,
'events': {
'api': chunk
},
'uuid': get_uuid(),
'internalHostname': get_hostname()
}
params = {}
if self.api_key:
params['api_key'] = self.api_key
url = '%s/intake?%s' % (self.api_host, urlencode(params))
self.submit_http(url, json.dumps(payload), headers)
def submit_http(self, url, data, headers):
headers["CM-Dogstatsd-Version"] = get_version()
log.debug("Posting payload to %s" % url)
try:
start_time = time()
r = requests.post(url, data=data, timeout=5, headers=headers)
r.raise_for_status()
if r.status_code >= 200 and r.status_code < 205:
log.debug("Payload accepted")
status = r.status_code
duration = round((time() - start_time) * 1000.0, 4)
log.debug("%s POST %s (%sms)" % (status, url, duration))
except Exception:
log.exception("Unable to post payload.")
try:
log.error("Received status code: {0}".format(r.status_code))
except Exception:
pass
def submit_service_checks(self, service_checks):
headers = {'Content-Type':'application/json'}
params = {}
if self.api_key:
params['api_key'] = self.api_key
url = '{0}/api/v1/check_run?{1}'.format(self.api_host, urlencode(params))
self.submit_http(url, json.dumps(service_checks), headers)
class Server(object):
"""
A statsd udp server.
"""
def __init__(self, metrics_aggregator, host, port, forward_to_host=None, forward_to_port=None):
self.host = host
self.port = int(port)
self.address = (self.host, self.port)
self.metrics_aggregator = metrics_aggregator
self.buffer_size = 1024 * 8
self.running = False
self.should_forward = forward_to_host is not None
self.forward_udp_sock = None
# In case we want to forward every packet received to another statsd server
if self.should_forward:
if forward_to_port is None:
forward_to_port = 8125
log.info("External statsd forwarding enabled. All packets received will be forwarded to %s:%s" % (forward_to_host, forward_to_port))
try:
self.forward_udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.forward_udp_sock.connect((forward_to_host, forward_to_port))
except Exception:
log.exception("Error while setting up connection to external statsd server")
def start(self):
""" Run the server. """
# Bind to the UDP socket.
# IPv4 only
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setblocking(0)
try:
self.socket.bind(self.address)
except socket.gaierror:
if self.address[0] == 'localhost':
log.warning("Warning localhost seems undefined in your host file, using 127.0.0.1 instead")
self.address = ('127.0.0.1', self.address[1])
self.socket.bind(self.address)
log.info('Listening on host & port: %s' % str(self.address))
# Inline variables for quick look-up.
buffer_size = self.buffer_size
aggregator_submit = self.metrics_aggregator.submit_packets
sock = [self.socket]
socket_recv = self.socket.recv
select_select = select.select
select_error = select.error
timeout = UDP_SOCKET_TIMEOUT
should_forward = self.should_forward
forward_udp_sock = self.forward_udp_sock
# Run our select loop.
self.running = True
while self.running:
try:
ready = select_select(sock, [], [], timeout)
if ready[0]:
message = socket_recv(buffer_size)
aggregator_submit(message)
if should_forward:
forward_udp_sock.send(message)
except select_error, se:
# Ignore interrupted system calls from sigterm.
errno = se[0]
if errno != 4:
raise
except (KeyboardInterrupt, SystemExit):
break
except Exception:
log.exception('Error receiving datagram')
def stop(self):
self.running = False
class Dogstatsd(Daemon):
""" This class is the dogstatsd daemon. """
def __init__(self, pid_file, server, reporter, autorestart):
Daemon.__init__(self, pid_file, autorestart=autorestart)
self.server = server
self.reporter = reporter
def _handle_sigterm(self, signum, frame):
log.debug("Caught sigterm. Stopping run loop.")
self.server.stop()
def run(self):
# Gracefully exit on sigterm.
signal.signal(signal.SIGTERM, self._handle_sigterm)
# Handle Keyboard Interrupt
signal.signal(signal.SIGINT, self._handle_sigterm)
# Start the reporting thread before accepting data
self.reporter.start()
try:
try:
self.server.start()
except Exception, e:
log.exception('Error starting server')
raise e
finally:
# The server will block until it's done. Once we're here, shutdown
# the reporting thread.
self.reporter.stop()
self.reporter.join()
log.info("Dogstatsd is stopped")
# Restart if asked to restart
if self.autorestart:
sys.exit(AgentSupervisor.RESTART_EXIT_STATUS)
@classmethod
def info(self):
logging.getLogger().setLevel(logging.ERROR)
return DogstatsdStatus.print_latest_status()
def init(config_path=None, use_watchdog=False, use_forwarder=False, args=None):
"""Configure the server and the reporting thread.
"""
c = get_config(parse_args=False, cfg_path=config_path)
if (not c['use_dogstatsd'] and
(args and args[0] in ['start', 'restart'] or not args)):
log.info("Dogstatsd is disabled. Exiting")
# We're exiting purposefully, so exit with zero (supervisor's expected
# code). HACK: Sleep a little bit so supervisor thinks we've started cleanly
# and thus can exit cleanly.
sleep(4)
sys.exit(0)
log.debug("Configuring dogstatsd")
port = c['dogstatsd_port']
interval = DOGSTATSD_FLUSH_INTERVAL
api_key = c['api_key']
aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE
non_local_traffic = c['non_local_traffic']
forward_to_host = c.get('statsd_forward_host')
forward_to_port = c.get('statsd_forward_port')
event_chunk_size = c.get('event_chunk_size')
recent_point_threshold = c.get('recent_point_threshold', None)
target = c['cm_url']
if use_forwarder:
target = c['dogstatsd_target']
hostname = get_hostname(c)
# Create the aggregator (which is the point of communication between the
# server and reporting threads).
assert 0 < interval
aggregator = MetricsBucketAggregator(
hostname,
aggregator_interval,
recent_point_threshold=recent_point_threshold,
formatter=get_formatter(c),
histogram_aggregates=c.get('histogram_aggregates'),
histogram_percentiles=c.get('histogram_percentiles'),
utf8_decoding=c['utf8_decoding']
)
# Start the reporting thread.
reporter = Reporter(interval, aggregator, target, api_key, use_watchdog, event_chunk_size)
# Start the server on an IPv4 stack
# Default to loopback
server_host = c['bind_host']
# If specified, bind to all addresses
if non_local_traffic:
server_host = ''
server = Server(aggregator, server_host, port, forward_to_host=forward_to_host, forward_to_port=forward_to_port)
return reporter, server, c
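# A minimal sketch of driving init() directly (the config path is
# hypothetical; main() below performs the same wiring):
#
#     reporter, server, cnf = init('/etc/conmon-agent/conmon.conf')
#     reporter.start()          # reporting thread
#     server.start()            # blocks until server.stop() is called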
def main(config_path=None):
""" The main entry point for the unix version of dogstatsd. """
# Deprecation notice
from utils.deprecations import deprecate_old_command_line_tools
deprecate_old_command_line_tools()
COMMANDS_START_DOGSTATSD = [
'start',
'stop',
'restart',
'status'
]
parser = optparse.OptionParser("%prog [start|stop|restart|status]")
parser.add_option('-u', '--use-local-forwarder', action='store_true',
dest="use_forwarder", default=False)
opts, args = parser.parse_args()
if not args or args[0] in COMMANDS_START_DOGSTATSD:
reporter, server, cnf = init(config_path, use_watchdog=True, use_forwarder=opts.use_forwarder, args=args)
daemon = Dogstatsd(PidFile(PID_NAME, PID_DIR).get_path(), server, reporter,
cnf.get('autorestart', False))
# If no args were passed in, run the server in the foreground.
if not args:
daemon.start(foreground=True)
return 0
# Otherwise, process the daemon command.
else:
command = args[0]
if command == 'start':
daemon.start()
elif command == 'stop':
daemon.stop()
elif command == 'restart':
daemon.restart()
elif command == 'status':
daemon.status()
elif command == 'info':
return Dogstatsd.info()
else:
sys.stderr.write("Unknown command: %s\n\n" % command)
parser.print_help()
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
lilfolr/CITS4406-Assignment2
|
refs/heads/master
|
template.py
|
1
|
"""Provide a base HTML template variable for population with appropriate
statistics in the report.py module.
"""
base_template = \
"""
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css">
<link rel="stylesheet" href="/static/report/main.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.4/jquery.min.js"></script>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script src="/static/report/main.js"></script>
<script>
init();
{chart_data}
</script>
<title>Analysis Report </title>
</head>
<body>
<nav id="initialNavBar" class="navbar navbar-inverse navbar-fixed-top">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href={previous}>Back</a>
</div>
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li><a href="#invalid">Invalid/Empty</a></li>
<li><a href="#numerical">Numerical</a></li>
<li><a href="#string">String</a></li>
<li><a href="#boolean">Boolean</a></li>
<li><a href="#enum">Enum</a></li>
<li><a href="#identifier">Identifier</a></li>
<li><a href="#datetime">Date Time</a></li>
<li><a href="#date">Date</a></li>
<li><a href="#time">Time</a></li>
<li><a href="#day">Day</a></li>
<li><a href="#email">Email</a></li>
<li><a href="#char">Character</a></li>
<li><a href="#hyper">Hyperlink</a></li>
<li><a href="#currency">Currency</a></li>
</ul>
</div>
</div>
</nav>
<br> <br>
<br> <br>
<br> <br>
<div class="container">
<h1>Analysis Report of {filename}</h1>
</div>
<div class="container">
<div class="row">
<hr id="invalid"/>
<div class="col-md-6">
<h2 class="titleRow">Invalid Rows ({len_invalid_rows})</h2>
<p>These rows contain either too many or too few columns.</p>
{invalid_rows}
</div>
<div class="col-md-6">
<h2 class="titleRow">Empty Columns ({len_empty_columns})</h2>
<p>These columns contain >= 90% empty values.</p>
{empty_columns}
</div>
<div class="col-md-6">
<h2 class="titleRow">Anomaly Cells ({len_error_columns})</h2>
<p>These cells contain invalid values.</p>
{error_columns}
</div>
<div class="col-md-6">
<h2 class="titleRow">Delimiter</h2>
<p>This file contains the delimiter type:</p>
<h4><b>{delimiter_type}</b></h4>
</div>
<div class="col-md-6">
<h2 class="titleRow">Columns ({num_columns})</h2>
<p>Detected columns and their determined type.</p>
{column_details}
</div>
</div>
</div>
<div class="container">
<div class="row">
<div class="col-md-12">
<h2 class="titleRow" id="col_analysis">Column Analysis (Based on {len_columns} rows)</h2>
<h2 class="titleRow" id="charts_header">Charts</h2>
<p>Click 'Show Data' at the end of the table to view a chart. (Disabled for offline reports)</p>
<p>Note: If numerical/currency data contains more than 10,000 values only the top 10,000 will be displayed in the chart.</p>
<h4>Showing chart for column:</h4>
<div id="Stats_Chart_data" class='hidden'>data here</div>
<div id="Stats_Chart" class='hidden' style="width: 900px; height: 500px;"></div>
<hr id="numerical"/>
<h2 class="titleRow">Numerical</h2>
<table class="table table-bordered table-hover">
<tr>
<th style="width:100px">Column</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
<th>Range</th>
<th>Distribution</th>
<th>Quartiles</th>
<th>Outliers</th>
</tr>
{numerical_analysis}
</table>
<hr id="string"/>
<h2 class="titleRow">String</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
</tr>
{string_analysis}
</table>
<hr id="boolean"/>
<h2 class="titleRow">Boolean</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
<th>Total "True"</th>
<th>Total "False"</th>
<th>Total "Yes"</th>
<th>Total "No"</th>
<th>Total Boolean Values</th>
</tr>
{boolean_analysis}
</table>
<hr id="enum"/>
<h2 class="titleRow">Categorised (Enumerated)</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
</tr>
{enum_analysis}
</table>
<hr id="identifier"/>
<h2 class="titleRow">Identifier</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
</tr>
{identifier_analysis}
</table>
<hr id="datetime"/>
<h2 class="titleRow">Datetime</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
</tr>
{datetime_analysis}
</table>
<hr id="date"/>
<h2 class="titleRow">Date</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
<th>Dec - Feb Tally</th>
<th>Mar - May Tally</th>
<th>Jun - Aug Tally</th>
<th>Sep - Nov Tally</th>
</tr>
{date_analysis}
</table>
<hr id="time"/>
<h2 class="titleRow">Time</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
<th>Most Common Hours (Top 5) (hour,count)</th>
<th>Least Common Hours (Top 5) (hour,count)</th>
</tr>
{time_analysis}
</table>
<hr id="day"/>
<h2 class="titleRow">Day</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
</tr>
{day_analysis}
</table>
<hr id="email"/>
<h2 class="titleRow">Email</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
</tr>
{email_analysis}
</table>
<hr id="char"/>
<h2 class="titleRow">Character</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
</tr>
{char_analysis}
</table>
<hr id="hyper"/>
<h2 class="titleRow">Hyperlink</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Mode</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
</tr>
{hyper_analysis}
</table>
<hr id="currency"/>
<h2 class="titleRow">Currency</h2>
<table class="table table-bordered table-hover">
<tr>
<th>Column</th>
<th>Min</th>
<th>Max</th>
<th>Mode</th>
<th>Mean</th>
<th>Median Low</th>
<th>Median</th>
<th>Median High</th>
<th>Standard Deviation</th>
<th>Outliers</th>
<th>Most Common (Top 5)</th>
<th>Least Common (Top 5)</th>
<th>Unique Items</th>
</tr>
{currency_analysis}
</table>
</div>
</div>
</div>
<script src="https://code.jquery.com/jquery-2.1.4.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
</body>
"""
|
horance-liu/tensorflow
|
refs/heads/master
|
tensorflow/contrib/eager/python/examples/rnn_ptb/rnn_ptb_test.py
|
61
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PTBModel with eager execution enabled."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
from tensorflow.contrib.eager.python.examples.rnn_ptb import rnn_ptb
def device():
return "/device:GPU:0" if tfe.num_gpus() else "/device:CPU:0"
class PTBTest(tf.test.TestCase):
def testTrain(self):
model = rnn_ptb.test_model(tfe.num_gpus() > 0)
sequence_length = 35
data = np.ones([4 * sequence_length, 20], dtype=np.int64)
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(1.0)
# Train two epochs
rnn_ptb.train(model, optimizer, data, sequence_length, 0.25)
rnn_ptb.train(model, optimizer, data, sequence_length, 0.25)
def testApply(self):
model = rnn_ptb.test_model(tfe.num_gpus() > 0)
with tf.device(device()):
model(tf.ones([35, 20], dtype=tf.int64), training=False)
def force_gpu_sync():
if tfe.num_gpus():
tf.constant(1).gpu().cpu()
class PTBBenchmark(tf.test.Benchmark):
BATCH_SIZE = 20
SEQ_LEN = 35
def _report(self, label, start, num_iters, dev, batch_size):
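# Mean wall-clock seconds per iteration; examples_per_sec below is
# batch_size / wall_time.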
wall_time = (time.time() - start) / num_iters
dev = "cpu" if "cpu" in dev.lower() else "gpu"
name = "%s_%s_batch_%d" % (label, dev, batch_size)
examples_per_sec = batch_size / wall_time
self.report_benchmark(
iters=num_iters,
wall_time=wall_time,
name=name,
extras={
"examples_per_sec": examples_per_sec
})
def _benchmark_apply(self, label, model):
with tf.device(device()):
sequence_batch = tf.ones(
[PTBBenchmark.SEQ_LEN, PTBBenchmark.BATCH_SIZE], dtype=tf.int64)
for _ in range(10): # Warmup
model(sequence_batch, training=False).cpu()
gc.collect()
start = time.time()
iters = 100
for _ in range(iters):
model(sequence_batch, training=False).cpu()
self._report(label, start, iters, device(), int(sequence_batch.shape[1]))
def benchmark_apply_small(self):
self._benchmark_apply("eager_apply_small", rnn_ptb.small_model(False))
def benchmark_apply_large(self):
self._benchmark_apply("eager_apply_large", rnn_ptb.large_model(False))
def benchmark_cudnn_apply_small(self):
if not tfe.num_gpus():
return
self._benchmark_apply("eager_cudnn_apply_small", rnn_ptb.small_model(True))
def benchmark_cudnn_apply_large(self):
if not tfe.num_gpus():
return
self._benchmark_apply("eager_cudnn_apply_large", rnn_ptb.large_model(True))
def _benchmark_train(self, label, model):
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(1.)
def model_loss(inputs, targets):
return rnn_ptb.loss_fn(model, inputs, targets, training=True)
grads = tfe.implicit_gradients(model_loss)
sequence_batch = tf.ones(
[PTBBenchmark.SEQ_LEN, PTBBenchmark.BATCH_SIZE], dtype=tf.int64)
def step():
optimizer.apply_gradients(
rnn_ptb.clip_gradients(grads(sequence_batch, sequence_batch), 0.25))
for _ in range(10): # Warmup
step()
force_gpu_sync()
gc.collect()
start = time.time()
iters = 100
for _ in range(iters):
step()
force_gpu_sync()
self._report(label, start, iters, device(), int(sequence_batch.shape[1]))
def benchmark_train_small(self):
self._benchmark_train("eager_train_small", rnn_ptb.small_model(False))
def benchmark_train_large(self):
self._benchmark_train("eager_train_large", rnn_ptb.large_model(False))
def benchmark_cudnn_train_small(self):
if not tfe.num_gpus():
return
self._benchmark_train("eager_cudnn_train_small", rnn_ptb.small_model(True))
def benchmark_cudnn_train_large(self):
if not tfe.num_gpus():
return
self._benchmark_train("eager_cudnn_train_large", rnn_ptb.large_model(True))
if __name__ == "__main__":
tfe.enable_eager_execution()
tf.test.main()
|
dgjustice/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/misc/serverless.py
|
18
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ryan Scott Brown <ryansb@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: serverless
short_description: Manages a Serverless Framework project
description:
- Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
version_added: "2.3"
options:
state:
choices: ['present', 'absent']
description:
- Goal state of given stage/project
required: false
default: present
service_path:
description:
- The path to the root of the Serverless Service to be operated on.
required: true
functions:
description:
- A list of specific functions to deploy. If this is not provided, all functions in the service will be deployed.
required: false
default: []
region:
description:
- AWS region to deploy the service to
required: false
default: us-east-1
deploy:
description:
- Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere.
required: false
default: true
notes:
- Currently, the `serverless` command must be in the path of the node executing the task. In the future this may be a flag.
requirements: [ "serverless" ]
author: "Ryan Scott Brown @ryansb"
'''
EXAMPLES = """
# Basic deploy of a service
- serverless:
service_path: '{{ project_dir }}'
state: present
# Deploy specific functions
- serverless:
service_path: '{{ project_dir }}'
functions:
- my_func_one
- my_func_two
# deploy a project, then pull its resource list back into Ansible
- serverless:
stage: dev
region: us-east-1
service_path: '{{ project_dir }}'
register: sls
# The cloudformation stack is always named the same as the full service, so the
# cloudformation_facts module can get a full list of the stack resources, as
# well as stack events and outputs
- cloudformation_facts:
region: us-east-1
stack_name: '{{ sls.service_name }}'
stack_resources: true
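# Build the artifacts only, without a stack update (a sketch of a
# build-only run using the `deploy` option documented above)
- serverless:
    service_path: '{{ project_dir }}'
    deploy: false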
"""
RETURN = """
service_name:
type: string
description: The service name specified in the serverless.yml that was just deployed.
returned: always
sample: my-fancy-service-dev
state:
type: string
description: Whether the stack for the serverless project is present/absent.
returned: always
command:
type: string
description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
returned: always
sample: serverless deploy --stage production
"""
import os
import traceback
import yaml
def read_serverless_config(module):
path = module.params.get('service_path')
try:
with open(os.path.join(path, 'serverless.yml')) as sls_config:
config = yaml.safe_load(sls_config.read())
return config
except IOError as e:
module.fail_json(msg="Could not open serverless.yml in {}. err: {}".format(path, str(e)), exception=traceback.format_exc())
module.fail_json(msg="Failed to open serverless config at {}".format(
os.path.join(path, 'serverless.yml')))
def get_service_name(module, stage):
config = read_serverless_config(module)
if config.get('service') is None:
module.fail_json(msg="Could not read `service` key from serverless.yml file")
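# e.g. service 'my-fancy-service' with stage 'dev' yields
# 'my-fancy-service-dev' (matching the `sample` for service_name in RETURN)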
if stage:
return "{}-{}".format(config['service'], stage)
return "{}-{}".format(config['service'], config.get('stage', 'dev'))
def main():
module = AnsibleModule(
argument_spec=dict(
service_path = dict(required=True, type='path'),
state = dict(default='present', choices=['present', 'absent'], required=False),
functions = dict(type='list', required=False),
region = dict(default='', required=False),
stage = dict(default='', required=False),
deploy = dict(default=True, type='bool', required=False),
),
)
service_path = module.params.get('service_path')
state = module.params.get('state')
functions = module.params.get('functions')
region = module.params.get('region')
stage = module.params.get('stage')
deploy = module.params.get('deploy', True)
command = "serverless "
if state == 'present':
command += 'deploy '
elif state == 'absent':
command += 'remove '
else:
module.fail_json(msg="State must either be 'present' or 'absent'. Received: {}".format(state))
if not deploy and state == 'present':
command += '--noDeploy '
if region:
command += '--region {} '.format(region)
if stage:
command += '--stage {} '.format(stage)
rc, out, err = module.run_command(command, cwd=service_path)
if rc != 0:
if state == 'absent' and "-{}' does not exist".format(stage) in out:
module.exit_json(changed=False, state='absent', command=command,
out=out, service_name=get_service_name(module, stage))
module.fail_json(msg="Failure when executing Serverless command. Exited {}.\nstdout: {}\nstderr: {}".format(rc, out, err))
# gather some facts about the deployment
module.exit_json(changed=True, state='present', out=out, command=command,
service_name=get_service_name(module, stage))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
bowang/tensorflow
|
refs/heads/master
|
tensorflow/contrib/opt/__init__.py
|
22
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing optimization routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.opt.python.training.drop_stale_gradient_optimizer import *
from tensorflow.contrib.opt.python.training.external_optimizer import *
from tensorflow.contrib.opt.python.training.lazy_adam_optimizer import *
from tensorflow.contrib.opt.python.training.nadam_optimizer import *
from tensorflow.contrib.opt.python.training.moving_average_optimizer import *
from tensorflow.contrib.opt.python.training.variable_clipping_optimizer import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'DelayCompensatedGradientDescentOptimizer',
'DropStaleGradientOptimizer', 'ExternalOptimizerInterface',
'LazyAdamOptimizer', 'NadamOptimizer', 'MovingAverageOptimizer',
'ScipyOptimizerInterface', 'VariableClippingOptimizer'
]
remove_undocumented(__name__, _allowed_symbols)
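# Usage sketch (assumes this contrib build exposes the symbol listed in
# _allowed_symbols above):
#   opt = tf.contrib.opt.LazyAdamOptimizer(learning_rate=0.001)
#   train_op = opt.minimize(loss)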
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/postfix/isNone/complexExpression_after.py
|
39
|
def f(a, b, c):
if (a + b) * c is None:
<caret>
|
roehm/espresso
|
refs/heads/lbgpu
|
testsuite/configs/check_myconfig_complete.py
|
3
|
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Check whether all features used in the code are defined
#
import sys, os, re, fileinput
sys.path.append(os.path.join(sys.path[0], '..', '..', 'config'))
import featuredefs
if len(sys.argv) < 3:
print "Usage: %s DEFFILE [FILE...]" % sys.argv[0]
exit(2)
print "Checking for completeness of features in test configurations..."
fdefs = featuredefs.defs(sys.argv[1])
featurefound = set()
featurere = re.compile(r'^#define (\w+)')
for line in fileinput.input(sys.argv[2:]):
res = featurere.match(line)
if res is not None:
feature = res.group(1)
featurefound.add(feature)
unused = fdefs.features.difference(featurefound)
unused = unused.difference(fdefs.notestfeatures)
if len(unused) > 0:
for feature in unused:
print "check_myconfig_complete: %s is not used" % feature
else:
print "check_myconfig_complete: All features are used!"
|
devendermishrajio/nova_test_latest
|
refs/heads/master
|
nova/tests/unit/test_nova_manage.py
|
30
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import StringIO
import sys
import fixtures
import mock
from nova.cmd import manage
from nova import context
from nova import db
from nova.db import migration
from nova.db.sqlalchemy import migration as sqla_migration
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.db import fakes as db_fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_network
from nova.tests.unit import test_flavors
class FixedIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FixedIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = manage.FixedIpCommands()
def test_reserve(self):
self.commands.reserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], True)
def test_reserve_nonexistent_address(self):
self.assertEqual(2, self.commands.reserve('55.55.55.55'))
def test_unreserve(self):
self.commands.unreserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], False)
def test_unreserve_nonexistent_address(self):
self.assertEqual(2, self.commands.unreserve('55.55.55.55'))
def test_list(self):
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO()))
self.commands.list()
self.assertNotEqual(-1, sys.stdout.getvalue().find('192.168.0.100'))
def test_list_just_one_host(self):
def fake_fixed_ip_get_by_host(*args, **kwargs):
return [db_fakes.fixed_ip_fields]
self.useFixture(fixtures.MonkeyPatch(
'nova.db.fixed_ip_get_by_host',
fake_fixed_ip_get_by_host))
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO()))
self.commands.list('banana')
self.assertNotEqual(-1, sys.stdout.getvalue().find('192.168.0.100'))
class FloatingIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FloatingIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = manage.FloatingIpCommands()
def test_address_to_hosts(self):
def assert_loop(result, expected):
for ip in result:
self.assertIn(str(ip), expected)
address_to_hosts = self.commands.address_to_hosts
# /32 and /31
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/32')
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/31')
# /30
expected = ["192.168.100.%s" % i for i in range(1, 3)]
result = address_to_hosts('192.168.100.0/30')
self.assertEqual(2, len(list(result)))
assert_loop(result, expected)
# /29
expected = ["192.168.100.%s" % i for i in range(1, 7)]
result = address_to_hosts('192.168.100.0/29')
self.assertEqual(6, len(list(result)))
assert_loop(result, expected)
# /28
expected = ["192.168.100.%s" % i for i in range(1, 15)]
result = address_to_hosts('192.168.100.0/28')
self.assertEqual(14, len(list(result)))
assert_loop(result, expected)
# /16
result = address_to_hosts('192.168.100.0/16')
self.assertEqual(65534, len(list(result)))
# NOTE(dripton): I don't test /13 because it makes the test take 3s.
# /12 gives over a million IPs, which is ridiculous.
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/12')
class NetworkCommandsTestCase(test.TestCase):
def setUp(self):
super(NetworkCommandsTestCase, self).setUp()
self.commands = manage.NetworkCommands()
self.net = {'id': 0,
'label': 'fake',
'injected': False,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::/64',
'multi_host': False,
'gateway_v6': 'dead:beef::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '8.8.8.8',
'dns2': '8.8.4.4',
'vlan': 200,
'vlan_start': 201,
'vpn_public_address': '10.0.0.2',
'vpn_public_port': '2222',
'vpn_private_address': '192.168.0.2',
'dhcp_start': '192.168.0.3',
'project_id': 'fake_project',
'host': 'fake_host',
'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
def fake_network_get_by_cidr(context, cidr):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(cidr, self.fake_net['cidr'])
return db_fakes.FakeModel(dict(test_network.fake_network,
**self.fake_net))
def fake_network_get_by_uuid(context, uuid):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(uuid, self.fake_net['uuid'])
return db_fakes.FakeModel(dict(test_network.fake_network,
**self.fake_net))
def fake_network_update(context, network_id, values):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.assertEqual(values, self.fake_update_value)
self.fake_network_get_by_cidr = fake_network_get_by_cidr
self.fake_network_get_by_uuid = fake_network_get_by_uuid
self.fake_network_update = fake_network_update
def test_create(self):
def fake_create_networks(obj, context, **kwargs):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(kwargs['label'], 'Test')
self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
self.assertEqual(kwargs['multi_host'], False)
self.assertEqual(kwargs['num_networks'], 1)
self.assertEqual(kwargs['network_size'], 256)
self.assertEqual(kwargs['vlan'], 200)
self.assertEqual(kwargs['vlan_start'], 201)
self.assertEqual(kwargs['vpn_start'], 2000)
self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
self.assertEqual(kwargs['gateway'], '10.2.0.1')
self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
self.assertEqual(kwargs['bridge'], 'br200')
self.assertEqual(kwargs['bridge_interface'], 'eth0')
self.assertEqual(kwargs['dns1'], '8.8.8.8')
self.assertEqual(kwargs['dns2'], '8.8.4.4')
self.flags(network_manager='nova.network.manager.VlanManager')
from nova.network import manager as net_manager
self.stubs.Set(net_manager.VlanManager, 'create_networks',
fake_create_networks)
self.commands.create(
label='Test',
cidr='10.2.0.0/24',
num_networks=1,
network_size=256,
multi_host='F',
vlan=200,
vlan_start=201,
vpn_start=2000,
cidr_v6='fd00:2::/120',
gateway='10.2.0.1',
gateway_v6='fd00:2::22',
bridge='br200',
bridge_interface='eth0',
dns1='8.8.8.8',
dns2='8.8.4.4',
uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
def test_list(self):
def fake_network_get_all(context):
return [db_fakes.FakeModel(self.net)]
self.stubs.Set(db, 'network_get_all', fake_network_get_all)
output = StringIO()
sys.stdout = output
self.commands.list()
sys.stdout = sys.__stdout__
result = output.getvalue()
_fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
"%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
"%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
head = _fmt % {'id': 'id',
'cidr': 'IPv4',
'cidr_v6': 'IPv6',
'dhcp_start': 'start address',
'dns1': 'DNS1',
'dns2': 'DNS2',
'vlan': 'VlanID',
'project_id': 'project',
'uuid': "uuid"}
body = _fmt % {'id': self.net['id'],
'cidr': self.net['cidr'],
'cidr_v6': self.net['cidr_v6'],
'dhcp_start': self.net['dhcp_start'],
'dns1': self.net['dns1'],
'dns2': self.net['dns2'],
'vlan': self.net['vlan'],
'project_id': self.net['project_id'],
'uuid': self.net['uuid']}
answer = '%s\n%s\n' % (head, body)
self.assertEqual(result, answer)
def test_delete(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_uuid',
self.fake_network_get_by_uuid)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(uuid=self.fake_net['uuid'])
def test_delete_by_cidr(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(fixed_range=self.fake_net['cidr'])
def _test_modify_base(self, update_value, project, host, dis_project=None,
dis_host=None):
self.fake_net = self.net
self.fake_update_value = update_value
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
self.stubs.Set(db, 'network_update', self.fake_network_update)
self.commands.modify(self.fake_net['cidr'], project=project, host=host,
dis_project=dis_project, dis_host=dis_host)
def test_modify_associate(self):
self._test_modify_base(update_value={'project_id': 'test_project',
'host': 'test_host'},
project='test_project', host='test_host')
def test_modify_unchanged(self):
self._test_modify_base(update_value={}, project=None, host=None)
def test_modify_disassociate(self):
self._test_modify_base(update_value={'project_id': None, 'host': None},
project=None, host=None, dis_project=True,
dis_host=True)
class NeutronV2NetworkCommandsTestCase(test.TestCase):
def setUp(self):
super(NeutronV2NetworkCommandsTestCase, self).setUp()
self.flags(network_api_class='nova.network.neutronv2.api.API')
self.commands = manage.NetworkCommands()
def test_create(self):
self.assertEqual(2, self.commands.create())
def test_list(self):
self.assertEqual(2, self.commands.list())
def test_delete(self):
self.assertEqual(2, self.commands.delete())
def test_modify(self):
self.assertEqual(2, self.commands.modify('192.168.0.1'))
class ProjectCommandsTestCase(test.TestCase):
def setUp(self):
super(ProjectCommandsTestCase, self).setUp()
self.commands = manage.ProjectCommands()
def test_quota(self):
output = StringIO()
sys.stdout = output
self.commands.quota(project_id='admin',
key='instances',
value='unlimited',
)
sys.stdout = sys.__stdout__
result = output.getvalue()
print_format = "%-36s %-10s" % ('instances', 'unlimited')
self.assertIn(print_format, result)
def test_quota_update_invalid_key(self):
self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10'))
class VmCommandsTestCase(test.TestCase):
def setUp(self):
super(VmCommandsTestCase, self).setUp()
self.commands = manage.VmCommands()
self.fake_flavor = objects.Flavor(**test_flavors.DEFAULT_FLAVORS[0])
def test_list_without_host(self):
output = StringIO()
sys.stdout = output
with mock.patch.object(objects.InstanceList, 'get_by_filters') as get:
get.return_value = objects.InstanceList(
objects=[fake_instance.fake_instance_obj(
context.get_admin_context(), host='foo-host',
flavor=self.fake_flavor,
system_metadata={})])
self.commands.list()
sys.stdout = sys.__stdout__
result = output.getvalue()
self.assertIn('node', result) # check the header line
self.assertIn('m1.tiny', result) # flavor.name
self.assertIn('foo-host', result)
def test_list_with_host(self):
output = StringIO()
sys.stdout = output
with mock.patch.object(objects.InstanceList, 'get_by_host') as get:
get.return_value = objects.InstanceList(
objects=[fake_instance.fake_instance_obj(
context.get_admin_context(),
flavor=self.fake_flavor,
system_metadata={})])
self.commands.list(host='fake-host')
sys.stdout = sys.__stdout__
result = output.getvalue()
self.assertIn('node', result) # check the header line
self.assertIn('m1.tiny', result) # flavor.name
self.assertIn('fake-host', result)
class DBCommandsTestCase(test.TestCase):
def setUp(self):
super(DBCommandsTestCase, self).setUp()
self.commands = manage.DbCommands()
def test_archive_deleted_rows_negative(self):
self.assertEqual(1, self.commands.archive_deleted_rows(-1))
@mock.patch.object(migration, 'db_null_instance_uuid_scan',
return_value={'foo': 0})
def test_null_instance_uuid_scan_no_records_found(self, mock_scan):
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO()))
self.commands.null_instance_uuid_scan()
self.assertIn("There were no records found", sys.stdout.getvalue())
@mock.patch.object(migration, 'db_null_instance_uuid_scan',
return_value={'foo': 1, 'bar': 0})
def _test_null_instance_uuid_scan(self, mock_scan, delete):
self.useFixture(fixtures.MonkeyPatch('sys.stdout',
StringIO()))
self.commands.null_instance_uuid_scan(delete)
output = sys.stdout.getvalue()
if delete:
self.assertIn("Deleted 1 records from table 'foo'.", output)
self.assertNotIn("Deleted 0 records from table 'bar'.", output)
else:
self.assertIn("1 records in the 'foo' table", output)
self.assertNotIn("0 records in the 'bar' table", output)
self.assertNotIn("There were no records found", output)
def test_null_instance_uuid_scan_readonly(self):
self._test_null_instance_uuid_scan(delete=False)
def test_null_instance_uuid_scan_delete(self):
self._test_null_instance_uuid_scan(delete=True)
@mock.patch.object(sqla_migration, 'db_version', return_value=2)
def test_version(self, sqla_migrate):
self.commands.version()
sqla_migrate.assert_called_once_with(database='main')
@mock.patch.object(sqla_migration, 'db_sync')
def test_sync(self, sqla_sync):
self.commands.sync(version=4)
sqla_sync.assert_called_once_with(version=4, database='main')
class ApiDbCommandsTestCase(test.TestCase):
def setUp(self):
super(ApiDbCommandsTestCase, self).setUp()
self.commands = manage.ApiDbCommands()
@mock.patch.object(sqla_migration, 'db_version', return_value=2)
def test_version(self, sqla_migrate):
self.commands.version()
sqla_migrate.assert_called_once_with(database='api')
@mock.patch.object(sqla_migration, 'db_sync')
def test_sync(self, sqla_sync):
self.commands.sync(version=4)
sqla_sync.assert_called_once_with(version=4, database='api')
class ServiceCommandsTestCase(test.TestCase):
def setUp(self):
super(ServiceCommandsTestCase, self).setUp()
self.commands = manage.ServiceCommands()
def test_service_enable_invalid_params(self):
self.assertEqual(2, self.commands.enable('nohost', 'noservice'))
def test_service_disable_invalid_params(self):
self.assertEqual(2, self.commands.disable('nohost', 'noservice'))
class CellCommandsTestCase(test.TestCase):
def setUp(self):
super(CellCommandsTestCase, self).setUp()
self.commands = manage.CellCommands()
def test_create_transport_hosts_multiple(self):
"""Test the _create_transport_hosts method
when broker_hosts is set.
"""
brokers = "127.0.0.1:5672,127.0.0.2:5671"
thosts = self.commands._create_transport_hosts(
'guest', 'devstack',
broker_hosts=brokers)
self.assertEqual(2, len(thosts))
self.assertEqual('127.0.0.1', thosts[0].hostname)
self.assertEqual(5672, thosts[0].port)
self.assertEqual('127.0.0.2', thosts[1].hostname)
self.assertEqual(5671, thosts[1].port)
def test_create_transport_hosts_single(self):
"""Test the _create_transport_hosts method when hostname is passed."""
thosts = self.commands._create_transport_hosts('guest', 'devstack',
hostname='127.0.0.1',
port=80)
self.assertEqual(1, len(thosts))
self.assertEqual('127.0.0.1', thosts[0].hostname)
self.assertEqual(80, thosts[0].port)
def test_create_transport_hosts_single_broker(self):
"""Test the _create_transport_hosts method for single broker_hosts."""
thosts = self.commands._create_transport_hosts(
'guest', 'devstack',
broker_hosts='127.0.0.1:5672')
self.assertEqual(1, len(thosts))
self.assertEqual('127.0.0.1', thosts[0].hostname)
self.assertEqual(5672, thosts[0].port)
def test_create_transport_hosts_both(self):
"""Test the _create_transport_hosts method when both broker_hosts
and hostname/port are passed.
"""
thosts = self.commands._create_transport_hosts(
'guest', 'devstack',
broker_hosts='127.0.0.1:5672',
hostname='127.0.0.2', port=80)
self.assertEqual(1, len(thosts))
self.assertEqual('127.0.0.1', thosts[0].hostname)
self.assertEqual(5672, thosts[0].port)
def test_create_transport_hosts_wrong_val(self):
"""Test the _create_transport_hosts method when broker_hosts
is wrongly specified
"""
self.assertRaises(ValueError,
self.commands._create_transport_hosts,
'guest', 'devstack',
broker_hosts='127.0.0.1:5672,127.0.0.1')
def test_create_transport_hosts_wrong_port_val(self):
"""Test the _create_transport_hosts method when port in
broker_hosts is wrongly specified
"""
self.assertRaises(ValueError,
self.commands._create_transport_hosts,
'guest', 'devstack',
broker_hosts='127.0.0.1:')
def test_create_transport_hosts_wrong_port_arg(self):
"""Test the _create_transport_hosts method when port
argument is wrongly specified
"""
self.assertRaises(ValueError,
self.commands._create_transport_hosts,
'guest', 'devstack',
hostname='127.0.0.1', port='ab')
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(db, 'cell_create')
def test_create_broker_hosts(self, mock_db_cell_create, mock_ctxt):
"""Test the create function when broker_hosts is
passed
"""
cell_tp_url = "fake://guest:devstack@127.0.0.1:5432"
cell_tp_url += ",guest:devstack@127.0.0.2:9999/"
ctxt = mock.sentinel
mock_ctxt.return_value = mock.sentinel
self.commands.create("test",
broker_hosts='127.0.0.1:5432,127.0.0.2:9999',
woffset=0, wscale=0,
username="guest", password="devstack")
exp_values = {'name': "test",
'is_parent': False,
'transport_url': cell_tp_url,
'weight_offset': 0.0,
'weight_scale': 0.0}
mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(db, 'cell_create')
def test_create_broker_hosts_with_url_decoding_fix(self,
mock_db_cell_create,
mock_ctxt):
"""Test the create function when broker_hosts is
passed
"""
cell_tp_url = "fake://the=user:the=password@127.0.0.1:5432/"
ctxt = mock.sentinel
mock_ctxt.return_value = mock.sentinel
self.commands.create("test",
broker_hosts='127.0.0.1:5432',
woffset=0, wscale=0,
username="the=user",
password="the=password")
exp_values = {'name': "test",
'is_parent': False,
'transport_url': cell_tp_url,
'weight_offset': 0.0,
'weight_scale': 0.0}
mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(db, 'cell_create')
def test_create_hostname(self, mock_db_cell_create, mock_ctxt):
"""Test the create function when hostname and port is
passed
"""
cell_tp_url = "fake://guest:devstack@127.0.0.1:9999/"
ctxt = mock.sentinel
mock_ctxt.return_value = mock.sentinel
self.commands.create("test",
hostname='127.0.0.1', port="9999",
woffset=0, wscale=0,
username="guest", password="devstack")
exp_values = {'name': "test",
'is_parent': False,
'transport_url': cell_tp_url,
'weight_offset': 0.0,
'weight_scale': 0.0}
mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
|
sunze/py_flask
|
refs/heads/master
|
venv/lib/python3.4/site-packages/billiard/_ext.py
|
9
|
from __future__ import absolute_import
import sys
supports_exec = True
from .compat import _winapi as win32 # noqa
if sys.platform.startswith("java"):
_billiard = None
else:
try:
import _billiard # noqa
except ImportError:
import _multiprocessing as _billiard # noqa
supports_exec = False
try:
Connection = _billiard.Connection
except AttributeError: # Py3
from billiard.connection import Connection # noqa
PipeConnection = getattr(_billiard, "PipeConnection", None)
def ensure_multiprocessing():
if _billiard is None:
raise NotImplementedError("multiprocessing not supported")
def ensure_SemLock():
try:
from _billiard import SemLock # noqa
except ImportError:
try:
from _multiprocessing import SemLock # noqa
except ImportError:
raise ImportError("""\
This platform lacks a functioning sem_open implementation, therefore,
the required synchronization primitives needed will not function,
see issue 3770.""")
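# Usage sketch for downstream callers (comments only; behaviour as
# implemented above):
#   ensure_multiprocessing()  # raises NotImplementedError under Jython
#   ensure_SemLock()          # raises ImportError when sem_open is missing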
|