| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2-1.05M) |
|---|---|---|---|---|---|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
setup(
app=['ComSkipper.py'],
options=dict(py2app=dict(
plist=dict(
LSBackgroundOnly=True,
),
)),
)
| ramesh130/etv-comskip | src/scripts/ComSkipper/setup.py | Python | gpl-2.0 | 263 |
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import gettext_lazy as _
from django.db import models
from events.models import Event, EventCategory
class EventsPluginModel(CMSPlugin):
title = models.CharField(blank=True, null=True, max_length=50)
include_specials = models.BooleanField(blank=False, default=True)
include_regular = models.BooleanField(blank=False, default=True)
show_when_no_events = models.BooleanField(blank=False, default=False)
style = models.IntegerField(blank=False, choices=((0, _('List')), (1, _('Cards'))), default=0)
class EventsPlugin(CMSPluginBase):
name = _("Events")
model = EventsPluginModel
render_template = "events/events.html"
text_enabled = False
allow_children = False
def render(self, context, instance, placeholder):
specials = [instance.include_specials, not instance.include_regular]
context.update({
'events': Event.displayed_events.future().filter(special__in=specials).all(),
'use_cards': instance.style == 1,
'title': instance.title,
'show_when_no_events': instance.show_when_no_events,
})
return context
class EventCategoryPluginModel(CMSPlugin):
category = models.ForeignKey(
to=EventCategory,
related_name='plugins',
blank=False,
null=False,
on_delete=models.CASCADE
)
class EventCategoryPlugin(CMSPluginBase):
name = _("Event Category")
model = EventCategoryPluginModel
render_template = "events/events.html"
text_enabled = False
allow_children = False
def render(self, context, instance, placeholder):
context.update({
'events': Event.displayed_events.future().filter(category=instance.category).all(),
'use_cards': False,
'title': instance.category.name,
'text': instance.category.description,
'show_when_no_events': True,
})
return context
class FeaturedEventPluginModel(CMSPlugin):
event = models.ForeignKey(to='Event', on_delete=models.PROTECT, related_name='featured_plugins', blank=False)
class FeaturedEventPlugin(CMSPluginBase):
name = _("Featured Event")
model = FeaturedEventPluginModel
render_template = "events/snippets/event_card.html"
text_enabled = False
allow_children = False
def render(self, context, instance, placeholder):
context.update({
'event': instance.event,
})
return context
class EventsTeaserPluginModel(CMSPlugin):
    delta_days = models.IntegerField(
        blank=True, null=True,
        help_text="Only events within this many days from now are shown. Leave empty for no restriction.")
    max_displayed = models.IntegerField(
        blank=True, null=True,
        help_text="Maximum number of events to display. Leave empty for no restriction.")
class EventsTeaserPlugin(CMSPluginBase):
name = _("Events Teaser")
model = EventsTeaserPluginModel
render_template = "events/events_teaser.html"
text_enabled = False
allow_children = False
def render(self, context, instance, placeholder):
events = Event.displayed_events.future(delta_days=instance.delta_days, limit=instance.max_displayed).all()
context.update({
'events': events,
})
return context
plugin_pool.register_plugin(EventsPlugin)
plugin_pool.register_plugin(EventCategoryPlugin)
plugin_pool.register_plugin(FeaturedEventPlugin)
plugin_pool.register_plugin(EventsTeaserPlugin)
| gitsimon/tq_website | events/cms_plugins.py | Python | gpl-2.0 | 3,686 |
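The plugins above lean on a custom manager, Event.displayed_events, whose future() method accepts optional delta_days and limit arguments; that manager lives in events/models.py and is not shown in this file. Below is a minimal sketch of how such a manager could look, assuming an Event model with a start datetime field (the field names and behaviour are assumptions, not the project's actual code):

from datetime import timedelta

from django.db import models
from django.utils import timezone


class DisplayedEventsQuerySet(models.QuerySet):
    """Hypothetical queryset backing Event.displayed_events (sketch only)."""

    def future(self, delta_days=None, limit=None):
        # Upcoming events, optionally restricted to a window of `delta_days`
        # and capped at `limit` entries.
        qs = self.filter(start__gte=timezone.now())
        if delta_days is not None:
            qs = qs.filter(start__lte=timezone.now() + timedelta(days=delta_days))
        if limit is not None:
            qs = qs[:limit]  # note: slicing means no further .filter() calls
        return qs


# On the (assumed) Event model: displayed_events = DisplayedEventsQuerySet.as_manager()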
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasnotesubstrbase import HasNoteSubstrBase
#-------------------------------------------------------------------------
# "People having notes that contain a substring"
#-------------------------------------------------------------------------
class HasNoteMatchingSubstringOf(HasNoteSubstrBase):
"""People having notes containing <substring>"""
name = _('People having notes containing <substring>')
description = _("Matches people whose notes contain text matching a substring")
| pmghalvorsen/gramps_branch | gramps/gen/filters/rules/person/_hasnotematchingsubstringof.py | Python | gpl-2.0 | 1,747 |
import uuid
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
from . import utils
class BaseModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class SlugModelMixin(models.Model):
slug = models.SlugField(
_('Slug Title'),
max_length=250,
unique=True,
blank=True,
null=True
)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
return super().save(*args, **kwargs)
class Meta:
abstract = True
class Planet(BaseModel):
name = models.CharField(_('Name'), max_length=250, unique=True)
class Meta:
db_table = 'planets'
ordering = ('-updated_at', 'name')
verbose_name = _('Planet')
verbose_name_plural = _('Planets')
def __str__(self):
return self.name
class Resource(SlugModelMixin, BaseModel):
title = models.CharField(_('Title'), max_length=250, unique=True)
description = models.TextField(_('Description'), blank=True)
planet = models.ForeignKey(
Planet, on_delete=models.CASCADE, verbose_name=_('Planet'), related_name='resources'
)
class Meta:
db_table = 'resources'
ordering = ('-updated_at', 'title')
verbose_name = _('Resource')
verbose_name_plural = _('Resources')
def is_link(self):
return False
def __str__(self):
return self.title
class ResourceLink(SlugModelMixin, BaseModel):
url = models.URLField(_("Url"), unique=True)
title = models.CharField(_('Title'), max_length=250, unique=True)
description = models.TextField(_('Description'), blank=True)
planet = models.ForeignKey(
Planet, on_delete=models.CASCADE, verbose_name=_('Planet'), related_name='resources_links'
)
class Meta:
db_table = 'resources_links'
ordering = ('-updated_at', 'title')
verbose_name = _('Resource Link')
verbose_name_plural = _('Resources Links')
def is_link(self):
return True
def save(self, *args, **kwargs):
if self.url:
self.description = utils.get_site_description(self.url)
if not self.description:
self.description = self.title
return super().save(*args, **kwargs)
def __str__(self):
return '{} <{}>'.format(self.title, self.url)
| mochaoss/inspace | inspace/core/models.py | Python | gpl-3.0 | 2,617 |
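ResourceLink.save() above calls utils.get_site_description(), which is imported from the app's utils module but not shown here. A rough, stdlib-only sketch of what such a helper might do (the real implementation may differ); it fetches the page and pulls the content of its meta description tag:

from html.parser import HTMLParser
from urllib.request import urlopen


class _MetaDescriptionParser(HTMLParser):
    """Collects the content of <meta name="description" ...>, if present."""

    def __init__(self):
        super().__init__()
        self.description = ''

    def handle_starttag(self, tag, attrs):
        if tag == 'meta':
            attrs = dict(attrs)
            if attrs.get('name', '').lower() == 'description':
                self.description = attrs.get('content', '')


def get_site_description(url, timeout=5):
    """Best-effort fetch of a page's meta description; returns '' on failure."""
    try:
        with urlopen(url, timeout=timeout) as response:
            html = response.read().decode('utf-8', errors='replace')
    except (OSError, ValueError):
        return ''
    parser = _MetaDescriptionParser()
    parser.feed(html)
    return parser.description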
"""
Zip splitter.
Status: can read most important headers
Authors: Christophe Gisquet and Victor Stinner
"""
from hachoir_py3.parser import Parser
from hachoir_py3.field import (FieldSet, ParserError,
Bit, Bits, Enum,
TimeDateMSDOS32, SubFile,
UInt8, UInt16, UInt32, UInt64,
String, PascalString16,
RawBytes)
from hachoir_py3.stream.input import ReadStreamError
from hachoir_py3.core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_py3.core.tools import makeUnicode
from hachoir_py3.core.endian import LITTLE_ENDIAN
from hachoir_py3.parser.common.deflate import Deflate
MAX_FILESIZE = 1000 * 1024 * 1024
COMPRESSION_DEFLATE = 8
COMPRESSION_METHOD = {
0: "no compression",
1: "Shrunk",
2: "Reduced (factor 1)",
3: "Reduced (factor 2)",
4: "Reduced (factor 3)",
5: "Reduced (factor 4)",
6: "Imploded",
7: "Tokenizing",
8: "Deflate",
9: "Deflate64",
10: "PKWARE Imploding",
11: "Reserved by PKWARE",
12: "File is compressed using BZIP2 algorithm",
13: "Reserved by PKWARE",
14: "LZMA (EFS)",
15: "Reserved by PKWARE",
16: "Reserved by PKWARE",
17: "Reserved by PKWARE",
18: "File is compressed using IBM TERSE (new)",
19: "IBM LZ77 z Architecture (PFS)",
98: "PPMd version I, Rev 1",
}
def ZipRevision(field):
return "%u.%u" % divmod(field.value, 10)
class ZipVersion(FieldSet):
static_size = 16
HOST_OS = {
0: "FAT file system (DOS, OS/2, NT)",
1: "Amiga",
2: "VMS (VAX or Alpha AXP)",
3: "Unix",
4: "VM/CMS",
5: "Atari",
6: "HPFS file system (OS/2, NT 3.x)",
7: "Macintosh",
8: "Z-System",
9: "CP/M",
10: "TOPS-20",
11: "NTFS file system (NT)",
12: "SMS/QDOS",
13: "Acorn RISC OS",
14: "VFAT file system (Win95, NT)",
15: "MVS",
16: "BeOS (BeBox or PowerMac)",
17: "Tandem",
}
def createFields(self):
yield textHandler(UInt8(self, "zip_version", "ZIP version"), ZipRevision)
yield Enum(UInt8(self, "host_os", "ZIP Host OS"), self.HOST_OS)
class ZipGeneralFlags(FieldSet):
static_size = 16
def createFields(self):
# Need the compression info from the parent, and that is the byte
# following
method = self.stream.readBits(
self.absolute_address + 16, 16, LITTLE_ENDIAN)
yield Bit(self, "is_encrypted", "File is encrypted?")
if method == 6:
yield Bit(self, "use_8k_sliding", "Use 8K sliding dictionary (instead of 4K)")
yield Bit(self, "use_3shannon", "Use a 3 Shannon-Fano tree (instead of 2 Shannon-Fano)")
elif method in (8, 9):
NAME = {
0: "Normal compression",
1: "Maximum compression",
2: "Fast compression",
3: "Super Fast compression"
}
yield Enum(Bits(self, "method", 2), NAME)
elif method == 14: # LZMA
yield Bit(self, "lzma_eos", "LZMA stream is ended with a EndOfStream marker")
yield Bit(self, "unused[]")
else:
yield Bits(self, "compression_info", 2)
yield Bit(self, "has_descriptor",
"Compressed data followed by descriptor?")
yield Bit(self, "enhanced_deflate", "Reserved for use with method 8")
yield Bit(self, "is_patched", "File is compressed with patched data?")
yield Bit(self, "strong_encrypt", "Strong encryption (version >= 50)")
yield Bits(self, "unused[]", 4, "Unused")
yield Bit(self, "uses_unicode", "Filename and comments are in UTF-8")
yield Bit(self, "incomplete", "Reserved by PKWARE for enhanced compression.")
yield Bit(self, "encrypted_central_dir", "Selected data values in the Local Header are masked")
yield Bits(self, "unused[]", 2, "Unused")
class ExtraField(FieldSet):
EXTRA_FIELD_ID = {
0x0007: "AV Info",
0x0009: "OS/2 extended attributes (also Info-ZIP)",
0x000a: "PKWARE Win95/WinNT FileTimes", # undocumented!
0x000c: "PKWARE VAX/VMS (also Info-ZIP)",
0x000d: "PKWARE Unix",
0x000f: "Patch Descriptor",
0x07c8: "Info-ZIP Macintosh (old, J. Lee)",
0x2605: "ZipIt Macintosh (first version)",
0x2705: "ZipIt Macintosh v 1.3.5 and newer (w/o full filename)",
0x334d: "Info-ZIP Macintosh (new, D. Haase Mac3 field)",
0x4341: "Acorn/SparkFS (David Pilling)",
0x4453: "Windows NT security descriptor (binary ACL)",
0x4704: "VM/CMS",
0x470f: "MVS",
0x4b46: "FWKCS MD5 (third party, see below)",
0x4c41: "OS/2 access control list (text ACL)",
0x4d49: "Info-ZIP VMS (VAX or Alpha)",
0x5356: "AOS/VS (binary ACL)",
0x5455: "extended timestamp",
0x5855: "Info-ZIP Unix (original; also OS/2, NT, etc.)",
0x6542: "BeOS (BeBox, PowerMac, etc.)",
0x756e: "ASi Unix",
0x7855: "Info-ZIP Unix (new)",
0xfb4a: "SMS/QDOS",
}
def createFields(self):
yield Enum(UInt16(self, "field_id", "Extra field ID"),
self.EXTRA_FIELD_ID)
size = UInt16(self, "field_data_size", "Extra field data size")
yield size
if size.value > 0:
yield RawBytes(self, "field_data", size.value, "Unknown field data")
class ExtraFields(FieldSet):
def createFields(self):
while self.current_size < self.size:
yield ExtraField(self, "extra[]")
def ZipStartCommonFields(self):
yield ZipVersion(self, "version_needed", "Version needed")
yield ZipGeneralFlags(self, "flags", "General purpose flag")
yield Enum(UInt16(self, "compression", "Compression method"),
COMPRESSION_METHOD)
yield TimeDateMSDOS32(self, "last_mod", "Last modification file time")
yield textHandler(UInt32(self, "crc32", "CRC-32"), hexadecimal)
yield UInt32(self, "compressed_size", "Compressed size")
yield UInt32(self, "uncompressed_size", "Uncompressed size")
yield UInt16(self, "filename_length", "Filename length")
yield UInt16(self, "extra_length", "Extra fields length")
def zipGetCharset(self):
if self["flags/uses_unicode"].value:
return "UTF-8"
else:
return "ISO-8859-15"
class ZipCentralDirectory(FieldSet):
HEADER = 0x02014b50
def createFields(self):
yield ZipVersion(self, "version_made_by", "Version made by")
yield from ZipStartCommonFields(self)
# Check unicode status
charset = zipGetCharset(self)
yield UInt16(self, "comment_length", "Comment length")
yield UInt16(self, "disk_number_start", "Disk number start")
yield UInt16(self, "internal_attr", "Internal file attributes")
yield UInt32(self, "external_attr", "External file attributes")
yield UInt32(self, "offset_header", "Relative offset of local header")
yield String(self, "filename", self["filename_length"].value,
"Filename", charset=charset)
if 0 < self["extra_length"].value:
yield ExtraFields(self, "extra", size=self["extra_length"].value * 8,
description="Extra fields")
if 0 < self["comment_length"].value:
yield String(self, "comment", self["comment_length"].value,
"Comment", charset=charset)
def createDescription(self):
return "Central directory: %s" % self["filename"].display
class Zip64EndCentralDirectory(FieldSet):
HEADER = 0x06064b50
def createFields(self):
yield UInt64(self, "zip64_end_size",
"Size of zip64 end of central directory record")
yield ZipVersion(self, "version_made_by", "Version made by")
yield ZipVersion(self, "version_needed", "Version needed to extract")
yield UInt32(self, "number_disk", "Number of this disk")
yield UInt32(self, "number_disk2",
"Number of the disk with the start of the central directory")
yield UInt64(self, "number_entries",
"Total number of entries in the central directory on this disk")
yield UInt64(self, "number_entries2",
"Total number of entries in the central directory")
yield UInt64(self, "size", "Size of the central directory")
yield UInt64(self, "offset", "Offset of start of central directory")
if 0 < self["zip64_end_size"].value:
yield RawBytes(self, "data_sector", self["zip64_end_size"].value,
"zip64 extensible data sector")
class ZipEndCentralDirectory(FieldSet):
HEADER = 0x06054b50
def createFields(self):
yield UInt16(self, "number_disk", "Number of this disk")
yield UInt16(self, "number_disk2", "Number in the central dir")
yield UInt16(self, "total_number_disk",
"Total number of entries in this disk")
yield UInt16(self, "total_number_disk2",
"Total number of entries in the central dir")
yield UInt32(self, "size", "Size of the central directory")
yield UInt32(self, "offset", "Offset of start of central directory")
yield PascalString16(self, "comment", "ZIP comment")
class ZipDataDescriptor(FieldSet):
HEADER_STRING = b"\x50\x4B\x07\x08"
HEADER = 0x08074B50
static_size = 96
def createFields(self):
yield textHandler(UInt32(self, "file_crc32",
"Checksum (CRC32)"), hexadecimal)
yield filesizeHandler(UInt32(self, "file_compressed_size",
"Compressed size (bytes)"))
yield filesizeHandler(UInt32(self, "file_uncompressed_size",
"Uncompressed size (bytes)"))
class FileEntry(FieldSet):
HEADER = 0x04034B50
filename = None
def data(self, size):
compression = self["compression"].value
if compression == 0:
return SubFile(self, "data", size, filename=self.filename)
compressed = SubFile(self, "compressed_data",
size, filename=self.filename)
if compression == COMPRESSION_DEFLATE:
return Deflate(compressed)
else:
return compressed
def resync(self):
# Non-seekable output, search the next data descriptor
size = self.stream.searchBytesLength(ZipDataDescriptor.HEADER_STRING, False,
self.absolute_address + self.current_size)
if size <= 0:
raise ParserError("Couldn't resync to %s" %
ZipDataDescriptor.HEADER_STRING)
yield self.data(size)
yield textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
data_desc = ZipDataDescriptor(self, "data_desc", "Data descriptor")
# self.info("Resynced!")
yield data_desc
# The above could be checked anytime, but we prefer trying parsing
# than aborting
if self["crc32"].value == 0 and \
data_desc["file_compressed_size"].value != size:
raise ParserError("Bad resync: position=>%i but data_desc=>%i" %
(size, data_desc["file_compressed_size"].value))
def createFields(self):
yield from ZipStartCommonFields(self)
length = self["filename_length"].value
if length:
filename = String(self, "filename", length, "Filename",
charset=zipGetCharset(self))
yield filename
self.filename = filename.value
if self["extra_length"].value:
yield ExtraFields(self, "extra", size=self["extra_length"].value * 8,
description="Extra fields")
size = self["compressed_size"].value
if size > 0:
yield self.data(size)
elif self["flags/incomplete"].value:
yield from self.resync()
if self["flags/has_descriptor"].value and self['crc32'].value == 0:
yield ZipDataDescriptor(self, "data_desc", "Data descriptor")
def createDescription(self):
return "File entry: %s (%s)" % \
(self["filename"].value, self["compressed_size"].display)
def validate(self):
if self["compression"].value not in COMPRESSION_METHOD:
return "Unknown compression method (%u)" % self["compression"].value
return ""
class ZipSignature(FieldSet):
HEADER = 0x05054B50
def createFields(self):
yield PascalString16(self, "signature", "Signature")
class Zip64EndCentralDirectoryLocator(FieldSet):
HEADER = 0x07064b50
def createFields(self):
yield UInt32(self, "disk_number",
"Number of the disk with the start of the zip64 end of central directory")
yield UInt64(self, "relative_offset",
"Relative offset of the zip64 end of central directory record")
yield UInt32(self, "disk_total_number", "Total number of disks")
class ZipFile(Parser):
endian = LITTLE_ENDIAN
MAGIC = b"PK\3\4"
MIME_TYPES = {
# Default ZIP archive
"application/zip": "zip",
"application/x-zip": "zip",
# Java archive (JAR)
"application/x-jar": "jar",
"application/java-archive": "jar",
# Android APK
"application/vnd.android.package-archive": "apk",
# OpenOffice 1.0
"application/vnd.sun.xml.calc": "sxc",
"application/vnd.sun.xml.draw": "sxd",
"application/vnd.sun.xml.impress": "sxi",
"application/vnd.sun.xml.writer": "sxw",
"application/vnd.sun.xml.math": "sxm",
# OpenOffice 1.0 (template)
"application/vnd.sun.xml.calc.template": "stc",
"application/vnd.sun.xml.draw.template": "std",
"application/vnd.sun.xml.impress.template": "sti",
"application/vnd.sun.xml.writer.template": "stw",
"application/vnd.sun.xml.writer.global": "sxg",
# OpenDocument
"application/vnd.oasis.opendocument.chart": "odc",
"application/vnd.oasis.opendocument.image": "odi",
"application/vnd.oasis.opendocument.database": "odb",
"application/vnd.oasis.opendocument.formula": "odf",
"application/vnd.oasis.opendocument.graphics": "odg",
"application/vnd.oasis.opendocument.presentation": "odp",
"application/vnd.oasis.opendocument.spreadsheet": "ods",
"application/vnd.oasis.opendocument.text": "odt",
"application/vnd.oasis.opendocument.text-master": "odm",
# OpenDocument (template)
"application/vnd.oasis.opendocument.graphics-template": "otg",
"application/vnd.oasis.opendocument.presentation-template": "otp",
"application/vnd.oasis.opendocument.spreadsheet-template": "ots",
"application/vnd.oasis.opendocument.text-template": "ott",
}
PARSER_TAGS = {
"id": "zip",
"category": "archive",
"file_ext": tuple(MIME_TYPES.values()),
"mime": tuple(MIME_TYPES.keys()),
"magic": ((MAGIC, 0),),
"subfile": "skip",
"min_size": (4 + 26) * 8, # header + file entry
"description": "ZIP archive"
}
CHUNK_TYPES = {
FileEntry.HEADER: (FileEntry, "file[]", None),
ZipDataDescriptor.HEADER: (ZipDataDescriptor, "spanning[]", None),
0x30304b50: (ZipDataDescriptor, "temporary_spanning[]", None),
ZipCentralDirectory.HEADER: (ZipCentralDirectory, "central_directory[]", None),
ZipEndCentralDirectory.HEADER: (ZipEndCentralDirectory, "end_central_directory", "End of central directory"),
Zip64EndCentralDirectory.HEADER: (Zip64EndCentralDirectory, "end64_central_directory", "ZIP64 end of central directory"),
ZipSignature.HEADER: (ZipSignature, "signature", "Signature"),
        Zip64EndCentralDirectoryLocator.HEADER: (Zip64EndCentralDirectoryLocator, "end_locator", "ZIP64 End of central directory locator"),
}
def validate(self):
# For generic ZIP files, don't attempt to locate a header in the middle of the file.
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "No magic found at start of file"
try:
if self["header[0]"].value != FileEntry.HEADER:
return "Invalid magic"
except Exception:
return "Unable to get header #0"
try:
file0 = self["file[0]"]
except Exception:
return "Unable to get file #0"
err = file0.validate()
if err:
return "File #0: %s" % err
return True
def createFields(self):
# File data
self.signature = None
self.central_directory = []
while not self.eof:
skip = 0
while True:
try:
header = self.stream.readBits(self.absolute_address + self.current_size + skip, 32, self.endian)
if header in self.CHUNK_TYPES:
break
skipdelta = self.stream.searchBytes(b'PK', self.absolute_address + self.current_size + skip + 8)
if skipdelta is None:
if not self.current_size:
raise ParserError("Failed to find any zip headers")
return
skip = skipdelta - (self.absolute_address + self.current_size)
except ReadStreamError:
if not self.current_size:
raise ParserError("Failed to read stream")
return
if skip:
yield RawBytes(self, "unparsed[]", skip // 8)
yield textHandler(
UInt32(self, "header[]", "Header"), hexadecimal)
ftype, fname, fdesc = self.CHUNK_TYPES[header]
yield ftype(self, fname, fdesc)
def createMimeType(self):
if self["file[0]/filename"].value == "mimetype":
return makeUnicode(self["file[0]/data"].value)
else:
return "application/zip"
def createFilenameSuffix(self):
if self["file[0]/filename"].value == "mimetype":
mime = self["file[0]/compressed_data"].value
if mime in self.MIME_TYPES:
return "." + self.MIME_TYPES[mime]
return ".zip"
def createContentSize(self):
start = 0
end = MAX_FILESIZE * 8
end = self.stream.searchBytes(b"PK\5\6", start, end)
if end is not None:
return end + 22 * 8
return None
| SickGear/SickGear | lib/hachoir_py3/parser/archive/zip.py | Python | gpl-3.0 | 18,896 |
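For readers less familiar with the ZIP layout that ZipStartCommonFields() walks, here is a small, self-contained illustration of the same 26-byte local-file-header block using only the standard struct module. It is not hachoir code, just a sketch of the on-disk format the parser above handles:

import struct


def read_local_file_header(fp):
    """Read one ZIP local file header from a binary file object (sketch)."""
    if fp.read(4) != b"PK\x03\x04":          # FileEntry.HEADER, as stored on disk
        raise ValueError("not a local file header")
    # '<' = little-endian (LITTLE_ENDIAN above); H = UInt16, I = UInt32,
    # fields in the same order as ZipStartCommonFields().
    (version, flags, compression, mod_time, mod_date,
     crc32, compressed_size, uncompressed_size,
     filename_len, extra_len) = struct.unpack("<HHHHHIIIHH", fp.read(26))
    charset = "utf-8" if flags & 0x0800 else "iso-8859-15"   # bit 11: uses_unicode
    filename = fp.read(filename_len).decode(charset, errors="replace")
    fp.read(extra_len)                        # skip the extra fields
    return {
        "filename": filename,
        "compression": compression,           # map with COMPRESSION_METHOD above
        "crc32": crc32,
        "compressed_size": compressed_size,
        "uncompressed_size": uncompressed_size,
    }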
from __future__ import absolute_import
from navigation.api import register_top_menu
from .links import link_tools_menu
#tool_menu = register_top_menu('tools', link=link_tools_menu, position=-3)
| commonwealth-of-puerto-rico/lean | paart/apps/project_tools/__init__.py | Python | gpl-3.0 | 197 |
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The follow diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Result Q"
"""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import atexit
import os
from concurrent.futures import _base
import queue
from queue import Full
import multiprocessing
from multiprocessing import SimpleQueue
from multiprocessing.connection import wait
import threading
import weakref
from functools import partial
import itertools
import traceback
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
# Hack to embed stringification of remote traceback in local traceback
class _RemoteTraceback(Exception):
def __init__(self, tb):
self.tb = tb
def __str__(self):
return self.tb
class _ExceptionWithTraceback:
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return _rebuild_exc, (self.exc, self.tb)
def _rebuild_exc(exc, tb):
exc.__cause__ = _RemoteTraceback(tb)
return exc
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _get_chunks(*iterables, chunksize):
""" Iterates over zip()ed iterables in chunks. """
it = zip(*iterables)
while True:
chunk = tuple(itertools.islice(it, chunksize))
if not chunk:
return
yield chunk
def _process_chunk(fn, chunk):
""" Processes a chunk of an iterable passed to map.
Runs the function passed to map() on a chunk of the
iterable passed to map.
This function is run in a separate process.
"""
return [fn(*args) for args in chunk]
def _process_worker(call_queue, result_queue):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be written
to by the worker.
shutdown: A multiprocessing.Event that will be set as a signal to the
worker that it should exit when call_queue is empty.
"""
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(os.getpid())
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException as e:
exc = _ExceptionWithTraceback(e, e.__traceback__)
result_queue.put(_ResultItem(call_item.work_id, exception=exc))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
        processes: A list of the multiprocessing.Process instances used as
workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A multiprocessing.Queue of _ResultItems generated by the
process workers.
"""
executor = None
def shutting_down():
return _shutdown or executor is None or executor._shutdown_thread
def shutdown_worker():
# This is an upper bound
nb_children_alive = sum(p.is_alive() for p in processes.values())
for i in range(0, nb_children_alive):
call_queue.put_nowait(None)
# Release the queue's resources as soon as possible.
call_queue.close()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS X.
for p in processes.values():
p.join()
reader = result_queue._reader
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
sentinels = [p.sentinel for p in processes.values()]
assert sentinels
ready = wait([reader] + sentinels)
if reader in ready:
result_item = reader.recv()
else:
# Mark the process pool broken so that submits fail right now.
executor = executor_reference()
if executor is not None:
executor._broken = True
executor._shutdown_thread = True
executor = None
# All futures in flight must be marked failed
for work_id, work_item in pending_work_items.items():
work_item.future.set_exception(
BrokenProcessPool(
"A process in the process pool was "
"terminated abruptly while the future was "
"running or pending."
))
# Delete references to object. See issue16284
del work_item
pending_work_items.clear()
# Terminate remaining workers forcibly: the queues or their
# locks may be in a dirty state and block forever.
for p in processes.values():
p.terminate()
shutdown_worker()
return
if isinstance(result_item, int):
# Clean shutdown of a worker using its PID
# (avoids marking the executor broken)
assert shutting_down()
p = processes.pop(result_item)
p.join()
if not processes:
shutdown_worker()
return
elif result_item is not None:
work_item = pending_work_items.pop(result_item.work_id, None)
# work_item can be None if another process terminated (see above)
if work_item is not None:
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
# Delete references to object. See issue16284
del work_item
# Check whether we should start shutting down.
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if shutting_down():
try:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
shutdown_worker()
return
except Full:
# This is not a problem: we will eventually be woken up (in
# result_queue.get()) and be able to send a sentinel again.
pass
executor = None
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
        # indeterminate limit, assume that limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class BrokenProcessPool(RuntimeError):
"""
Raised when a process in a ProcessPoolExecutor terminated abruptly
while a future was in the running state.
"""
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = os.cpu_count() or 1
else:
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
# Killed worker processes can produce spurious "broken pipe"
# tracebacks in the queue's own worker thread. But we detect killed
# processes anyway, so silence the tracebacks.
self._call_queue._ignore_epipe = True
self._result_queue = SimpleQueue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
# Map of pids to processes
self._processes = {}
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
self._broken = False
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(_, q=self._result_queue):
q.put(None)
if self._queue_management_thread is None:
# Start the processes so that their sentinels are known.
self._adjust_process_count()
self._queue_management_thread = threading.Thread(
target=_queue_management_worker,
args=(weakref.ref(self, weakref_cb),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_threads_queues[self._queue_management_thread] = self._result_queue
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue))
p.start()
self._processes[p.pid] = p
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._broken:
raise BrokenProcessPool('A child process terminated '
'abruptly, the process pool is not usable anymore')
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._result_queue.put(None)
self._start_queue_management_thread()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def map(self, fn, *iterables, timeout=None, chunksize=1):
"""Returns an iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
chunksize: If greater than one, the iterables will be chopped into
chunks of size chunksize and submitted to the process pool.
If set to one, the items in the list will be sent one at a time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
if chunksize < 1:
raise ValueError("chunksize must be >= 1.")
results = super().map(partial(_process_chunk, fn),
_get_chunks(*iterables, chunksize=chunksize),
timeout=timeout)
return itertools.chain.from_iterable(results)
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if self._queue_management_thread:
# Wake up queue management thread
self._result_queue.put(None)
if wait:
self._queue_management_thread.join()
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._processes = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/concurrent/futures/process.py | Python | gpl-3.0 | 20,135 |
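The module docstring above describes the executor's internal plumbing; from the caller's side the API is just submit() and map(). A minimal usage sketch follows (standard concurrent.futures usage, nothing specific to this vendored copy):

from concurrent.futures import ProcessPoolExecutor


def square(x):
    return x * x


if __name__ == "__main__":   # guard is required: worker processes re-import this module
    with ProcessPoolExecutor(max_workers=2) as executor:
        future = executor.submit(square, 7)                        # one call -> a Future
        print(future.result())                                     # 49
        print(list(executor.map(square, range(5), chunksize=2)))   # [0, 1, 4, 9, 16]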
# moduleBonusTriageModule
#
# Used by:
# Variations of module: Triage Module I (2 of 2)
type = "active"
runTime = "early"
def handler(fit, src, context):
    # Remote effect bonuses (duration / amount / range / falloff)
for skill, amtAttr, stack in (
("Capital Remote Armor Repair Systems", "armorDamageAmount", True),
("Capital Shield Emission Systems", "shieldBonus", True),
("Capital Capacitor Emission Systems", "powerTransferAmount", False),
("Capital Remote Hull Repair Systems", "structureDamageAmount", False)):
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill(skill), "duration",
src.getModifiedItemAttr("siegeRemoteLogisticsDurationBonus"))
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill(skill), amtAttr,
src.getModifiedItemAttr("siegeRemoteLogisticsAmountBonus"),
stackingPenalties=stack)
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill(skill), "maxRange",
src.getModifiedItemAttr("siegeRemoteLogisticsRangeBonus"), stackingPenalties=True)
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill(skill), "falloffEffectiveness",
src.getModifiedItemAttr("siegeRemoteLogisticsRangeBonus"), stackingPenalties=True)
    # Local armor/shield rep effects (duration / amount)
for skill, amtAttr in (
("Capital Shield Operation", "shieldBonus"),
("Capital Repair Systems", "armorDamageAmount")):
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill(skill), "duration",
src.getModifiedItemAttr("siegeLocalLogisticsDurationBonus"))
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill(skill), amtAttr,
src.getModifiedItemAttr("siegeLocalLogisticsAmountBonus"))
# Speed bonus
fit.ship.boostItemAttr("maxVelocity", src.getModifiedItemAttr("speedFactor"), stackingPenalties=True)
# Scan resolution multiplier
fit.ship.multiplyItemAttr("scanResolution", src.getModifiedItemAttr("scanResolutionMultiplier"),
stackingPenalties=True)
# Mass multiplier
fit.ship.multiplyItemAttr("mass", src.getModifiedItemAttr("siegeMassMultiplier"), stackingPenalties=True)
# Max locked targets
fit.ship.increaseItemAttr("maxLockedTargets", src.getModifiedItemAttr("maxLockedTargetsBonus"))
# EW cap need increase
groups = [
'Burst Jammer',
'Weapon Disruptor',
'ECM',
'Stasis Grappler',
'Sensor Dampener',
'Target Painter']
fit.modules.filteredItemBoost(lambda mod: mod.item.group.name in groups or
mod.item.requiresSkill("Propulsion Jamming"),
"capacitorNeed", src.getModifiedItemAttr("ewCapacitorNeedBonus"))
# todo: test for April 2016 release
# Block EWAR & projected effects
fit.ship.forceItemAttr("disallowOffensiveModifiers", src.getModifiedItemAttr("disallowOffensiveModifiers"))
fit.ship.forceItemAttr("disallowAssistance", src.getModifiedItemAttr("disallowAssistance"))
# new in April 2016 release
fit.drones.filteredItemBoost(lambda mod: mod.item.requiresSkill("Drones"), "damageMultiplier",
src.getModifiedItemAttr("droneDamageBonus"), stackingPenalties=True)
fit.ship.increaseItemAttr("warpScrambleStatus", src.getModifiedItemAttr("siegeModeWarpStatus"))
fit.ship.boostItemAttr("sensorDampenerResistance", src.getModifiedItemAttr("sensorDampenerResistanceBonus"))
fit.ship.boostItemAttr("remoteAssistanceImpedance", src.getModifiedItemAttr("remoteAssistanceImpedanceBonus"))
fit.ship.boostItemAttr("remoteRepairImpedance", src.getModifiedItemAttr("remoteRepairImpedanceBonus"))
fit.ship.forceItemAttr("disallowTethering", src.getModifiedItemAttr("disallowTethering"))
fit.ship.forceItemAttr("disallowDocking", src.getModifiedItemAttr("disallowDocking"))
| Ebag333/Pyfa | eos/effects/modulebonustriagemodule.py | Python | gpl-3.0 | 4,218 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import difflib
import re
import string
import subprocess
import sys
import tempfile
knownpropsets = { "PSETID_PostRss" : "{00062041-0000-0000-C000-000000000046}",
"PSETID_Sharing" : "{00062040-0000-0000-C000-000000000046}",
"PS_PUBLIC_STRINGS" : "{00020329-0000-0000-C000-000000000046}",
"PSETID_Common" : "{00062008-0000-0000-C000-000000000046}",
"PSETID_Appointment" : "{00062002-0000-0000-C000-000000000046}",
"PSETID_Address" : "{00062004-0000-0000-C000-000000000046}",
"PSETID_Meeting" : "{6ED8DA90-450B-101B-98DA-00AA003F1305}",
"PSETID_Log" : "{0006200A-0000-0000-C000-000000000046}",
"PSETID_CalendarAssistant" : "{11000E07-B51B-40D6-AF21-CAA85EDAB1D0}",
"PSETID_Note" : "{0006200E-0000-0000-C000-000000000046}",
"PSETID_Task": "{00062003-0000-0000-C000-000000000046}",
"PS_INTERNET_HEADERS" : "{00020386-0000-0000-C000-000000000046}",
"PSETID_UnifiedMessaging" : "{4442858E-A9E3-4E80-B900-317A210CC15B}",
"PS_MAPI" : "{00020328-0000-0000-C000-000000000046}",
"PSETID_Attachment" : "{96357f7f-59e1-47d0-99a7-46515c183b54}",
"PSETID_AirSync" : "{71035549-0739-4DCB-9163-00F0580DBBDF}",
"PSETID_Messaging" : "{41F28F13-83F4-4114-A584-EEDB5A6B0BFF}",
"PSETID_XmlExtractedEntities": "{23239608-685D-4732-9C55-4C95CB4E8E33}"
}
knowndatatypes = { "PtypInteger16" : "0x0002",
"PtypInteger32" : "0x0003",
"PtypFloating64" : "0x0005",
"PtypBoolean" : "0x000B",
"PtypEmbeddedTable" : "0x000D",
"PtypObject" : "0x000D",
"PtypString8" : "0x001E",
"PtypString" : "0x001F",
"PtypInteger64" : "0x0014",
"PtypBinary" : "0x0102",
"PtypTime" : "0x0040",
"PtypGuid" : "0x0048",
"PtypServerId" : "0x00FB",
"PtypRestriction" : "0x00FD",
"PtypRuleAction" : "0x00FE",
"PtypMultipleInteger32" : "0x1003",
"PtypMultipleString8" : "0x101E",
"PtypMultipleString" : "0x101F",
"PtypMultipleTime" : "0x1040",
"PtypMultipleBinary" : "0x1102",
}
datatypemap = { "PtypInteger16" : "PT_SHORT",
"PtypInteger32" : "PT_LONG",
"PtypFloating64" : "PT_DOUBLE",
"PtypBoolean" : "PT_BOOLEAN",
"PtypEmbeddedTable" : "PT_OBJECT",
"PtypObject" : "PT_OBJECT",
"PtypString8" : "PT_STRING8",
"PtypString" : "PT_UNICODE",
"PtypInteger64" : "PT_I8",
"PtypBinary" : "PT_BINARY",
"PtypTime" : "PT_SYSTIME",
"PtypGuid" : "PT_CLSID",
"PtypServerId" : "PT_SVREID",
"PtypRestriction" : "PT_SRESTRICT",
"PtypRuleAction" : "PT_ACTIONS",
"PtypMultipleInteger32" : "PT_MV_LONG",
"PtypMultipleString8" : "PT_MV_STRING8",
"PtypMultipleString" : "PT_MV_UNICODE",
"PtypMultipleTime" : "PT_MV_SYSTIME",
"PtypMultipleBinary" : "PT_MV_BINARY"
}
knownrefs = [
"[MS-ASAIRS]",
"[MS-ASCAL]",
"[MS-ASCMD]",
"[MS-ASCNTC]",
"[MS-ASCON]",
"[MS-ASDOC]",
"[MS-ASDTYPE]",
"[MS-ASEMAIL]",
"[MS-ASHTTP]",
"[MS-ASMS]",
"[MS-ASNOTE]",
"[MS-ASPROV]",
"[MS-ASRM]",
"[MS-ASTASK]",
"[MS-ASWBXML]",
"[MS-CAB]",
"[MS-MCI]",
"[MS-OXABREF]",
"[MS-OXBBODY]",
"[MS-OXCDATA]",
"[MS-OXCETF]",
"[MS-OXCFOLD]",
"[MS-OXCFXICS]",
"[MS-OXCHGTR]",
"[MS-OXCICAL]",
"[MS-OXCMAIL]",
"[MS-OXCMSG]",
"[MS-OXCNOTIF]",
"[MS-OXCPERM]",
"[MS-OXCPRPT]",
"[MS-OXCROPS]",
"[MS-OXCRPC]",
"[MS-OXCSPAM]",
"[MS-OXCSTOR]",
"[MS-OXCSYNC]",
"[MS-OXCTABL]",
"[MS-OXDISCO]",
"[MS-OXDOCO]",
"[MS-OXDSCLI]",
"[MS-OXGLOS]",
"[MS-OXIMAP4]",
"[MS-OXLDAP]",
"[MS-OXMSG]",
"[MS-OXMVMBX]",
"[MS-OXOABK]",
"[MS-OXOABKT]",
"[MS-OXOAB]",
"[MS-OXOCAL]",
"[MS-OXOCFG]",
"[MS-OXOCNTC]",
"[MS-OXODLGT]",
"[MS-OXODOC]",
"[MS-OXOFLAG]",
"[MS-OXOJRNL]",
"[MS-OXOMSG]",
"[MS-OXONOTE]",
"[MS-OXOPFFB]",
"[MS-OXOPOST]",
"[MS-OXORMDR]",
"[MS-OXORMMS]",
"[MS-OXORSS]",
"[MS-OXORULE]",
"[MS-OXOSFLD]",
"[MS-OXOSMIME]",
"[MS-OXOSMMS]",
"[MS-OXOSRCH]",
"[MS-OXOTASK]",
"[MS-OXOUM]",
"[MS-OXPFOAB]",
"[MS-OXPHISH]",
"[MS-OXPOP3]",
"[MS-OXPROPS]",
"[MS-OXPROTO]",
"[MS-OXPSVAL]",
"[MS-OXREF]",
"[MS-OXRTFCP]",
"[MS-OXRTFEX]",
"[MS-OXSHARE]",
"[MS-OXSHRMSG]",
"[MS-OXSMTP]",
"[MS-OXTNEF]",
"[MS-OXVCARD]",
"[MS-OXWAVLS]",
"[MS-OXWCONFIG]",
"[MS-OXWMT]",
"[MS-OXWOAB]",
"[MS-OXWOOF]",
"[MS-OXWSADISC]",
"[MS-OXWSATT]",
"[MS-OXWSAUTID]",
"[MS-OXWSBTRF]",
"[MS-OXWSCDATA]",
"[MS-OXWSCONT]",
"[MS-OXWSCONV]",
"[MS-OXWSCORE]",
"[MS-OXWSCVTID]",
"[MS-OXWSDLGM]",
"[MS-OXWSDLIST]",
"[MS-OXWSFOLD]",
"[MS-OXWSGTRM]",
"[MS-OXWSGTZ]",
"[MS-OXWSLVID]",
"[MS-OXWSMSG]",
"[MS-OXWSMSHR]",
"[MS-OXWSMTGS]",
"[MS-OXWSMTRK]",
"[MS-OXWSNTIF]",
"[MS-OXWSPOST]",
"[MS-OXWSPSNTIF]",
"[MS-OXWSRSLNM]",
"[MS-OXWSRULES]",
"[MS-OXWSSRCH]",
"[MS-OXWSSYNC]",
"[MS-OXWSTASK]",
"[MS-OXWSUSRCFG]",
"[MS-OXWSXPROP]",
"[MS-OXWUMS]",
"[MS-PATCH]",
"[MS-XJRNL]",
"[MS-XLOGIN]",
"[MS-XWDCAL]",
"[MS-XWDCNTC]",
"[MS-XWDDOC]",
"[MS-XWDEXT]",
"[MS-XWDFOLD]",
"[MS-XWDMAIL]",
"[MS-XWDNOTIF]",
"[MS-XWDREPL]",
"[MS-XWDSEARCH]",
"[MS-XWDSTRUCTDOC]",
"[MS-XWDVSEC]",
"[MS-NSPI]"
]
knownareas = [
"AB Container",
"Access Control Properties",
"Access Control Properties Property set",
"Address book",
"Address Book",
"Address Properties",
"Address Properties Property set",
"Appointment Property set",
"Appointment",
"Archive",
"BestBody",
"Calendar",
"Calendar Document",
"Calendar Document Property set",
"Calendar Property set",
"Common",
"Common Property set",
"Conferencing",
"Configuration",
"Conflict Note",
"Contact Properties",
"Container Properties",
"Container Properties Property set",
"Conversation Actions",
"Conversations",
"Email",
"E-mail",
"Email Property set",
"Exchange",
"Exchange Administrative",
"ExchangeAdministrative",
"ExchangeAdministrative Property set",
"ExchangeFolder",
"ExchangeFolder Property set",
"ExchangeMessageReadOnly",
"ExchangeMessageStore",
"ExchangeNonTransmittableReserved",
"Exchange Profile Configuration",
"Exchange Property set",
"Extracted Entities",
"Flagging",
"Folder Properties",
"Free/Busy Properties",
"General Message Properties",
"General Message Properties Property set",
"General Report Properties",
"History Properties",
"IC",
"ICS",
"ID Properties",
"ID Properties Property set",
"Journal",
"Mail",
"MapiAddressBook",
"MapiAttachment",
"MapiCommon",
"MapiContainer",
"MAPI Display Tables",
"MapiEnvelope",
"MapiEnvelope Property set",
"MapiMailUser",
"MapiMessage",
"MapiMessageStore",
"MapiNonTransmittable",
"MapiNonTransmittable Property set",
"MapiRecipient",
"MapiStatus",
"Meeting Response",
"Meetings",
"Message Attachment Properties",
"Message Attachment Properties Property set",
"MessageClassDefinedNonTransmittable",
"Message Class Defined Transmittable",
"MessageClassDefinedTransmittable",
"Message Properties",
"Message Properties Property set",
"Message Store Properties",
"Message Time Properties",
"Message Time Properties Property set",
"MIME properties",
"MIME Properties",
"MIME Properties Property set",
"Miscellaneous Properties",
"Miscellaneous Properties Property set",
"Offline Address Book Properties",
"Outlook Application",
"ProviderDefinedNonTransmittable",
"PST Internal",
"Reminders",
"RenMessageFolder",
"RSS",
"Rules",
"Run-time configuration",
"Search",
"Secure Messaging",
"Secure Messaging Properties",
"Server",
"Server-side Rules Properties",
"Server-Side Rules Properties",
"Sharing",
"Site Mailbox",
"SMS",
"Spam",
"Sticky Notes",
"Structured Documents",
"Structured Documents Property set",
"Sync",
"Table Properties",
"Tasks",
"Transport Envelope",
"TransportEnvelope",
"TransportRecipient",
"UM",
"Unified Messaging"
]
properties = []
def make_properties_list(propsfilename):
next_num = 1
propname = ""
propertyinfo = {}
propsfile = file(propsfilename)
for line in propsfile:
if line.startswith("2 Structures"):
break
for line in propsfile:
if line.startswith("2."):
section_num = line.split()[0]
sub_section_num = section_num.split(".")[1]
if int(sub_section_num) != next_num:
print "expected", next_num, "got", sub_section_num
next_num += 1
propname = line.split()[1]
if propertyinfo.has_key("CanonicalName"):
properties.append(propertyinfo.copy())
propertyinfo = {}
if line.strip().startswith("Canonical name:"):
canonicalname = line.strip().split(":")[1].strip()
if ((propname != "") and (propname != canonicalname)):
print "expected", propname, "got", canonicalname
propertyinfo["CanonicalName"] = canonicalname
if line.strip().startswith("Property name:"):
propertyname = line.split(":", 1)
propertyinfo["PropertyName"] = propertyname[1].strip()
if line.strip().startswith("Description:"):
description = line.strip().split(":")[1].strip()
while True:
nextline = propsfile.next().strip()
if (nextline.isspace() or (len(nextline) == 0)):
break
description += (" " + nextline)
propertyinfo["Description"] = description
if line.strip().startswith("Alternate names:"):
altname = line.strip().partition(":")[2]
while True:
nextline = propsfile.next().strip()
if (nextline.isspace() or (len(nextline) == 0)):
break
altname += (", " + nextline)
propertyinfo["AlternateNames"] = altname
if line.strip().startswith("Data type:"):
datatype = line.strip().split(":")[1].strip()
datatype_values = datatype.split(",")
if len(datatype_values) >= 2:
datatypename, datatypeval = datatype_values[:2]
propertyinfo["DataTypeName"] = datatypename.strip()
                # There are three props which do not work very well. Examples:
# PtypString8, 0x001EPtypEmbeddedTable, 0x000D
# or
# PtypString8, 0x001E; PtypEmbeddedTable, 0x000D
propertyinfo["DataTypeValue"] = datatypeval.strip()[:6]
else:
sys.stderr.write("Too few types in %s\n" % line)
continue
if line.strip().startswith("Property set:"):
propset = line.strip().split(":")[1].strip()
if propset.find(" ") != -1:
propset = propset.replace(" - ", '-')
propsetname, propsetval = propset.split(" ")
propertyinfo["PropertySet"] = propsetname.strip()
propertyinfo["PropertySetValue"] = propsetval.strip()
if line.strip().startswith("Property ID:"):
propid = line.strip().split(":")[1].strip()
if propid.startswith("0x"):
propertyinfo["PropertyId"] = int(propid, 16)
else:
print "In section 2.%(section)i (%(propname)s):" % { 'section': (next_num -1), 'propname': propname }
print "\t", propid, "doesn't appear to have correct (hex) format"
if line.strip().startswith("Property long ID (LID):"):
proplid = line.strip().split(":")[1].strip()
if proplid.startswith("0x"):
propertyinfo["PropertyLid"] = int(proplid, 16)
else:
print "In section 2.%(section)i (%(propname)s):" % { 'section': (next_num -1), 'propname': propname }
print "\t", proplid, "doesn't appear to have correct (hex) format"
if line.strip().startswith("Area:"):
areaname = line.strip().split(":")[1].strip()
if (knownareas.count(areaname) == 1):
propertyinfo["Area"] = areaname
else:
print "In section 2.%(section)i (%(propname)s):" % { 'section': (next_num -1), 'propname': propname }
print "\t", areaname, "isn't an expected area name (typo?)"
if line.strip().startswith("References:") or line.strip().startswith("Reference:"):
references = line.strip().split(":")[1].strip()
while (1):
nextline = propsfile.next().strip()
if (nextline.isspace() or (len(nextline) == 0)):
break
references += (nextline)
propertyinfo["References"] = references
if line.strip().startswith("Defining Reference:") or line.strip().startswith("Defining reference:") or line.strip().startswith("Defining references"):
reference = line.strip().split(":")[1].strip()
propertyinfo["DefiningReference"] = reference
propertyinfo["OXPROPS_Sect"] = "2.%i" % (next_num -1)
    # The whole file should now be parsed
properties.append(propertyinfo)
# sanity check
print "Last section parsed was section 2.%(section)i" % { 'section': (next_num-1) }
# Debugging dump of everything
def debug_dump():
for entry in properties:
print entry
extra_private_tags_struct = """\t{ PidTagFolderChildCount, PT_LONG, \"PidTagFolderChildCount\" },
"""
temporary_private_tags = """
#define openchange_private_ROOT_FOLDER_FID PROP_TAG(PT_I8 , 0xd001) /* 0xd0010014 */
#define openchange_private_ROOT_FOLDER_FID_ERROR PROP_TAG(PT_ERROR , 0xd001) /* 0xd001000a */
#define openchange_private_DEFERRED_ACTIONS_FID PROP_TAG(PT_I8 , 0xd002) /* 0xd0020014 */
#define openchange_private_DEFERRED_ACTIONS_FID_ERROR PROP_TAG(PT_ERROR , 0xd002) /* 0xd002000a */
#define openchange_private_SPOOLER_QUEUE_FID PROP_TAG(PT_I8 , 0xd003) /* 0xd0030014 */
#define openchange_private_SPOOLER_QUEUE_FID_ERROR PROP_TAG(PT_ERROR , 0xd003) /* 0xd003000a */
#define openchange_private_IPM_SUBTREE_FID PROP_TAG(PT_I8 , 0xd004) /* 0xd0040014 */
#define openchange_private_IPM_SUBTREE_FID_ERROR PROP_TAG(PT_ERROR , 0xd004) /* 0xd004000a */
#define openchange_private_INBOX_FID PROP_TAG(PT_I8 , 0xd005) /* 0xd0050014 */
#define openchange_private_INBOX_FID_ERROR PROP_TAG(PT_ERROR , 0xd005) /* 0xd005000a */
#define openchange_private_OUTBOX_FID PROP_TAG(PT_I8 , 0xd006) /* 0xd0060014 */
#define openchange_private_OUTBOX_FID_ERROR PROP_TAG(PT_ERROR , 0xd006) /* 0xd006000a */
#define openchange_private_SENT_ITEMS_FID PROP_TAG(PT_I8 , 0xd007) /* 0xd0070014 */
#define openchange_private_SENT_ITEMS_FID_ERROR PROP_TAG(PT_ERROR , 0xd007) /* 0xd007000a */
#define openchange_private_DELETED_ITEMS_FID PROP_TAG(PT_I8 , 0xd008) /* 0xd0080014 */
#define openchange_private_DELETED_ITEMS_FID_ERROR PROP_TAG(PT_ERROR , 0xd008) /* 0xd008000a */
#define openchange_private_COMMON_VIEWS_FID PROP_TAG(PT_I8 , 0xd009) /* 0xd0090014 */
#define openchange_private_COMMON_VIEWS_FID_ERROR PROP_TAG(PT_ERROR , 0xd009) /* 0xd009000a */
#define openchange_private_SCHEDULE_FID PROP_TAG(PT_I8 , 0xd00a) /* 0xd00a0014 */
#define openchange_private_SCHEDULE_FID_ERROR PROP_TAG(PT_ERROR , 0xd00a) /* 0xd00a000a */
#define openchange_private_SEARCH_FID PROP_TAG(PT_I8 , 0xd00b) /* 0xd00b0014 */
#define openchange_private_SEARCH_FID_ERROR PROP_TAG(PT_ERROR , 0xd00b) /* 0xd00b000a */
#define openchange_private_VIEWS_FID PROP_TAG(PT_I8 , 0xd00c) /* 0xd00c0014 */
#define openchange_private_VIEWS_FID_ERROR PROP_TAG(PT_ERROR , 0xd00c) /* 0xd00c000a */
#define openchange_private_SHORTCUTS_FID PROP_TAG(PT_I8 , 0xd00d) /* 0xd00d0014 */
#define openchange_private_SHORTCUTS_FID_ERROR PROP_TAG(PT_ERROR , 0xd00d) /* 0xd00d000a */
#define openchange_private_MailboxGUID PROP_TAG(PT_CLSID , 0xd00e) /* 0xd00e0048 */
#define openchange_private_MailboxGUID_ERROR PROP_TAG(PT_ERROR , 0xd00e) /* 0xd00e000a */
#define openchange_private_ReplicaID PROP_TAG(PT_SHORT , 0xd00f) /* 0xd00f0002 */
#define openchange_private_ReplicaID_ERROR PROP_TAG(PT_ERROR , 0xd00f) /* 0xd00f000a */
#define openchange_private_ReplicaGUID PROP_TAG(PT_CLSID , 0xd010) /* 0xd0100048 */
#define openchange_private_ReplicaGUID_ERROR PROP_TAG(PT_ERROR , 0xd010) /* 0xd010000a */
#define openchange_private_CONTACT_FID PROP_TAG(PT_I8 , 0xd011) /* 0xd0110014 */
#define openchange_private_CONTACT_FID_ERROR PROP_TAG(PT_ERROR , 0xd011) /* 0xd011000a */
#define openchange_private_CALENDAR_FID PROP_TAG(PT_I8 , 0xd012) /* 0xd0120014 */
#define openchange_private_CALENDAR_FID_ERROR PROP_TAG(PT_ERROR , 0xd012) /* 0xd012000a */
#define openchange_private_JOURNAL_FID PROP_TAG(PT_I8 , 0xd013) /* 0xd0130014 */
#define openchange_private_JOURNAL_FID_ERROR PROP_TAG(PT_ERROR , 0xd013) /* 0xd013000a */
#define openchange_private_NOTE_FID PROP_TAG(PT_I8 , 0xd014) /* 0xd0140014 */
#define openchange_private_NOTE_FID_ERROR PROP_TAG(PT_ERROR , 0xd014) /* 0xd014000a */
#define openchange_private_TASK_FID PROP_TAG(PT_I8 , 0xd015) /* 0xd0150014 */
#define openchange_private_TASK_FID_ERROR PROP_TAG(PT_ERROR , 0xd015) /* 0xd015000a */
#define openchange_private_DRAFTS_FID PROP_TAG(PT_I8 , 0xd016) /* 0xd0160014 */
#define openchange_private_DRAFTS_FID_ERROR PROP_TAG(PT_ERROR , 0xd016) /* 0xd016000a */
#define openchange_private_PF_ROOT PROP_TAG(PT_I8 , 0xd017) /* 0xd0170014 */
#define openchange_private_PF_ROOT_ERROR PROP_TAG(PT_ERROR , 0xd017) /* 0xd017000a */
#define openchange_private_PF_IPM_SUBTREE PROP_TAG(PT_I8 , 0xd018) /* 0xd0180014 */
#define openchange_private_PF_IPM_SUBTREE_ERROR PROP_TAG(PT_ERROR , 0xd018) /* 0xd018000a */
#define openchange_private_PF_NONIPM_SUBTREE PROP_TAG(PT_I8 , 0xd019) /* 0xd0190014 */
#define openchange_private_PF_NONIPM_SUBTREE_ERROR PROP_TAG(PT_ERROR , 0xd019) /* 0xd019000a */
#define openchange_private_PF_EFORMS PROP_TAG(PT_I8 , 0xd01a) /* 0xd01a0014 */
#define openchange_private_PF_EFORMS_ERROR PROP_TAG(PT_ERROR , 0xd01a) /* 0xd01a000a */
#define openchange_private_PF_FREEBUSY PROP_TAG(PT_I8 , 0xd01b) /* 0xd01b0014 */
#define openchange_private_PF_FREEBUSY_ERROR PROP_TAG(PT_ERROR , 0xd01b) /* 0xd01b000a */
#define openchange_private_PF_OAB PROP_TAG(PT_I8 , 0xd01c) /* 0xd01c0014 */
#define openchange_private_PF_OAB_ERROR PROP_TAG(PT_ERROR , 0xd01c) /* 0xd01c000a */
#define openchange_private_PF_LOCAL_EFORMS PROP_TAG(PT_I8 , 0xd01d) /* 0xd01d0014 */
#define openchange_private_PF_LOCAL_EFORMS_ERROR PROP_TAG(PT_ERROR , 0xd01d) /* 0xd01d000a */
#define openchange_private_PF_LOCAL_FREEBUSY PROP_TAG(PT_I8 , 0xd01e) /* 0xd01e0014 */
#define openchange_private_PF_LOCAL_FREEBUSY_ERROR PROP_TAG(PT_ERROR , 0xd01e) /* 0xd01e000a */
#define openchange_private_PF_LOCAL_OAB PROP_TAG(PT_I8 , 0xd01f) /* 0xd01f0014 */
#define openchange_private_PF_LOCAL_OAB_ERROR PROP_TAG(PT_ERROR , 0xd01f) /* 0xd01f000a */
"""
temporary_private_tags_struct = """\t{ openchange_private_ROOT_FOLDER_FID, PT_I8, "openchange_private_ROOT_FOLDER_FID" },
{ openchange_private_DEFERRED_ACTIONS_FID, PT_I8, "openchange_private_DEFERRED_ACTIONS_FID" },
{ openchange_private_SPOOLER_QUEUE_FID, PT_I8, "openchange_private_SPOOLER_QUEUE_FID" },
{ openchange_private_IPM_SUBTREE_FID, PT_I8, "openchange_private_IPM_SUBTREE_FID" },
{ openchange_private_INBOX_FID, PT_I8, "openchange_private_INBOX_FID" },
{ openchange_private_OUTBOX_FID, PT_I8, "openchange_private_OUTBOX_FID" },
{ openchange_private_SENT_ITEMS_FID, PT_I8, "openchange_private_SENT_ITEMS_FID" },
{ openchange_private_DELETED_ITEMS_FID, PT_I8, "openchange_private_DELETED_ITEMS_FID" },
{ openchange_private_COMMON_VIEWS_FID, PT_I8, "openchange_private_COMMON_VIEWS_FID" },
{ openchange_private_SCHEDULE_FID, PT_I8, "openchange_private_SCHEDULE_FID" },
{ openchange_private_SEARCH_FID, PT_I8, "openchange_private_SEARCH_FID" },
{ openchange_private_VIEWS_FID, PT_I8, "openchange_private_VIEWS_FID" },
{ openchange_private_SHORTCUTS_FID, PT_I8, "openchange_private_SHORTCUTS_FID" },
{ openchange_private_MailboxGUID, PT_CLSID, "openchange_private_MailboxGUID" },
{ openchange_private_ReplicaID, PT_SHORT, "openchange_private_ReplicaID" },
{ openchange_private_ReplicaGUID, PT_CLSID, "openchange_private_ReplicaGUID" },
{ openchange_private_CONTACT_FID, PT_I8, "openchange_private_CONTACT_FID" },
{ openchange_private_CALENDAR_FID, PT_I8, "openchange_private_CALENDAR_FID" },
{ openchange_private_JOURNAL_FID, PT_I8, "openchange_private_JOURNAL_FID" },
{ openchange_private_NOTE_FID, PT_I8, "openchange_private_NOTE_FID" },
{ openchange_private_TASK_FID, PT_I8, "openchange_private_TASK_FID" },
{ openchange_private_DRAFTS_FID, PT_I8, "openchange_private_DRAFTS_FID" },
{ openchange_private_PF_ROOT, PT_I8, "openchange_private_PF_ROOT" },
{ openchange_private_PF_IPM_SUBTREE, PT_I8, "openchange_private_PF_IPM_SUBTREE" },
{ openchange_private_PF_NONIPM_SUBTREE, PT_I8, "openchange_private_PF_NONIPM_SUBTREE" },
{ openchange_private_PF_EFORMS, PT_I8, "openchange_private_PF_EFORMS" },
{ openchange_private_PF_FREEBUSY, PT_I8, "openchange_private_PF_FREEBUSY" },
{ openchange_private_PF_OAB, PT_I8, "openchange_private_PF_OAB" },
{ openchange_private_PF_LOCAL_EFORMS, PT_I8, "openchange_private_PF_LOCAL_EFORMS" },
{ openchange_private_PF_LOCAL_FREEBUSY, PT_I8, "openchange_private_PF_LOCAL_FREEBUSY" },
{ openchange_private_PF_LOCAL_OAB, PT_I8, "openchange_private_PF_LOCAL_OAB" },
"""
def make_mapi_properties_file():
proplines = []
altnamelines = []
previous_propid_list = []
# Additional properties referenced on MSDN but not in MS-OXPROPS
properties.append({'CanonicalName': 'PidTagRoamingBinary',
'DataTypeName': 'PtypBinary',
'PropertyId': 0x7C09,
'AlternateNames': 'PR_ROAMING_BINARYSTREAM',
'Area': 'Configuration'})
for entry in properties:
if entry.has_key("CanonicalName") == False:
print "Section", entry["OXPROPS_Sect"], "has no canonical name entry"
continue
if entry.has_key("DataTypeName") == False:
print "Section", entry["OXPROPS_Sect"], "has no data type entry"
continue
if entry.has_key("PropertyId"):
propline = "#define "
propline += string.ljust(entry["CanonicalName"], 68)
propline += " PROP_TAG("
propline += string.ljust(datatypemap[entry["DataTypeName"]], 13) + ", "
propline += "0x" + format(entry["PropertyId"], "04X")
propline += ") "
propline += "/* 0x" + format(entry["PropertyId"], "04X") + knowndatatypes[entry["DataTypeName"]][2:] + " */"
propline += "\n"
proplines.append(propline)
propline = "#define "
propline += string.ljust(entry["CanonicalName"] + "_Error", 68)
propline += " PROP_TAG("
propline += string.ljust("PT_ERROR", 13) + ", "
propline += "0x" + format(entry["PropertyId"], "04X")
propline += ") "
propline += "/* 0x" + format(entry["PropertyId"], "04X") + "000A" + " */"
propline += "\n"
proplines.append(propline)
if entry.has_key("AlternateNames"):
for altname in entry["AlternateNames"].split(","):
altname = altname.strip()
if altname.count(" ") > 0:
print "skipping non-conforming alternative name:", altname
elif altname.startswith("PR_"):
if altname.endswith("_A"):
continue
if altname.endswith("_W"):
continue
if knowndatatypes[entry["DataTypeName"]][2:] == "001F":
altline = "#define " + string.ljust(altname + "_UNICODE", 68)
altline += " PROP_TAG(PT_UNICODE , 0x" + format(entry["PropertyId"], "04X") + ")"
altline += " /* 0x" + format(entry["PropertyId"], "04X") + "001F */" + "\n"
altnamelines.append(altline)
altline = "#define " + string.ljust(altname, 68)
altline += " PROP_TAG(PT_STRING8 , 0x" + format(entry["PropertyId"], "04X") + ")"
altline += " /* 0x" + format(entry["PropertyId"], "04X") + "001E */" + "\n"
altnamelines.append(altline)
elif knowndatatypes[entry["DataTypeName"]][2:] == "101F":
altline = "#define " + string.ljust(altname + "_UNICODE", 68)
altline += " PROP_TAG(PT_MV_UNICODE, 0x" + format(entry["PropertyId"], "04X") + ")"
altline += " /* 0x" + format(entry["PropertyId"], "04X") + "101F */" + "\n"
altnamelines.append(altline)
altline = "#define " + string.ljust(altname, 68)
altline += " PROP_TAG(PT_MV_STRING8, 0x" + format(entry["PropertyId"], "04X") + ")"
altline += " /* 0x" + format(entry["PropertyId"], "04X") + "101E */" + "\n"
altnamelines.append (altline)
else:
altnamelines.append("#define " + string.ljust(altname, 68) + " " + entry["CanonicalName"] + "\n")
altline = "#define " + string.ljust(altname + "_ERROR", 68)
altline += " PROP_TAG(PT_ERROR , 0x" + format(entry["PropertyId"], "04X") + ")"
altline += " /* 0x" + format(entry["PropertyId"], "04X") + "000A */" + "\n"
altnamelines.append(altline)
# hack until we properly handle named properties
proplines.append(temporary_private_tags)
# supplemental properties / alternative names
altnamelines.append("#define PidTagFolderChildCount PROP_TAG(PT_LONG , 0x6638) /* 0x66380003 */\n")
altnamelines.append("#define PidTagOriginalDisplayName PROP_TAG(PT_UNICODE , 0x3A13) /* 0x3A13001F */\n")
altnamelines.append("#define PidTagDesignInProgress PROP_TAG(PT_BOOLEAN , 0x3FE4) /* 0x3FE4000B */\n")
altnamelines.append("#define PidTagSecureOrigination PROP_TAG(PT_BOOLEAN , 0x3FE5) /* 0x3FE5000B */\n")
altnamelines.append("#define PidTagExtendedACLData PROP_TAG(PT_BINARY , 0x3FFE) /* 0x3FFE0102 */\n")
altnamelines.append("#define PidTagAttributeSystem PROP_TAG(PT_BOOLEAN , 0x10F5) /* 0x10F5000B */\n")
altnamelines.append("#define PidTagUrlCompName PROP_TAG(PT_UNICODE , 0x10F3) /* 0x10F3001F */\n")
altnamelines.append("#define PidTagNormalMessageSize PROP_TAG(PT_LONG , 0x66b3) /* 0x66B30003 */\n")
altnamelines.append("#define PidTagUrlCompNameSet PROP_TAG(PT_BOOLEAN , 0x0E62) /* 0x0E62000B */\n")
altnamelines.append("#define PR_NORMAL_MESSAGE_SIZE PidTagNormalMessageSize\n");
altnamelines.append("#define PR_DEFAULT_PROFILE 0x00010102\n")
altnamelines.append("#define PR_PROFILE_HOME_SERVER_ADDRS 0x6613101e\n")
altnamelines.append("#define PR_FID PidTagFolderId\n")
altnamelines.append("#define PR_MID PidTagMid\n")
altnamelines.append("#define PR_INSTANCE_NUM PidTagInstanceNum\n")
altnamelines.append("#define PR_FOLDER_CHILD_COUNT 0x66380003\n")
altnamelines.append("#define PR_INST_ID 0x674d0014\n")
altnamelines.append("#define PR_RULE_MSG_PROVIDER 0x65eb001e\n")
altnamelines.append("#define PR_RULE_MSG_NAME 0x65ec001e\n")
altnamelines.append("#define PR_TRANSMITTABLE_DISPLAY_NAME_UNICODE PidTagTransmittableDisplayName\n")
altnamelines.append("#define PR_TRANSMITTABLE_DISPLAY_NAME 0x3a20001e\n")
altnamelines.append("#define PR_ADDRBOOK_MID PidTagAddressBookMessageId\n")
altnamelines.append("#define PR_FREEBUSY_LAST_MODIFIED PidTagFreeBusyRangeTimestamp\n")
altnamelines.append("#define PR_FREEBUSY_START_RANGE PidTagFreeBusyPublishStart\n")
altnamelines.append("#define PR_FREEBUSY_END_RANGE PidTagFreeBusyPublishEnd\n")
altnamelines.append("#define PR_FREEBUSY_ALL_MONTHS PidTagScheduleInfoMonthsMerged\n")
altnamelines.append("#define PR_FREEBUSY_ALL_EVENTS PidTagScheduleInfoFreeBusyMerged\n")
altnamelines.append("#define PR_FREEBUSY_TENTATIVE_MONTHS PidTagScheduleInfoMonthsTentative\n")
altnamelines.append("#define PR_FREEBUSY_TENTATIVE_EVENTS PidTagScheduleInfoFreeBusyTentative\n")
altnamelines.append("#define PR_FREEBUSY_BUSY_MONTHS PidTagScheduleInfoMonthsBusy\n")
altnamelines.append("#define PR_FREEBUSY_BUSY_EVENTS PidTagScheduleInfoFreeBusyBusy\n")
altnamelines.append("#define PR_FREEBUSY_OOF_MONTHS PidTagScheduleInfoMonthsAway\n")
altnamelines.append("#define PR_FREEBUSY_OOF_EVENTS PidTagScheduleInfoFreeBusyAway\n")
altnamelines.append("#define PR_REMINDERS_ONLINE_ENTRYID 0x36d50102\n")
altnamelines.append("#define PR_IPM_PUBLIC_FOLDERS_ENTRYID PidTagIpmPublicFoldersEntryId\n")
altnamelines.append("#define PR_PARENT_FID PidTagParentFolderId\n")
altnamelines.append("#define PR_URL_COMP_NAME_SET PidTagUrlCompNameSet\n")
altnamelines.append("#define PR_ASSOC_CONTENT_COUNT PidTagAssociatedContentCount\n")
altnamelines.append("#define PR_NTSD_MODIFICATION_TIME 0x3FD60040\n")
altnamelines.append("#define PR_CREATOR_SID 0x0E580102\n")
altnamelines.append("#define PR_LAST_MODIFIER_SID 0x0E590102\n")
altnamelines.append("#define PR_EXTENDED_ACL_DATA 0x3FFE0102\n")
altnamelines.append("#define PR_FOLDER_XVIEWINFO_E 0x36E00102\n")
altnamelines.append("#define PR_FOLDER_VIEWLIST 0x36EB0102\n")
altnamelines.append("#define PR_EMS_AB_HOME_MTA 0x8007001F\n")
altnamelines.append("#define PR_EMS_AB_ASSOC_NT_ACCOUNT 0x80270102\n")
altnamelines.append("#define PR_DELETED_MSG_COUNT 0x66400003\n")
altnamelines.append("#define PR_RECIPIENT_ON_NORMAL_MSG_COUNT 0x66af0003\n")
altnamelines.append("#define PR_CONVERSATION_KEY PidTagConversationKey\n")
# write properties out to a master header file
f = open('libmapi/property_tags.h', 'w')
f.write("/* Automatically generated by script/makepropslist.py. Do not edit */\n")
sortedproplines = sorted(proplines)
for propline in sortedproplines:
f.write(propline)
f.close()
f = open('libmapi/property_altnames.h', 'w')
f.write("/* Automatically generated by script/makepropslist.py. Do not edit */\n")
sortedaltnamelines = sorted(altnamelines)
for propline in sortedaltnamelines:
f.write(propline)
f.close()
# write canonical properties out for lookup
proplines = []
f = open('libmapi/property_tags.c', 'w')
proplines = []
f.write("/* Automatically generated by script/makepropslist.py. Do not edit */\n")
f.write("#include \"libmapi/libmapi.h\"\n")
f.write("#include \"libmapi/libmapi_private.h\"\n")
f.write("#include \"gen_ndr/ndr_exchange.h\"\n")
f.write("#include \"libmapi/property_tags.h\"\n\n")
f.write("struct mapi_proptags\n")
f.write("{\n")
f.write("\tuint32_t proptag;\n")
f.write("\tuint32_t proptype;\n")
f.write("\tconst char *propname;\n")
f.write("};\n")
f.write("\n")
for entry in properties:
if (entry.has_key("CanonicalName") == False):
print "Section", entry["OXPROPS_Sect"], "has no canonical name entry"
continue
if (entry.has_key("DataTypeName") == False):
print "Section", entry["OXPROPS_Sect"], "has no data type entry"
continue
if entry.has_key("PropertyId"):
propline = "\t{ "
propline += string.ljust(entry["CanonicalName"] + ",", 68)
propline += string.ljust(datatypemap[entry["DataTypeName"]] + ",", 14)
propline += string.ljust("\"" + entry["CanonicalName"] + "\"" , 68) + "},\n"
proplines.append(propline)
propline = "\t{ "
propline += string.ljust(entry["CanonicalName"] + "_Error,", 68)
propline += string.ljust("PT_ERROR,", 14)
propline += string.ljust("\"" + entry["CanonicalName"] + "_Error" + "\"" , 68) + "},\n"
proplines.append(propline)
proplines.append(extra_private_tags_struct)
# this is just a temporary hack till we properly support named properties
proplines.append(temporary_private_tags_struct)
sortedproplines = sorted(proplines)
f.write("static struct mapi_proptags canonical_property_tags[] = {\n")
for propline in sortedproplines:
f.write(propline)
f.write("\t{ 0, 0, \"NULL\" }\n")
f.write("};\n")
f.write("""
_PUBLIC_ const char *get_proptag_name(uint32_t proptag)
{
uint32_t idx;
for (idx = 0; canonical_property_tags[idx].proptag; idx++) {
if (canonical_property_tags[idx].proptag == proptag) {
return canonical_property_tags[idx].propname;
}
}
if (((proptag & 0xFFFF) == PT_STRING8) ||
((proptag & 0xFFFF) == PT_MV_STRING8)) {
proptag += 1; /* try as _UNICODE variant */
}
for (idx = 0; canonical_property_tags[idx].proptag; idx++) {
if (canonical_property_tags[idx].proptag == proptag) {
return canonical_property_tags[idx].propname;
}
}
return NULL;
}
_PUBLIC_ uint32_t get_proptag_value(const char *propname)
{
uint32_t idx;
for (idx = 0; canonical_property_tags[idx].proptag; idx++) {
if (!strcmp(canonical_property_tags[idx].propname, propname)) {
return canonical_property_tags[idx].proptag;
}
}
return 0;
}
_PUBLIC_ uint16_t get_property_type(uint16_t untypedtag)
{
uint32_t idx;
uint16_t current_type;
for (idx = 0; canonical_property_tags[idx].proptag; idx++) {
if ((canonical_property_tags[idx].proptag >> 16) == untypedtag) {
current_type = canonical_property_tags[idx].proptype;
if (current_type != PT_ERROR && current_type != PT_STRING8) {
return current_type;
}
}
}
OC_DEBUG(5, "type for property '%x' could not be deduced", untypedtag);
return 0;
}
""")
f.close()
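	# Note on the generated get_proptag_name() above: when a lookup fails for a
	# PT_STRING8 (0x001E) or PT_MV_STRING8 tag, it retries tag + 1, i.e. the
	# PT_UNICODE variant -- e.g. an unknown 0x0037001E would be retried as
	# 0x0037001F (values here are illustrative only).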
# write canonical properties out for IDL input
proplines = []
previous_idl_proptags = []
previous_idl_pidtags = []
f = open('properties_enum.h', 'w')
f.write("/* Automatically generated by script/makepropslist.py. Do not edit */\n")
f.write("typedef [v1_enum, flag(NDR_PAHEX)] enum {\n")
for entry in properties:
if (entry.has_key("CanonicalName") == False):
print "Section", entry["OXPROPS_Sect"], "has no canonical name entry"
continue
if (entry.has_key("DataTypeName") == False):
print "Section", entry["OXPROPS_Sect"], "has no data type entry"
continue
if entry.has_key("PropertyId"):
if entry["PropertyId"] in previous_idl_proptags:
# Generate property tag
pidtag = format(entry["PropertyId"], "04X") + knowndatatypes[entry["DataTypeName"]][2:]
if pidtag in previous_idl_pidtags:
print "Skipping output of enum entry for", entry["CanonicalName"], "(duplicate)"
continue
else:
propline = "\t" + string.ljust(entry["CanonicalName"], 68)
propline += " = 0x" + format(entry["PropertyId"], "04X") + knowndatatypes[entry["DataTypeName"]][2:]
propline += ",\n"
proplines.append(propline)
continue
propline = "\t" + string.ljust(entry["CanonicalName"], 68)
propline += " = 0x" + format(entry["PropertyId"], "04X") + knowndatatypes[entry["DataTypeName"]][2:]
propline += ",\n"
proplines.append(propline)
if entry["DataTypeName"] == "PtypString":
propline = "\t" + string.ljust(entry["CanonicalName"] + "_string8", 68)
propline += " = 0x" + format(entry["PropertyId"], "04X") + "001E"
propline += ",\n"
proplines.append(propline)
propline = "\t" + string.ljust(entry["CanonicalName"] + "_Error", 68)
propline += " = 0x" + format(entry["PropertyId"], "04X") + "000A"
propline += ",\n"
proplines.append(propline)
previous_idl_proptags.append(entry["PropertyId"])
previous_idl_pidtags.append(format(entry["PropertyId"], "04X") + knowndatatypes[entry["DataTypeName"]][2:])
sortedproplines = sorted(proplines)
for propline in sortedproplines:
f.write(propline)
# Add additional properties, referenced on MSDN but not in MS-OXPROPS
f.write("\t" + string.ljust("PidTagAssociatedContentCount", 68) + " = 0x36170003,\n")
f.write("\t" + string.ljust("PidTagFolderChildCount", 68) + " = 0x66380003,\n")
f.write("\t" + string.ljust("PidTagIpmPublicFoldersEntryId", 68) + " = 0x66310102,\n")
f.write("\t" + string.ljust("PidTagConversationKey", 68) + " = 0x000b0102,\n")
f.write("\t" + string.ljust("PidTagContactEmailAddresses", 68) + " = 0x3a56101f,\n")
f.write("\t" + string.ljust("PidTagGenerateExchangeViews", 68) + " = 0x36e9000b,\n")
f.write("\t" + string.ljust("PidTagLatestDeliveryTime", 68) + " = 0x00190040,\n")
f.write("\t" + string.ljust("PidTagMailPermission", 68) + " = 0x3a0e000b,\n")
f.write("\tMAPI_PROP_RESERVED = 0xFFFFFFFF\n")
f.write("} MAPITAGS;\n")
f.close()
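	# Shape of a generated properties_enum.h entry (taken from the hard-coded
	# additions above, shown here only as illustration):
	#     PidTagFolderChildCount                    = 0x66380003,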
# write canonical properties out for pyopenchange mapistore
proplines = []
previous_idl_proptags = []
previous_idl_pidtags = []
f = open('pyopenchange/pymapi_properties.c', 'w')
f.write("""
/* Automatically generated by script/makepropslist.py. Do not edit */
#include <Python.h>
#include "pyopenchange/pymapi.h"
int pymapi_add_properties(PyObject *m)
{
""")
for entry in properties:
if (entry.has_key("CanonicalName") == False):
print "Section", entry["OXPROPS_Sect"], "has no canonical name entry"
continue
if (entry.has_key("DataTypeName") == False):
print "Section", entry["OXPROPS_Sect"], "has no data type entry"
continue
if entry.has_key("PropertyId"):
if entry["PropertyId"] in previous_idl_proptags:
pidtag = format(entry["PropertyId"], "04X") + knowndatatypes[entry["DataTypeName"]][2:]
if pidtag in previous_idl_pidtags:
print "Skipping output of Python bindings entry for", entry["CanonicalName"], "(duplicate)"
continue
else:
propline = "\tPyModule_AddObject(m, \"" + entry["CanonicalName"] + "\", "
propline += "PyInt_FromLong(0x" + format(entry["PropertyId"], "04X")
propline += knowndatatypes[entry["DataTypeName"]][2:]
propline += "));\n"
proplines.append(propline)
continue
propline = "\tPyModule_AddObject(m, \"" + entry["CanonicalName"] + "\", "
propline += "PyInt_FromLong(0x" + format(entry["PropertyId"], "04X")
propline += knowndatatypes[entry["DataTypeName"]][2:]
propline += "));\n"
proplines.append(propline)
propline = "\tPyModule_AddObject(m, \"" + entry["CanonicalName"] + "_Error\", "
propline += "PyInt_FromLong(0x" + format(entry["PropertyId"], "04X") + "000A"
propline += "));\n"
proplines.append(propline)
previous_idl_proptags.append(entry["PropertyId"])
previous_idl_pidtags.append(format(entry["PropertyId"], "04X") + knowndatatypes[entry["DataTypeName"]][2:])
sortedproplines = sorted(proplines)
for propline in sortedproplines:
f.write(propline)
f.write("""
return 0;
}
""")
f.close()
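	# Illustrative shape of one generated Python binding line (the property name
	# and value are examples, not necessarily present in the parsed list):
	#     PyModule_AddObject(m, "PidTagFolderChildCount", PyInt_FromLong(0x66380003));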
# write canonical properties out for openchangedb - probably remove this later
proplines = []
previous_idl_proptags = []
previous_idl_pidtags = []
f = open('mapiproxy/libmapiproxy/openchangedb_property.c', 'w')
f.write("""
/* Automatically generated by script/makepropslist.py. Do not edit */
#include "mapiproxy/dcesrv_mapiproxy.h"
#include "libmapiproxy.h"
#include "libmapi/libmapi.h"
#include "libmapi/libmapi_private.h"
struct pidtags {
uint32_t proptag;
const char *pidtag;
};
static struct pidtags pidtags[] = {
""")
for entry in properties:
if (entry.has_key("CanonicalName") == False):
print "Section", entry["OXPROPS_Sect"], "has no canonical name entry"
continue
if (entry.has_key("DataTypeName") == False):
print "Section", entry["OXPROPS_Sect"], "has no data type entry"
continue
if entry.has_key("PropertyId"):
if entry["PropertyId"] in previous_idl_proptags:
pidtag = format(entry["PropertyId"], "04X") + knowndatatypes[entry["DataTypeName"]][2:]
if pidtag in previous_idl_pidtags:
print "Skipping output of pidtags entry for", entry["CanonicalName"], "(duplicate)"
continue
else:
propline = "\t{ " + string.ljust(entry["CanonicalName"] + ",", 68)
propline += "\"" + entry["CanonicalName"] + "\" },\n"
proplines.append(propline)
continue
propline = "\t{ " + string.ljust(entry["CanonicalName"] + ",", 68)
propline += "\"" + entry["CanonicalName"] + "\" },\n"
proplines.append(propline)
previous_idl_proptags.append(entry["PropertyId"])
previous_idl_pidtags.append(format(entry["PropertyId"], "04X") + knowndatatypes[entry["DataTypeName"]][2:])
sortedproplines = sorted(proplines)
for propline in sortedproplines:
f.write(propline)
f.write("""\t{ 0, NULL }
};
static const char *_openchangedb_property_get_string_attribute(uint32_t proptag)
{
uint32_t i;
uint32_t tag_id = (proptag >> 16);
for (i = 0; pidtags[i].pidtag; i++) {
if (tag_id == (pidtags[i].proptag >> 16)) {
return pidtags[i].pidtag;
}
}
return NULL;
}
_PUBLIC_ const char *openchangedb_property_get_attribute(uint32_t proptag)
{
uint32_t i;
uint32_t prop_type = proptag & 0x0FFF;
if (prop_type == PT_UNICODE || prop_type == PT_STRING8) {
return _openchangedb_property_get_string_attribute(proptag);
}
for (i = 0; pidtags[i].pidtag; i++) {
if (pidtags[i].proptag == proptag) {
return pidtags[i].pidtag;
}
}
OC_DEBUG(0, "Unsupported property tag '0x%.8x'", proptag);
return NULL;
}
""")
previous_canonical_names = {}
def check_duplicate_canonical_names():
print "Checking canonical names:"
for entry in properties:
canonicalname = entry["CanonicalName"]
if previous_canonical_names.has_key(canonicalname):
print "\tIn section", entry["OXPROPS_Sect"], ", canonical name:", entry["CanonicalName"], "duplicates name in section", previous_canonical_names[canonicalname]
previous_canonical_names[canonicalname] = (entry["OXPROPS_Sect"])
def check_duplicate_alternative_names():
print "Checking alternative names:"
previous_alternative_names = {}
for entry in properties:
if entry.has_key("AlternateNames"):
for altname in entry["AlternateNames"].split(", "):
altname = altname.strip()
if altname.count(" ") > 0:
print "\tIn section", entry["OXPROPS_Sect"], ", alternative name:", altname, "contains space"
if previous_alternative_names.has_key(altname):
print "\tIn section", entry["OXPROPS_Sect"], ", alternative name:", altname, "duplicates name in section", previous_alternative_names[altname]
if previous_canonical_names.has_key(altname):
print "\tIn section", entry["OXPROPS_Sect"], ", alternative name:", altname, "duplicates canonical name in section", previous_alternative_names[altname]
previous_alternative_names[altname] = (entry["OXPROPS_Sect"])
def check_duplicate_propids():
print "Checking property IDs / LIDs:"
previous_propids = {}
previous_proplids = {}
for entry in properties:
if entry.has_key("PropertyId"):
propnum = entry["PropertyId"]
if previous_propids.has_key(propnum) and propnum < 0x6800:
print "\tIn section", entry["OXPROPS_Sect"], "(" + entry["CanonicalName"] + ")"
print "\t\tProperty id 0x" + format(propnum, "04x"), "(" + entry["DataTypeName"] + ") duplicates property id in section", previous_propids[propnum][0], "(" + previous_propids[propnum][1] + ")"
if (entry.has_key("DataTypeName")):
previous_propids[propnum] = (entry["OXPROPS_Sect"], entry["DataTypeName"])
else:
previous_propids[propnum] = (entry["OXPROPS_Sect"], "[No DataType]")
elif entry.has_key("PropertyLid"):
propnum = entry["PropertyLid"]
if previous_proplids.has_key(propnum):
print "\tIn section", entry["OXPROPS_Sect"], "(" + entry["CanonicalName"] + ")"
print "\t\tProperty LID 0x" + format(propnum, "08x"), "(" + entry["DataTypeName"] + ") duplicates property LID in section", previous_proplids[propnum][0], "(" + previous_proplids[propnum][1] + ")"
if (entry.has_key("DataTypeName")):
previous_proplids[propnum] = (entry["OXPROPS_Sect"], entry["DataTypeName"])
else:
previous_proplids[propnum] = (entry["OXPROPS_Sect"], "[No DataTypeName]")
elif entry["CanonicalName"].startswith("PidLid"):
print "Section", entry["OXPROPS_Sect"], "(" + entry["CanonicalName"] + ") has neither LID nor ID"
elif entry["CanonicalName"].startswith("PidName"):
pass
else:
print "Section", entry["OXPROPS_Sect"], "(" + entry["CanonicalName"] + ") is weird"
if (entry["CanonicalName"].startswith("PidName") and (entry.has_key("PropertyId") or entry.has_key("PropertyLid"))):
print "Section", entry["OXPROPS_Sect"], "(" + entry["CanonicalName"], "in", entry["PropertySet"] + ") has neither LID or ID"
def check_proptypes():
print "Checking that data types match:"
for entry in properties:
datatypename = entry["DataTypeName"]
datatypevalue = entry["DataTypeValue"]
if (knowndatatypes.has_key(datatypename) == False):
print "\tIn section %(section)s : unknown data type %(type)s" % { 'section': entry["OXPROPS_Sect"], 'type': datatypename }
elif (knowndatatypes[datatypename] != datatypevalue):
print "\tIn section %(section)s : got value %(value)i for type %(type)i (expected %(expected)i)" % { 'section': entry["OXPROPS_Sect"], 'value': datatypeval, 'type': datatypename, 'expected': knowndatatype[datatypename] }
def check_propsets():
print "Checking that property sets match:"
for entry in properties:
if entry.has_key("PropertySet"):
propsetname = entry["PropertySet"]
propsetvalue = entry["PropertySetValue"]
if (knownpropsets.has_key(propsetname) == False):
print "\tIn section %(section)s : unknown property set %(propset)s" % { 'section': entry["OXPROPS_Sect"], 'propset': propsetname }
elif (knownpropsets[propsetname] != propsetvalue):
print "\tIn section %(section)s : got value %(value)s for type %(type)s (expected %(expected)s)" % { 'section': entry["OXPROPS_Sect"], 'value': propsetvalue, 'type': propsetname, 'expected': knownpropsets[propsetname] }
def check_descriptions():
print "Checking for descriptions:"
for entry in properties:
if entry.has_key("Description") == False:
print "\tIn section %(section)s : there is no description" % { 'section': entry["OXPROPS_Sect"] }
def check_areas():
print "Checking for areas:"
for entry in properties:
if entry.has_key("Area") == False:
print "\tIn section %(section)s : there is no area" % { 'section': entry["OXPROPS_Sect"] }
def check_reference_line(entry, line, isdefining):
if line.endswith(","):
print "\tIn section %(section)s : trailing comma in (defining?) references" % { 'section': entry["OXPROPS_Sect"] }
line = line.rstrip(",")
for docentry in line.split(","):
docentry = docentry.strip()
if docentry == "":
print "\tIn section %(section)s : empty (defining) reference section" % { 'section': entry["OXPROPS_Sect"] }
elif knownrefs.count(docentry) != 1:
if len(docentry.split(" ")) > 1:
if docentry.split(" ")[1].strip().startswith("section"):
# thats ok
pass
else:
print "\tIn section %(section)s : missing comma in (defining?) references: %(docentry)s" % { 'section': entry["OXPROPS_Sect"], 'docentry': docentry }
else:
print "\tIn section %(section)s : unknown document: %(docname)s" % { 'section': entry["OXPROPS_Sect"], 'docname': docentry }
else:
try:
reffile = file("docs/" + docentry + ".txt")
reftext = reffile.read().replace(" ", "")
if docentry == "[MS-OXCFXICS]":
if (reftext.count((entry["CanonicalName"][6:])) < 1):
print "\tIn section %(section)s : (defining) references contains %(docname)s, but %(prop)s wasn't found in that document" % { 'section': entry["OXPROPS_Sect"], 'docname': docentry, 'prop': entry['CanonicalName'] }
elif reftext.count(entry["CanonicalName"]) < 1:
print "\tIn section %(section)s : (defining) references contains %(docname)s, but %(prop)s wasn't found in that document" % { 'section': entry["OXPROPS_Sect"], 'docname': docentry, 'prop': entry['CanonicalName'] }
except IOError:
pass
def check_references():
print "Checking for references:"
for entry in properties:
if entry.has_key("References"):
check_reference_line(entry, entry["References"], False)
elif entry.has_key("DefiningReference"):
check_reference_line(entry, entry["DefiningReference"], True)
else:
print "\tIn section %(section)s : there is no (defining) reference entry" % { 'section': entry["OXPROPS_Sect"] }
def check_properties_list():
check_proptypes()
check_propsets()
check_duplicate_canonical_names()
check_duplicate_alternative_names()
check_duplicate_propids()
# check_descriptions()
check_areas()
check_references()
def next_available_id(knownprops, increment):
try:
knownprops.index(increment)
knownprops.remove(increment)
increment += 1
return next_available_id(knownprops, increment)
except ValueError:
return increment
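# Worked example for next_available_id() (values are illustrative): with
# knownprops == [0x8000, 0x8001, 0x8003] and increment == 0x8000, the call
# consumes 0x8000 and 0x8001 from the list and returns 0x8002, the first
# unused mapped id.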
def find_key(dic, val):
"""return the key of dictionary dic given the value"""
try:
for k,v in dic.iteritems():
if v == val:
return k
except ValueError:
print "Value %s not found" % val
def make_mapi_named_properties_file():
content = ""
attributes = ""
start_content = ""
namedprops = []
knownprops = []
previous_ldif_lid = []
previous_ldif_name = []
for entry in properties:
if (entry.has_key("CanonicalName") == False):
print "Section", entry["OXPROPS_Sect"], "has no canonical name entry"
continue
if (entry.has_key("DataTypeName") == False):
print "Section", entry["OXPROPS_Sect"], "has no data type entry"
continue
if (entry.has_key("PropertyId") == False):
if entry.has_key("PropertySet"):
guid = entry["PropertySet"]
else:
guid = "[No PropSet]"
			# It's a named property
name = entry["CanonicalName"]
proptype = entry["DataTypeName"]
if entry.has_key("PropertyLid"):
proplid = "0x" + format(entry["PropertyLid"], "04x")
if proplid in previous_ldif_lid:
print "Skipping output for named properties MNID_ID", name, "(duplicate)"
					continue
kind = "MNID_ID"
OOM = "NULL" # use as default
propname = "NULL"
if entry.has_key("PropertyName"):
OOM = entry["PropertyName"].strip()
elif entry.has_key("AlternateNames"):
altname = entry["AlternateNames"].strip()
if altname.startswith("dispid"):
OOM = altname[6:]
else:
OOM = altname
else:
pass
previous_ldif_lid.append(proplid)
else:
proplid = "0x0000"
kind = "MNID_STRING"
OOM = "NULL"
propname = "NULL" # use as default
if entry.has_key("PropertyName"):
propname = entry["PropertyName"].strip()
elif entry.has_key("AlternateNames"):
for altname in entry["AlternateNames"]:
altname = altname.strip()
if altname.startswith("dispid"):
propname = altname[6:]
search_dup = "%s/%s" % (propname, guid)
if search_dup in previous_ldif_name:
print "Skipping output for named properties MNID_STRING", name, "(duplicate)"
					continue
previous_ldif_name.append(search_dup)
namedprop = (name, OOM, proplid, propname, knowndatatypes[proptype], kind, guid)
namedprops.append(namedprop)
else:
# It's not a named property
# Store conflicting properties with propid > 0x8000
propid = entry["PropertyId"]
if propid >= 0x8000:
try:
knownprops.index(propid)
except ValueError:
knownprops.append(propid)
# Create the default GUID containers
for key in sorted(knownpropsets):
cn = knownpropsets[key].strip('{}').lower()
oleguid_ldif = "dn: CN=%s,CN=External,CN=Server\n" \
"objectClass: External\n" \
"cn: %s\n" \
"name: %s\n" \
"oleguid: %s\n\n" % (cn, cn, str(key), cn)
content += oleguid_ldif
# Write named properties
sortednamedprops = sorted(namedprops, key=lambda namedprops: namedprops[6]) # sort by guid
increment = next_available_id(knownprops, 0x8000)
for line in sortednamedprops:
propset = line[6]
if propset not in knownpropsets:
# Try to guess from the closest match
result = difflib.get_close_matches(propset, knownpropsets.keys(),
1, 0.9)
if len(result) > 0:
propset = result[0]
else:
raise KeyError(propset)
oleguid = knownpropsets[propset].strip('{}').lower()
if line[5] == "MNID_STRING":
named_props_ldif = "dn: CN=%s,CN=MNID_STRING,CN=%s,CN=External,CN=Server\n" \
"objectClass: External\n" \
"objectClass: MNID_STRING\n" \
"cn: %s\n" \
"canonical: %s\n" \
"oleguid: %s\n" \
"mapped_id: 0x%.4x\n" \
"prop_id: %s\n" \
"prop_type: %s\n" \
"prop_name: %s\n\n" % (
line[3], oleguid, line[3], line[0], oleguid, increment,
line[2], line[4], line[3])
else:
named_props_ldif = "dn: CN=%s,CN=MNID_ID,CN=%s,CN=External,CN=Server\n" \
"objectClass: External\n" \
"objectClass: MNID_ID\n" \
"cn: %s\n" \
"canonical: %s\n" \
"oleguid: %s\n" \
"mapped_id: 0x%.4x\n" \
"prop_id: %s\n" \
"prop_type: %s\n" \
"oom: %s\n\n" % (
line[2], oleguid, line[2], line[0], oleguid, increment,
line[2], line[4], line[1])
content += named_props_ldif
increment += 1
increment = next_available_id(knownprops, increment)
# Store remaining reserved named properties IDs in attributes
for ids in sorted(knownprops):
attributes += "reserved_tags: 0x%.4x\n" % ids
start_content = "# LDIF file automatically auto-generated by script/makepropslist.py. Do not edit\n\n"
start_content += "dn: CN=Server\n" \
"objectClass: top\n" \
"cn: Server\n\n" \
\
"dn: CN=Internal,CN=Server\n" \
"objectClass: Internal\n" \
"objectClass: container\n" \
"objectClass: top\n" \
"cn: Internal\n" \
"mapping_index: 0x0001\n\n" \
\
"dn: CN=External,CN=Server\n" \
"objectClass: External\n" \
"objectClass: container\n" \
"objectClass: top\n" \
"cn: External\n" \
"mapping_index: 0x%.4x\n" % increment
start_content += attributes + "\n"
start_content += "dn: CN=Users,CN=Server\n" \
"objectClass: container\n" \
"objectClass: top\n" \
"cn: Users\n\n"
content = start_content + content
	# write named properties buffered file out to LDIF file
f = open('setup/mapistore/mapistore_namedprops_v2.ldif', 'w')
f.write(content)
f.close()
# write named properties defines and structure
f = open('libmapi/mapi_nameid.h', 'w')
f.write("""
/* Automatically generated by script/makepropslist.py. Do not edit */
#ifndef __MAPI_NAMEID_H__
#define __MAPI_NAMEID_H__
/* NOTE TO DEVELOPERS: If this is a MNID_STRING named property, then
* we use the arbitrary 0xa000-0xafff property ID range for internal
* mapping purpose only.
*/
struct mapi_nameid_tags {
uint32_t proptag;
const char *OOM;
uint16_t lid;
const char *Name;
uint32_t propType;
uint8_t ulKind;
const char *OLEGUID;
uint32_t position;
};
struct mapi_nameid_names {
uint32_t proptag;
const char *propname;
};
struct mapi_nameid {
struct MAPINAMEID *nameid;
uint16_t count;
struct mapi_nameid_tags *entries;
};
/* MNID_ID named properties */
""")
for line in sortednamedprops:
if line[5] == "MNID_ID":
proptag = "0x%.8x" % (int(line[2], 16) << 16 | int(line[4], 16))
propline = "#define %s %s\n" % (string.ljust(line[0], 60), string.ljust(proptag, 20))
f.write(propline)
f.write("\n/* MNID_STRING named properties (internal mapping) */\n")
mnstring_id = 0xa000
for line in sortednamedprops:
if line[5] == "MNID_STRING":
proptag = "0x%.8x" % ((mnstring_id << 16) | int(line[4], 16))
propline = "#define %s %s\n" % (string.ljust(line[0], 60), string.ljust(proptag, 20))
mnstring_id += 1
f.write(propline)
# Additional properties
propline = "#define %s %s\n" % (string.ljust("PidLidRemoteTransferSize", 60), string.ljust("0x8f050003", 20))
f.write(propline)
f.write("#endif /* ! MAPI_NAMEID_H__ */")
f.close()
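	# Illustrative: MNID_STRING named properties carry no real property ID, so
	# the block above hands them internal IDs from 0xa000 upwards; the first
	# PtypString (0x001f) entry therefore becomes the internal proptag 0xa000001f.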
# write named properties internal mapping
f = open('libmapi/mapi_nameid_private.h', 'w')
f.write("""
/* Automatically generated by script/makepropslist.py. Do not edit */
#ifndef __MAPI_NAMEID_PRIVATE_H__
#define __MAPI_NAMEID_PRIVATE_H__
static struct mapi_nameid_tags mapi_nameid_tags[] = {
""")
for line in sortednamedprops:
if line[5] == "MNID_ID":
OOM = "\"%s\"" % line[1]
key = find_key(knowndatatypes, line[4])
datatype = datatypemap[key]
propline = "{ %s, %s, %s, %s, %s, %s, %s, %s },\n" % (
string.ljust(line[0], 60), string.ljust(OOM, 65), line[2], line[3],
string.ljust(datatype, 15), "MNID_ID", line[6], "0x0")
f.write(propline)
for line in sortednamedprops:
if line[5] == "MNID_STRING":
OOM = "%s" % line[1]
key = find_key(knowndatatypes, line[4])
datatype = datatypemap[key]
propline = "{ %s, %s, %s, \"%s\", %s, %s, %s, %s },\n" % (
string.ljust(line[0], 60), string.ljust(OOM, 65), line[2], line[3],
string.ljust(datatype, 15), "MNID_STRING", line[6], "0x0")
f.write(propline)
	# Additional named properties
propline = "{ %s, %s, %s, %s, %s, %s, %s, %s },\n" % (
string.ljust("PidLidRemoteTransferSize", 60), string.ljust("\"RemoteTransferSize\"", 65), "0x8f05",
"NULL", string.ljust("PT_LONG", 15), "MNID_ID", "PSETID_Remote", "0x0")
f.write(propline)
propline = "{ %s, %s, %s, %s, %s, %s, %s, %s }\n" % (
string.ljust("0x00000000", 60), string.ljust("NULL", 65), "0x0000", "NULL",
string.ljust("PT_UNSPECIFIED", 15), "0x0", "NULL", "0x0")
f.write(propline)
f.write("""
};
""")
f.write("""
static struct mapi_nameid_names mapi_nameid_names[] = {
""")
for line in sortednamedprops:
propline = "{ %s, \"%s\" },\n" % (string.ljust(line[0], 60), line[0])
f.write(propline)
# Additional named properties
propline = "{ %s, \"%s\" }\n" % (string.ljust("PidLidRemoteTransferSize", 60), "PidLidRemoteTransferSize")
propline = "{ %s, \"%s\" }\n" % (string.ljust("0x00000000", 60), "NULL")
f.write(propline)
f.write("""
};
#endif /* !MAPI_NAMEID_PRIVATE_H__ */
""")
f.close()
def dump_areas_count():
areas = {}
for area in knownareas:
areas[area] = 0
for entry in properties:
if (entry.has_key("Area") == False):
print "Section", entry["OXPROPS_Sect"], "has no area entry"
else:
areas[entry["Area"]] += 1
for area in knownareas:
print area, ":", areas[area]
def fix_problems(propsfilename):
retcode = subprocess.call(["sed", "-i",
"-e", "s/.Dta type: PtypBoolean, 0x000B/Data type: PtypBoolean, 0x000B/",
"-e", "s/.Data Type: PtypString, 0x001F/Data type: PtypString, 0x001F/",
"-e", "s/.Data type: PtyString, 0x001F/Data type: PtypString, 0x001F/",
"-e", "s/.Area: MAPI Display Tables\[MS-OXOABK\] section 2.2.3.33/Area: MAPI Display Tables\\nDefining Reference: \[MS-OXOABK\] section 2.2.3.33/",
"-e", "s/.Area: ProviderDefinedNonTransmittable\[MS-OXCTABL\] section 2.2.1.2/Area: ProviderDefinedNonTransmittable\\nDefining Reference: \[MS-OXCTABL\] section 2.2.1.2/",
"-e", "s/.Area: Server-Side Rules Properties\[MS-OXORULE\] section 2.2.1.3.2.2/Area: Server-Side Rules Properties\\nDefining Reference: \[MS-OXORULE\] section 2.2.1.3.2.2/",
"-e", "s/.Area: MapiMailUser\[MS-OXOABK\] section 2.2.4.66/Area: MapiMailUser\\nDefining Reference: \[MS-OXOABK\] section 2.2.4.66/",
"-e", "s/.Description: \[MS-OXORULE\] section 2.2.7.3/Defining Reference: \[MS-OXORULE\] section 2.2.7.3/",
"-e", "s/.Property set: PSTID_Sharing {00062040-0000-0000-C000-000000000046}/Property set: PSETID_Sharing {00062040-0000-0000-C000-000000000046}/",
"-e", "s/.Property set: PSETID_Address {00062004-0000-0000-C000-00000000046}/Property set: PSETID_Address {00062004-0000-0000-C000-000000000046}/",
"-e", "s/.Property set: PSETID_Address{00062004-0000-0000-C000-000000000046}/Property set: PSETID_Address {00062004-0000-0000-C000-000000000046}/",
"-e", "s/.Property set: PSETID_Appointment {00062002-0000-0000-C000-0000000000046}/Property set: PSETID_Appointment {00062002-0000-0000-C000-000000000046}/",
"-e", "s/.Property set: PSETID_Address {00062004-0000-0000-C00-0000000000046}/Property set: PSETID_Address {00062004-0000-0000-C000-000000000046}/",
"-e", "s/.Consuming Reference: \[MS-OXCICAL\] Alternate names: PR_NEXT_SEND_ACCT/Consuming Reference: \[MS-OXCICAL\]\\nAlternate names: PR_NEXT_SEND_ACCT/",
"-e", "s/.Alternate names: PR_WB_SF_ID//",
"-e", "s/.Alternate names: PR_WB_SF_TAG//",
"-e", "s/.Alternate names: PR_EMS_AB_DL_MEM_REJECT_PERMS//",
"-e", "s/.Alternate names: PR_EMS_AB_DL_MEM_SUBMIT_PERMS//",
"-e", "s/.General Message Properties Defining reference/General Message Properties\\nDefining reference/",
"-e", "s/.Data type: PtypString8, 0x001E; PtypEmbeddedTable, 0x000D/Data type: PtypString8, 0x001E/",
"-e", "s/.Data type: PtypString, 0x001F; PtypMultipleBinary, 0x1102/Data type: PtypString, 0x001F/",
propsfilename])
if retcode != 0:
print "Could not fix problem:", retcode
sys.exit(retcode)
# Fix data type error for PidTagWlinkGroupHeaderID - PtypGuid instead of PtypBinary
with open(propsfilename) as f:
file_str = f.read()
file_str = file_str.replace("Description: Specifies the ID of the navigation shortcut that groups other navigation shortcuts.\n\nProperty ID: 0x6842\n\nData type: PtypBinary, 0x0102", "Description: Specifies the ID of the navigation shortcut that groups other navigation shortcuts.\n\nProperty ID: 0x6842\n\nData type: PtypGuid, 0x0048")
with open(propsfilename, "w") as f:
f.write(file_str)
f.close()
def main():
oxpropsparser = argparse.ArgumentParser(description='Convert MS-OXPROPS to other formats')
oxpropsparser.add_argument('--pdffile', required=True)
oxpropsparser.add_argument('--sanitycheck', action='store_true')
oxpropsparser.add_argument('--sanitycheckonly', action='store_true')
args = oxpropsparser.parse_args()
propsfile = tempfile.mkstemp(suffix='txt')
propsfilename = propsfile[1]
retcode = subprocess.call(["pdftotext", "-nopgbrk", "-layout", args.pdffile, propsfilename])
if retcode != 0:
print "Could not convert file to text:", retcode
sys.exit(retcode)
fix_problems(propsfilename)
make_properties_list(propsfilename)
if args.sanitycheck or args.sanitycheckonly:
check_properties_list() # uses global variable
# dump_areas_count()
if args.sanitycheckonly == False:
make_mapi_properties_file()
make_mapi_named_properties_file()
if __name__ == "__main__":
main()
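# Typical invocation (sketch; assumes pdftotext from poppler/xpdf is installed
# and that MS-OXPROPS.pdf has been downloaded locally):
#   python script/makepropslist.py --pdffile MS-OXPROPS.pdf --sanitycheck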
|
Zentyal/openchange
|
script/makepropslist.py
|
Python
|
gpl-3.0
| 64,957
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>:
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=protected-access
"""Tests for BaseKeyParser."""
import logging
import unittest
from unittest import mock
from PyQt5.QtCore import Qt
from qutebrowser.keyinput import basekeyparser
from qutebrowser.test import stubs, helpers
from qutebrowser.utils import objreg
CONFIG = {'input': {'timeout': 100}}
BINDINGS = {'test': {'<Ctrl-a>': 'ctrla',
'a': 'a',
'ba': 'ba',
'ax': 'ax',
'ccc': 'ccc'},
'test2': {'foo': 'bar', '<Ctrl+X>': 'ctrlx'}}
fake_keyconfig = mock.Mock(spec=['get_bindings_for'])
fake_keyconfig.get_bindings_for.side_effect = lambda s: BINDINGS[s]
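# Sketch of how the stub above is consumed (values come straight from BINDINGS):
#   fake_keyconfig.get_bindings_for('test')   -> {'<Ctrl-a>': 'ctrla', 'a': 'a', ...}
#   fake_keyconfig.get_bindings_for('test2')  -> {'foo': 'bar', '<Ctrl+X>': 'ctrlx'}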
def setUpModule():
"""Mock out some imports in basekeyparser."""
basekeyparser.QObject = mock.Mock()
logging.disable(logging.WARNING)
def tearDownModule():
"""Restore mocked out stuff."""
logging.disable(logging.NOTSET)
class SplitCountTests(unittest.TestCase):
"""Test the _split_count method.
Attributes:
kp: The BaseKeyParser we're testing.
"""
def setUp(self):
self.kp = basekeyparser.BaseKeyParser(0, supports_count=True)
def test_onlycount(self):
"""Test split_count with only a count."""
self.kp._keystring = '10'
self.assertEqual(self.kp._split_count(), (10, ''))
def test_normalcount(self):
"""Test split_count with count and text."""
self.kp._keystring = '10foo'
self.assertEqual(self.kp._split_count(), (10, 'foo'))
def test_minuscount(self):
"""Test split_count with a negative count."""
self.kp._keystring = '-1foo'
self.assertEqual(self.kp._split_count(), (None, '-1foo'))
def test_expcount(self):
"""Test split_count with an exponential count."""
self.kp._keystring = '10e4foo'
self.assertEqual(self.kp._split_count(), (10, 'e4foo'))
def test_nocount(self):
"""Test split_count with only a command."""
self.kp._keystring = 'foo'
self.assertEqual(self.kp._split_count(), (None, 'foo'))
def test_nosupport(self):
"""Test split_count with a count when counts aren't supported."""
self.kp._supports_count = False
self.kp._keystring = '10foo'
self.assertEqual(self.kp._split_count(), (None, '10foo'))
class ReadConfigTests(unittest.TestCase):
"""Test reading the config."""
def setUp(self):
objreg.register('key-config', fake_keyconfig)
basekeyparser.usertypes.Timer = mock.Mock()
def tearDown(self):
objreg.delete('key-config')
def test_read_config_invalid(self):
"""Test reading config without setting it before."""
kp = basekeyparser.BaseKeyParser(0)
with self.assertRaises(ValueError):
kp.read_config()
def test_read_config_valid(self):
"""Test reading config."""
kp = basekeyparser.BaseKeyParser(0, supports_count=True,
supports_chains=True)
kp.read_config('test')
self.assertIn('ccc', kp.bindings)
self.assertIn('ctrl+a', kp.special_bindings)
kp.read_config('test2')
self.assertNotIn('ccc', kp.bindings)
self.assertNotIn('ctrl+a', kp.special_bindings)
self.assertIn('foo', kp.bindings)
self.assertIn('ctrl+x', kp.special_bindings)
class SpecialKeysTests(unittest.TestCase):
"""Check execute() with special keys.
Attributes:
kp: The BaseKeyParser to be tested.
"""
def setUp(self):
patcher = mock.patch(
'qutebrowser.keyinput.basekeyparser.usertypes.Timer',
autospec=True)
patcher.start()
objreg.register('key-config', fake_keyconfig)
self.addCleanup(patcher.stop)
self.kp = basekeyparser.BaseKeyParser(0)
self.kp.execute = mock.Mock()
self.kp.read_config('test')
def tearDown(self):
objreg.delete('key-config')
def test_valid_key(self):
"""Test a valid special keyevent."""
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, Qt.ControlModifier))
self.kp.handle(helpers.fake_keyevent(Qt.Key_X, Qt.ControlModifier))
self.kp.execute.assert_called_once_with('ctrla', self.kp.Type.special)
def test_invalid_key(self):
"""Test an invalid special keyevent."""
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, (Qt.ControlModifier |
Qt.AltModifier)))
self.assertFalse(self.kp.execute.called)
def test_keychain(self):
"""Test a keychain."""
self.kp.handle(helpers.fake_keyevent(Qt.Key_B))
self.kp.handle(helpers.fake_keyevent(Qt.Key_A))
self.assertFalse(self.kp.execute.called)
class KeyChainTests(unittest.TestCase):
"""Test execute() with keychain support.
Attributes:
kp: The BaseKeyParser to be tested.
timermock: The mock to be used as timer.
"""
def setUp(self):
"""Set up mocks and read the test config."""
objreg.register('key-config', fake_keyconfig)
self.timermock = mock.Mock()
basekeyparser.usertypes.Timer = mock.Mock(return_value=self.timermock)
self.kp = basekeyparser.BaseKeyParser(0, supports_chains=True,
supports_count=False)
self.kp.execute = mock.Mock()
self.kp.read_config('test')
def tearDown(self):
objreg.delete('key-config')
def test_valid_special_key(self):
"""Test valid special key."""
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, Qt.ControlModifier))
self.kp.handle(helpers.fake_keyevent(Qt.Key_X, Qt.ControlModifier))
self.kp.execute.assert_called_once_with('ctrla', self.kp.Type.special)
self.assertEqual(self.kp._keystring, '')
def test_invalid_special_key(self):
"""Test invalid special key."""
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, (Qt.ControlModifier |
Qt.AltModifier)))
self.assertFalse(self.kp.execute.called)
self.assertEqual(self.kp._keystring, '')
def test_keychain(self):
"""Test valid keychain."""
# Press 'x' which is ignored because of no match
self.kp.handle(helpers.fake_keyevent(Qt.Key_X, text='x'))
# Then start the real chain
self.kp.handle(helpers.fake_keyevent(Qt.Key_B, text='b'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, text='a'))
self.kp.execute.assert_called_once_with('ba', self.kp.Type.chain, None)
self.assertEqual(self.kp._keystring, '')
    def test_ambiguous_keychain(self):
        """Test ambiguous keychain."""
        basekeyparser.config = stubs.ConfigStub(CONFIG)
        # We start with 'a' where the keychain gives us an ambiguous result.
# Then we check if the timer has been set up correctly
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, text='a'))
self.assertFalse(self.kp.execute.called)
basekeyparser.usertypes.Timer.assert_called_once_with(
self.kp, 'ambigious_match')
self.timermock.setSingleShot.assert_called_once_with(True)
self.timermock.setInterval.assert_called_once_with(100)
self.assertTrue(self.timermock.timeout.connect.called)
self.assertFalse(self.timermock.stop.called)
self.timermock.start.assert_called_once_with()
# Now we type an 'x' and check 'ax' has been executed and the timer
# stopped.
self.kp.handle(helpers.fake_keyevent(Qt.Key_X, text='x'))
self.kp.execute.assert_called_once_with('ax', self.kp.Type.chain, None)
self.timermock.stop.assert_called_once_with()
self.assertEqual(self.kp._keystring, '')
def test_invalid_keychain(self):
"""Test invalid keychain."""
self.kp.handle(helpers.fake_keyevent(Qt.Key_B, text='b'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_C, text='c'))
self.assertEqual(self.kp._keystring, '')
class CountTests(unittest.TestCase):
"""Test execute() with counts."""
def setUp(self):
objreg.register('key-config', fake_keyconfig)
basekeyparser.usertypes.Timer = mock.Mock()
self.kp = basekeyparser.BaseKeyParser(0, supports_chains=True,
supports_count=True)
self.kp.execute = mock.Mock()
self.kp.read_config('test')
def test_no_count(self):
"""Test with no count added."""
self.kp.handle(helpers.fake_keyevent(Qt.Key_B, text='b'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, text='a'))
self.kp.execute.assert_called_once_with('ba', self.kp.Type.chain, None)
self.assertEqual(self.kp._keystring, '')
def test_count_0(self):
"""Test with count=0."""
self.kp.handle(helpers.fake_keyevent(Qt.Key_0, text='0'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_B, text='b'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, text='a'))
self.kp.execute.assert_called_once_with('ba', self.kp.Type.chain, 0)
self.assertEqual(self.kp._keystring, '')
def test_count_42(self):
"""Test with count=42."""
self.kp.handle(helpers.fake_keyevent(Qt.Key_4, text='4'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_2, text='2'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_B, text='b'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, text='a'))
self.kp.execute.assert_called_once_with('ba', self.kp.Type.chain, 42)
self.assertEqual(self.kp._keystring, '')
def test_count_42_invalid(self):
"""Test with count=42 and invalid command."""
# Invalid call with ccx gets ignored
self.kp.handle(helpers.fake_keyevent(Qt.Key_4, text='4'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_2, text='2'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_B, text='c'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, text='c'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, text='x'))
self.assertFalse(self.kp.execute.called)
self.assertEqual(self.kp._keystring, '')
# Valid call with ccc gets the correct count
self.kp.handle(helpers.fake_keyevent(Qt.Key_4, text='2'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_2, text='3'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_B, text='c'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, text='c'))
self.kp.handle(helpers.fake_keyevent(Qt.Key_A, text='c'))
self.kp.execute.assert_called_once_with('ccc', self.kp.Type.chain, 23)
self.assertEqual(self.kp._keystring, '')
def tearDown(self):
objreg.delete('key-config')
if __name__ == '__main__':
unittest.main()
|
larryhynes/qutebrowser
|
qutebrowser/test/keyinput/test_basekeyparser.py
|
Python
|
gpl-3.0
| 11,659
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 PSchem Contributors (see CONTRIBUTORS for details)
# This file is part of PSchem Database
# PSchem is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PSchem is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with PSchem Database. If not, see <http://www.gnu.org/licenses/>.
#print 'CellViews in'
from Index import Index
from Primitives import *
#from Design import *
from xml.etree import ElementTree as et
#print 'CellViews out'
class CellView():
def __init__(self, name, cell):
self._name = name
self._attribs = {}
self._cell = cell
cell.cellViewAdded(self)
@property
def name(self):
return self._name
@property
def path(self):
return self.cell.path + '/' + self.name
@property
def cell(self):
return self._cell
@property
def attributes(self):
return self._attribs
@property
def library(self):
return self.cell.library
@property
def database(self):
return self.cell.database
def save(self):
pass
def restore(self):
pass
def remove(self):
#for a in list(self.attributes):
# a.remove()
self.cell.cellViewRemoved(self)
        self._cell = None
def __repr__(self):
return "<CellView '" + self.path + "'>"
class Diagram(CellView):
def __init__(self, name, cell):
CellView.__init__(self, name, cell)
#self._elems = set()
self._items = set()
self._lines = set()
self._rects = set()
self._customPaths = set()
self._ellipses = set()
self._ellipseArcs = set()
self._labels = set()
self._attributeLabels = set()
#self._uu = 160 # default DB units per user units
self._attribs['uu'] = 160 # default DB units per user units
#self._name = 'diagram'
self._designUnits = set()
@property
def designUnits(self):
return self._designUnits
@property
def items(self):
return self._items
@property
def elems(self):
return self.lines | self.rects | self.labels | \
self.attributeLabels | self.customPaths | \
self.ellipses | self.ellipseArcs
@property
def lines(self):
return self._lines
@property
def rects(self):
return self._rects
@property
def customPaths(self):
return self._customPaths
@property
def ellipses(self):
return self._ellipses
@property
def ellipseArcs(self):
return self._ellipseArcs
@property
def labels(self):
return self._labels
@property
def attributeLabels(self):
return self._attributeLabels
@property
def uu(self):
return self._attribs['uu']
@uu.setter
def uu(self, uu):
self._attribs['uu'] = uu
def instanceItemAdded(self, view):
self.items.add(view)
for elem in self.elems:
elem.addToView(view)
#for designUnit in self._designUnits:
# elem.addToDesignUnit(designUnit)
def instanceItemRemoved(self, view):
self.items.remove(view)
for elem in self.elems:
elem.removeFromView()
def designUnitAdded(self, designUnit):
self.designUnits.add(designUnit)
scene = designUnit.scene()
for e in self.elems:
e.addToView(scene)
def designUnitRemoved(self, designUnit):
self.designUnits.remove(designUnit)
#def updateDesignUnits(self):
# for d in self._designUnits:
# d.updateDesignUnit()
# #v.updateItem()
def elementAdded(self, elem):
pass
#for designUnit in self._designUnits:
# elem.addToDesignUnit(designUnit)
def elementChanged(self, elem):
pass
#for designUnit in self._designUnits:
# elem.addToDesignUnit(designUnit)
def elementRemoved(self, elem):
pass
#for designUnit in self._designUnits:
# elem.removeFromDesignUnit(designUnit)
#def addElem(self, elem):
# "main entry point for adding new elements to diagram"
# #self._elems.add(elem)
# elem.addToDiagram(self)
# for designUnit in self._designUnits:
# elem.addToDesignUnit(designUnit)
#def removeElem(self, elem):
# "main entry point for removing elements from diagram"
# for designUnit in self._designUnits:
# elem.removeFromDesignUnit(designUnit)
# elem.removeFromDiagram(self)
def lineAdded(self, line):
self.lines.add(line)
def lineRemoved(self, line):
self.lines.remove(line)
def rectAdded(self, rect):
self.rects.add(rect)
def rectRemoved(self, rect):
self.rects.remove(rect)
def customPathAdded(self, customPath):
self.customPaths.add(customPath)
def customPathRemoved(self, customPath):
self.customPaths.remove(customPath)
def ellipseAdded(self, ellipse):
self.ellipses.add(ellipse)
def ellipseRemoved(self, ellipse):
self.ellipses.remove(ellipse)
def ellipseArcAdded(self, ellipseArc):
self.ellipseArcs.add(ellipseArc)
def ellipseArcRemoved(self, ellipseArc):
self.ellipseArcs.remove(ellipseArc)
def labelAdded(self, label):
self.labels.add(label)
def labelRemoved(self, label):
self.labels.remove(label)
def attributeLabelAdded(self, attributeLabel):
self.attributeLabels.add(attributeLabel)
def attributeLabelRemoved(self, attributeLabel):
self.attributeLabels.remove(attributeLabel)
def remove(self):
for e in list(self.elems):
e.remove()
#self.removeElem(e)
for du in list(self.designUnits):
du.remove()
#self.removeDesignUnit(o)
CellView.remove(self)
def save(self):
root = et.Element(self.name)
tree = et.ElementTree(root)
for a in sorted(self.attributes):
root.attrib[str(a)] = str(self.attributes[a])
for e in sorted(self.elems, key=Element.name):
xElem = e.toXml()
root.append(xElem)
self._indentET(tree.getroot())
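        # Note: et.dump() writes the serialized tree to stdout; persisting to a
        # file is left unimplemented here (see the commented-out 'return tree').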
et.dump(tree)
#return tree
def restore(self):
pass
def _indentET(self, elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
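            # The loop variable deliberately shadows 'elem' (classic ElementTree
            # indent recipe): after the loop, 'elem' refers to the last child,
            # whose tail is fixed up below.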
for elem in elem:
self._indentET(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def __repr__(self):
return "<Diagram '" + self.path + "'>"
class Schematic(Diagram):
def __init__(self, name, cell):
Diagram.__init__(self, name, cell)
#self._name = 'schematic'
self._pins = set()
self._instances = set()
self._netSegments = set()
self._solderDots = set()
self._nets = set()
self._index = Index()
#self._netSegmentsAdded = set()
#self._netSegmentsRemoved = set()
def designUnitAdded(self, designUnit):
self.designUnits.add(designUnit)
#scene = designUnit.scene
#for e in self.elems-self.instances:
#for e in self.elems:
# e.addToView(scene)
#for i in self.instances():
# i.addToView(designUnit)
#for ns in self.netSegments():
# ns.addToDesignUnit(designUnit)
#designUnit.checkNets()
#def components(self):
# components = map(lambda i: i.cell(), self.instances())
# return components.sort()
@property
def elems(self):
return self.lines | self.rects | self.labels | \
self.attributeLabels | self.customPaths | \
self.ellipses | self.ellipseArcs | \
self.pins | self.instances | self.netSegments | self.solderDots
@property
def pins(self):
return self._pins
@property
def instances(self):
return self._instances
@property
def netSegments(self):
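        # Flush any deferred net clean-up (split/merge/solder dots) before
        # exposing the segment set.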
self.database.runDeferredProcesses(self)
return self._netSegments
@property
def solderDots(self):
self.database.runDeferredProcesses(self)
return self._solderDots
@property
def index(self):
return self._index
def pinAdded(self, pin):
self.pins.add(pin)
def pinRemoved(self, pin):
self.pins.remove(pin)
def instanceAdded(self, instance):
self.instances.add(instance)
def instanceRemoved(self, instance):
self.instances.remove(instance)
#def addNet(self, net):
# "call only from addToDiagram"
# self.nets.add(net)
#def removeNet(self, net):
# "call only from removeFromDiagram"
# self.nets.remove(net)
#@property
#def nets(self):
# return self._nets #filter(lambda e: isinstance(e, CNet), self.elems)
def netSegmentAdded(self, netSegment):
#print self.__class__.__name__, "ns added", netSegment
self.index.netSegmentAdded(netSegment)
self._netSegments.add(netSegment) #don't trigger deferred processing
#self._netSegmentsAdded.add(netSegment)
self.database.requestDeferredProcessing(self)
#self.splitNetSegment(netSegment)
#for designUnit in self._designUnits:
# netSegment.addToDesignUnit(designUnit)
# if designUnit.scene():
# netSegment.addToView(designUnit.scene())
def netSegmentRemoved(self, netSegment):
#print self.__class__.__name__, "ns removed", netSegment
self.index.netSegmentRemoved(netSegment)
self._netSegments.remove(netSegment) #don't trigger deferred processing
#self._netSegmentsRemoved.add(netSegment)
self.database.requestDeferredProcessing(self)
def solderDotAdded(self, solderDot):
self.index.solderDotAdded(solderDot)
#for designUnit in self._designUnits:
# #solderDot.addToDesignUnit(designUnit)
# if designUnit.scene():
# solderDot.addToView(designUnit.scene())
self._solderDots.add(solderDot) #don't trigger deferred processing
def solderDotRemoved(self, solderDot):
self.index.solderDotRemoved(solderDot)
self._solderDots.remove(solderDot) #don't trigger deferred processing
def splitNetSegment(self, netSegment):
"""
Check if (newly added) netSegment should be split or if it requires
other net segments to split.
"""
idx = self.index
(p1, p2) = idx.coordsOfNetSegments[netSegment]
n = 0
#first split other segments
for p in (p1, p2):
segments = idx.netSegmentsMidPointsAt(p[0], p[1])
for s in list(segments):
if s in idx.coordsOfNetSegments:
#print "split ", s, p, idx.coordsOfNetSegments()[s]
s.splitAt(p)
n += 1
#then, if necessary, split the netSegment
for p in list(idx.netSegmentsEndPoints):
if netSegment in idx.netSegmentsMidPointsAt(p[0], p[1]):
#print "split ", netSegment, p
netSegment.splitAt(p)
n += 1
break
#print self.__class__.__name__, "split", n, "segments"
def splitNetSegments(self):
"""
Go through all net segments in the design unit and make sure that
none of them crosses an end point (of a segment), an instance pin
or a port.
"""
idx = self.index
n = 0
for p in list(idx.netSegmentsEndPoints):
segments = idx.netSegmentsMidPointsAt(p[0], p[1])
for s in list(segments):
if s in idx.coordsOfNetSegments:
#print "split ", s, p
s.splitAt(p)
n += 1
#print self.__class__.__name__, "split", n, "segments"
def mergeNetSegments(self):
"""
        Go through all net segments in the design unit and make sure that
        no two segments are merely continuations of each other.
"""
idx = self.index
n = 0
for p in list(idx.netSegmentsEndPoints):
segments = list(idx.netSegmentsEndPointsAt(p[0], p[1]))
if len(segments) > 1:
if all(s.isHorizontal for s in segments) or \
all(s.isVertical for s in segments) or \
all(s.isDiagonal45 for s in segments) or \
all(s.isDiagonal135 for s in segments):
n += len(segments)
segments[0].mergeSegments(segments)
#print self.__class__.__name__, "merged", n, "segments"
def checkSolderDots(self):
"""
        Go through all end points and count the number of segments connected there.
        If more than two segments meet at a point, check whether a solder dot
        already exists and, if not, add one.
"""
idx = self.index
n = 0
for p in list(idx.netSegmentsEndPoints):
segments = idx.netSegmentsEndPointsAt(p[0], p[1])
if len(segments) > 2:
if len(idx.solderDotsAt(p[0], p[1])) == 0:
SolderDot(self, self.database.layers, p[0], p[1])
n += 1
#print self.__class__.__name__, "added", n, "solder dots"
def checkNets(self):
self.splitNetSegments()
self.mergeNetSegments()
self.checkSolderDots()
def runDeferredProcess(self):
"""
Runs deferred processes of the Schematic class.
        Do not call it directly. Use Database.runDeferredProcesses(object)
"""
self.checkNets()
def __repr__(self):
return "<Schematic '" + self.path + "'>"
class Symbol(Diagram):
def __init__(self, name, cell):
Diagram.__init__(self, name, cell)
#self._name = 'symbol'
self._symbolPins = set()
def designUnitAdded(self, designUnit):
self.designUnits.add(designUnit)
#scene = designUnit.scene
#for e in self.elems:
# e.addToView(scene)
@property
def elems(self):
return self.lines | self.rects | self.labels | \
self.attributeLabels | self.customPaths | \
self.ellipses | self.ellipseArcs | \
self.symbolPins
@property
def symbolPins(self):
return self._symbolPins
def symbolPinAdded(self, symbolPin):
self.symbolPins.add(symbolPin)
def symbolPinRemoved(self, symbolPin):
self.symbolPins.remove(symbolPin)
def __repr__(self):
return "<Symbol '" + self.path + "'>"
class Netlist(CellView):
def __init__(self, name, cell):
CellView.__init__(self, name, cell)
#self._name = 'netlist'
def __repr__(self):
return "<Netlist '" + self.path + "'>"
|
PSchem/PSchem
|
Database/CellViews.py
|
Python
|
gpl-3.0
| 16,089
|
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from adapt.intent import IntentBuilder
from mycroft.messagebus.message import Message
from mycroft.skills.core import MycroftSkill
__author__ = 'jdorleans'
class StopSkill(MycroftSkill):
def __init__(self):
super(StopSkill, self).__init__(name="StopSkill")
def initialize(self):
# TODO - To be generalized in MycroftSkill
intent = IntentBuilder("StopIntent").require("StopKeyword").build()
self.register_intent(intent, self.handle_intent)
def handle_intent(self, event):
self.emitter.emit(Message("mycroft.stop"))
def stop(self):
pass
def create_skill():
return StopSkill()
|
JarbasAI/jarbas-core
|
mycroft/jarbas-skills/skill_stop/__init__.py
|
Python
|
gpl-3.0
| 1,362
|
"This module provides functionality for compilation of strings as dolfin SubDomains."
# Copyright (C) 2008-2008 Martin Sandve Alnes
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2008-07-01
# Last changed: 2011-04-18
import re
import os
import hashlib
import instant
# Import local compile_extension_module
from dolfin.compilemodules.compilemodule import (compile_extension_module,
expression_to_code_fragments,
math_header)
from dolfin.cpp import deprecation
import dolfin.cpp as cpp
__all__ = ["compile_subdomains", "CompiledSubDomain"]
_map_args = ["x", "y"]
_subdomain_template = """
class %(classname)s: public SubDomain
{
public:
%(members)s
%(classname)s()
{
%(constructor)s
}
/// Return true for points inside the sub domain
bool inside(const Array<double>& x, bool on_boundary) const
{
%(inside)s
}
};
"""
# TODO: Support implementation of map as well
"""
/// Map coordinate x in domain H to coordinate y in domain G (used for periodic boundary conditions)
void map(const Array<double>& x, Array<double>& y) const
{
%(map)s
}
"""
def expression_to_subdomain(cpparg, classname):
"""
Generate code for a :py:class:`SubDomain <dolfin.cpp.SubDomain>`
subclass for a single expression.
"""
# Assure we have a simple string expression
assert isinstance(cpparg, str)
# Extract code fragments from the expr and defaults
fragments, members = expression_to_code_fragments(\
[cpparg], ["x", "on_boundary", "DOLFIN_EPS"])
# Generate code for inside()
insidecode = " return %s;" % cpparg
# Generate code for map()
#mapcode = "..."
# Connect the code fragments using the function template code
fragments["inside"] = insidecode
fragments["classname"] = classname
#fragments["map"] = mapcode
code = _subdomain_template % fragments
return code, members
def compile_subdomain_code(code, classname):
# Complete the code
code = math_header + \
"""
namespace dolfin
{
%s
}
""" % code
# Compile the extension module
compiled_module = compile_extension_module(code)
# Get compiled class
return getattr(compiled_module, classname)
def CompiledSubDomain(cppcode, **kwargs):
"""
Compile a C++ string expression into a
:py:class:`SubDomain <dolfin.cpp.SubDomain>` instance.
*Arguments*
cppcode
a string containing an expression in C++ syntax.
If the string contains a name, it is assumed to be a scalar
variable name, and is added as a public member of the generated
subdomain. All such members need a default initial value.
            If the string contains a class name, it is interpreted as a
            complete implementation of a subclass of :py:class:`SubDomain
            <dolfin.cpp.SubDomain>`.
*Examples of usage*
.. code-block:: python
left = CompiledSubDomain("near(x[0], 0) && on_boundary")
right = CompiledSubDomain("near(x[1], 1) && on_boundary")
center = CompiledSubDomain("near(x[1], c)", c = 0.5)
"""
if not isinstance(cppcode, str):
raise TypeError("expected a 'str'")
if isinstance(cppcode, str) and "class" in cppcode and \
"SubDomain" in cppcode:
        members = []
        code = cppcode
        classname = re.findall(r"class[ ]+([\w]+).*", code)[0]
else:
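        # Presumably the class name is derived from a hash of the code so that
        # identical expressions map to the same generated class/module.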
classname = "CompiledSubDomain" + hashlib.md5(cppcode).hexdigest()
code, members = expression_to_subdomain(cppcode, classname)
SubDomainClass = compile_subdomain_code(code, classname)
# Check passed default arguments
not_allowed = [n for n in dir(cpp.SubDomain) if n[0] !="_"]
not_allowed += ["cppcode"]
if not all(member in kwargs for member in members):
missing = []
for member in members:
if member not in kwargs:
missing.append(member)
missing = ", ".join("'%s'" % miss for miss in missing)
raise RuntimeError("expected a default value to all member "\
"variables in the SubDomain. Missing: %s." % missing)
for name in kwargs.keys():
if name in not_allowed:
raise RuntimeError("Parameter name: '%s' is not allowed. It is "\
"part of the interface of SubDomain" % name)
if not (all(isinstance(value, (int, float)) \
for value in kwargs.values())):
raise TypeError("expected default arguments for member variables "\
"to be scalars.")
# Store compile arguments for possible later use
SubDomainClass.cppcode = cppcode
# Instantiate CompiledSubDomain
subdomain = SubDomainClass()
# Set default variables
for member, value in kwargs.items():
setattr(subdomain, member, value)
return subdomain
def compile_subdomains(cppcode):
"""
Compile C++ string expressions into SubDomain instances.
    *Arguments*
        cppcode
            a string or a list of strings containing expressions in C++ syntax.
    NOTE: This function is deprecated. Use CompiledSubDomain instead.
    If cppcode is a `str`, it is interpreted as a C++ string with
    complete implementations of subclasses of SubDomain. The compiled
    subdomains returned will be in the same order as they are defined
    in this code.
If it is a list, each item of the list is interpreted as a logical
`inside` expression, and the compiled subdomains returned will be
in the same order as they occur in this list.
If an expression string contains a name, it is assumed to be a
scalar variable name, and is added as a public member of the
generated subdomain.
*Examples of usage*
.. code-block:: python
left = compile_subdomains("x[0] == 0")
right = compile_subdomains("x[1] == 1")
or
.. code-block:: python
bc = compile_subdomains(["x[0] == 0", "x[1] == 1"])
"""
deprecation("compile_subdomains", "1.3.0", \
"compiled_subdomains has been renamed to CompiledSubDomain.")
# If passing a list we compile each SubDomain on its own
if isinstance(cppcode, list):
return [CompiledSubDomain(code_str) for code_str in cppcode]
return CompiledSubDomain(cppcode)
|
maciekswat/dolfin_1.3.0
|
site-packages/dolfin/compilemodules/subdomains.py
|
Python
|
gpl-3.0
| 7,123
|
import oauth2 as oauth
from clatoolkit.models import UnitOffering
from common.util import Utility
from django.http import HttpResponse, HttpResponseServerError
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import redirect
from models import OAuthTempRequestToken, UserAccessToken_LRS, ClientApp
from oauth_consumer.operative import LRS_Auth
from xapi.statement.xapi_getter import xapi_getter
from xapi.statement.xapi_filter import xapi_filter
@login_required
def get_lrs_access_token(request):
provider_id = request.GET.get('provider_id')
auth = LRS_Auth(provider_id = provider_id, callback = Utility.get_site_url(request))
return redirect(auth.authenticate(request.user.id))
# Create your views here.
@login_required
def lrs_test_get_statements(request):
unit_id = request.GET.get('unit_id')
user_id = request.GET.get('user_id')
unit = None
user = None
try:
unit = UnitOffering.objects.get(id = unit_id)
except ObjectDoesNotExist:
return HttpResponseServerError('Error. Unit was not found.')
if user_id is None or user_id == '':
return HttpResponseServerError('Error. User was not found.')
params = None
if request.GET:
params = dict(request.GET.iterlists())
keys = params.keys()
# Convert key:listvalue from querydict to items suitable for dict
for i in range(len(keys)):
            if isinstance(params[keys[i]], list) and len(params[keys[i]]) == 1:
                params[keys[i]] = params[keys[i]][0]
filters = xapi_filter()
getter = xapi_getter()
ret = getter.get_xapi_statements(unit_id, user_id, filters)
lrs = LRS_Auth(provider_id = unit.lrs_provider.id)
ret = lrs.get_statement(user_id, filters=params)
print ret
# filters = xapi_filter()
# filters.course = unit.code
# # if platform is not None and platform != "all":
# # filters.platform = platform
# getter = xapi_getter()
# ret = getter.get_xapi_statements(unit_id, user_id, filters)
# print 'statements count: %s ' % str(len(ret))
# from datetime import datetime
# import pytz
# zone = pytz.timezone('Brisbane')
# for stmt in ret:
# stmt_date = stmt['timestamp']
# # tdatetime = datetime.strptime(stmt_date, '%Y-%m-%d %H:%M:%S%z')
# jst_datetime_str = datetime.strftime(jst_datetime, '%Y-%m-%d %H:%M:%S %z')
# print tdatetime
return HttpResponse(ret)
@login_required
def lrs_test_send(request):
unit_id = request.GET.get('unit_id')
unit = None
try:
unit = UnitOffering.objects.get(id = unit_id)
except ObjectDoesNotExist:
return HttpResponseServerError('Error. Unit was not found.')
lrs = LRS_Auth(provider_id = unit.lrs_provider.id)
statement = get_test_xAPI()
return HttpResponse(lrs.transfer_statement(request.user.id, statement = statement))
def lrs_oauth_callback(request):
import os
import urlparse
user_id = request.GET.get('user', None)
print 'USERID: %s' % (user_id)
user = User.objects.get(id=user_id)
status = request.GET.get('status')
if status is not None and status == 'fail':
return HttpResponseServerError('Could not get access token.')
request_token = OAuthTempRequestToken.objects.get(user_id=user)
verifier = request.GET.get('oauth_verifier')
token = oauth.Token(request_token.token, request_token.secret)
request_token.delete() #delete temp token
token.set_verifier(verifier)
# Get Consumer info #Todo: change (most definitely) (IMPORTANT!!)
# consumer_key, consumer_secret = get_consumer_key_and_secret()
app = ClientApp.objects.get(id = request_token.clientapp.id)
client = oauth.Client(oauth.Consumer(app.get_key(), app.get_secret()), token)
# Exchange request_token for authed and verified access_token
resp,content = client.request(app.get_access_token_url(), "POST")
access_token = dict(urlparse.parse_qsl(content))
if access_token['oauth_token']:
UserAccessToken_LRS(user=user, access_token=access_token['oauth_token'],
access_token_secret=access_token['oauth_token_secret'],
clientapp = app).save()
from django.shortcuts import render_to_response
return render_to_response('xapi/get_access_token_successful.html')
def get_test_xAPI():
return """{
"id": "dc382d58-173a-4782-886d-7da23a700015",
"verb": {
"display": {
"en-US": "created"
},
"id": "http://www.w3.org/ns/activitystreams#Create"
},
"timestamp": "2016-11-01T08:33:28.423000+00:00",
"object": {
"definition": {
"type": "http://activitystrea.ms/specs/json/schema/activity-schema.html#task",
"name": {
"en-US": "Add after removing board ID from the toolkit..."
}
},
"id": "http://www.test.com/5818535876f64eded095ae82",
"objectType": "Activity"
},
"actor": {
"account": {
"homePage": "http://www.trello.com/",
"name": "clatoolkitdev"
},
"objectType": "Agent"
},
"authority": {
"member": [
{
"mbox": "mailto:clatoolkitdev@gmail.com",
"name": "clatoolkit",
"objectType": "Agent"
},
{
"account": {
"homePage": "http://example.com/XAPI/OAuth/token/",
"name": "4a77c7336e92425d9e56ec7bdb58223d"
},
"objectType": "Agent"
}
],
"objectType": "Group"
},
"version": "1.0.1",
"context": {
"platform": "Trello",
"contextActivities": {
"other": [],
"parent": [
{
"id": "http://test.com/aaa",
"objectType": "Activity"
}
],
"grouping": [
{
"definition": {
"name": {
"en-US": "TEST-UNIT"
},
"description": {
"en-US": "TEST-UNIT"
}
},
"id": "http://test.com/TEST-UNIT",
"objectType": "Activity"
}
]
},
"registration": "dc382d58-173a-4722-886d-7da68a925924"
}
}"""
|
kojiagile/CLAtoolkit
|
clatoolkit_project/xapi/views.py
|
Python
|
gpl-3.0
| 6,921
|
'''apport package hook for pxljr
(c) 2009 Canonical Ltd.
Author: Brian Murray <brian@ubuntu.com>
'''
from apport.hookutils import *
def add_info(report):
attach_hardware(report)
attach_printing(report)
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/apport/package-hooks/source_pxljr.py
|
Python
|
gpl-3.0
| 213
|
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords, split_keyword
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import re
import unicodedata
import urllib
logger = logging.getLogger("subliminal")
class Subtitulos(ServiceBase):
server_url = 'http://www.subtitulos.es'
site_url = 'http://www.subtitulos.es'
api_based = False
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'por-BR', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), #u'Español (Latinoamérica)': Language('spa'),
u'Català': Language('cat'), u'Brazilian': Language('por-BR'), u'English (US)': Language('eng-US'),
u'English (UK)': Language('eng-GB'), 'Galego': Language('glg')}
language_code = 'name'
videos = [Episode]
require_video = False
required_features = ['permissive']
    # the '.+' in the pattern for Version allows us to match both the '&oacute;'
    # html entity and the 'ó' char directly. This is because now BS4 converts the html
# code chars into their equivalent unicode char
release_pattern = re.compile('Versi.+n (.+) ([0-9]+).([0-9])+ megabytes')
    extra_keywords_pattern = re.compile(r"(?:con|para)\s(?:720p)?(?:\-|\s)?([A-Za-z]+)(?:\-|\s)?(?:720p)?(?:\s|\.)(?:y\s)?(?:720p)?(?:\-\s)?([A-Za-z]+)?(?:\-\s)?(?:720p)?(?:\.)?")
def list_checked(self, video, languages):
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
def query(self, filepath, languages, keywords, series, season, episode):
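        # Normalise the series name to the form used in the site's URLs:
        # lowercase, underscores for spaces, '@' for '&', parentheses stripped.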
request_series = series.lower().replace(' ', '_').replace('&', '@').replace('(','').replace(')','')
if isinstance(request_series, unicode):
request_series = unicodedata.normalize('NFKD', request_series).encode('ascii', 'ignore')
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
r = self.session.get('%s/%s/%sx%.2d' % (self.server_url, urllib.quote(request_series), season, episode))
if r.status_code == 404:
logger.debug(u'Could not find subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
return []
if r.status_code != 200:
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
return []
soup = BeautifulSoup(r.content, self.required_features)
subtitles = []
for sub in soup('div', {'id': 'version'}):
sub_keywords = split_keyword(self.release_pattern.search(sub.find('p', {'class': 'title-sub'}).contents[1]).group(1).lower())
if keywords and not keywords & sub_keywords:
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
continue
for html_language in sub.findAllNext('ul', {'class': 'sslist'}):
language = self.get_language(html_language.findNext('li', {'class': 'li-idioma'}).find('strong').contents[0].string.strip())
if language not in languages:
logger.debug(u'Language %r not in wanted languages %r' % (language, languages))
continue
html_status = html_language.findNext('li', {'class': 'li-estado green'})
status = html_status.contents[0].string.strip()
if status != 'Completado':
logger.debug(u'Wrong subtitle status %s' % status)
continue
path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), html_status.findNext('span', {'class': 'descargar green'}).find('a')['href'],
keywords=sub_keywords)
subtitles.append(subtitle)
return subtitles
Service = Subtitulos
|
mozvip/Sick-Beard
|
lib/subliminal/services/subtitulos.py
|
Python
|
gpl-3.0
| 4,934
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Test for Building Exposure Report Mixin**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Christian Christelis <christian@kartoza.com>'
__date__ = '02/04/2015'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import unittest
from collections import OrderedDict
from safe.impact_reports.building_exposure_report_mixin import (
BuildingExposureReportMixin)
# noinspection PyArgumentList
class BuildingExposureReportMixinTest(unittest.TestCase):
"""Test the ReportMixin.
.. versionadded:: 3.1
"""
# noinspection PyPep8Naming
def setUp(self):
"""Fixture run before all tests."""
self.building_mixin_blank = BuildingExposureReportMixin()
self.building_mixin = BuildingExposureReportMixin()
self.building_mixin.buildings = {
'School': 100,
'University': 10,
'Residential': 20000,
'Religious': 3
}
self.building_mixin.affected_buildings = OrderedDict([
('Hazard Level 2', {
'School': OrderedDict([
('Affected', 50),
('Value', 90000000)
]),
'University': OrderedDict([
('Affected', 0),
('Value', 0)
]),
'Residential': OrderedDict([
('Affected', 12000),
('Value', 1234567000)
]),
'Religious': OrderedDict([
('Affected', 0),
('Value', 0)
])
}),
('Hazard Level 1', {
'School': OrderedDict([
('Affected', 25),
('Value', 50000000)
]),
'University': OrderedDict([
('Affected', 1),
('Value', 11111111111)
]),
'Residential': OrderedDict([
('Affected', 1000),
('Value', 123456000)
]),
'Religious': OrderedDict([
('Affected', 1),
('Value', 10000000000)
])
})
])
def tearDown(self):
"""Run after each test."""
del self.building_mixin_blank
del self.building_mixin
def test_0001_generate_report(self):
"""Generate a blank report."""
blank_report = self.building_mixin_blank.generate_report()
expected_blank_report = [
{'content': ''},
{'content': ''},
{'content': [u'Hazard Category'], 'header': True},
{'content': [u'Buildings Not Affected', '0'], 'header': True},
{'content': [u'All Buildings', '0'], 'header': True},
{'content': ''},
{'content': [u'Building type', u'Total'], 'header': True},
{'content': ''},
{'content': u'Action Checklist:', 'header': True},
{'content': u'Are the critical facilities still open?'},
{'content': (
u'Which structures have warning capacity (eg. sirens, '
u'speakers, etc.)?')},
{'content': u'Which buildings will be evacuation centres?'},
{'content': u'Where will we locate the operations centre?'},
{'content': (
u'Where will we locate warehouse and/or distribution '
u'centres?')},
{
'content': (
u'Where will the students from the %s closed schools go '
u'to study?'),
'arguments': ('0',),
'condition': False
},
{
'content': (
u'Where will the patients from the %s closed hospitals '
u'go for treatment and how will we transport them?'),
'arguments': ('0',),
'condition': False},
{'content': ''}]
message = 'Blank report is not as expected.'
self.assertListEqual(blank_report, expected_blank_report, message)
def test_0002_action_checklist(self):
"""The default action check list."""
action_checklist = self.building_mixin_blank.action_checklist()
expected_action_checklist = [
{'content': u'Action Checklist:', 'header': True},
{'content': u'Are the critical facilities still open?'},
{'content': (
u'Which structures have warning capacity (eg. sirens, '
u'speakers, etc.)?')},
{'content': u'Which buildings will be evacuation centres?'},
{'content': u'Where will we locate the operations centre?'},
{'content': (
u'Where will we locate warehouse and/or distribution '
u'centres?')},
{
'content': (
u'Where will the students from the %s closed schools go '
u'to study?'),
'arguments': ('0',),
'condition': False
},
{
'content': (
u'Where will the patients from the %s closed hospitals '
u'go for treatment and how will we transport them?'),
'arguments': ('0',),
'condition': False}]
message = 'Default action checklist not as expected.'
self.assertListEqual(
action_checklist,
expected_action_checklist,
message)
def test_0003_impact_summary(self):
"""Test the buildings impact summary."""
impact_summary = self.building_mixin.impact_summary()
expected_impact_summary = [
{
'content': [u'Hazard Category', 'Affected', 'Value'],
'header': True
},
{'content': [u'Hazard Level 2', '12,050', '1,324,567,000']},
{'content': [u'Hazard Level 1', '1,027', '21,284,567,111']},
{
'content': [u'Total Buildings Affected', '13,077'],
'header': True
},
{'content': [u'Buildings Not Affected', '7,036'], 'header': True},
{'content': [u'All Buildings', '20,113'], 'header': True}]
        message = 'Impact summary is not as expected.'
self.assertListEqual(
impact_summary,
expected_impact_summary,
message)
def test_0004_buildings_breakdown(self):
"""Test the buildings breakdown."""
buildings_breakdown = self.building_mixin.buildings_breakdown()
expected_buildings_breakdown = [
{
'content': [
u'Building type',
u'Hazard Level 2',
u'Hazard Level 1',
u'Total'],
'header': True
},
{'content': ['Religious', '0', '1', '1']},
{'content': ['Residential', '12,000', '1,000', '13,000']},
{'content': ['School', '50', '25', '75']},
{'content': ['University', '0', '1', '1']}]
message = 'building breakdown is not as expected.'
self.assertListEqual(
buildings_breakdown,
expected_buildings_breakdown,
message)
def test_0005_schools_closed(self):
"""Test schools closed as expected."""
schools_closed_default = self.building_mixin_blank.schools_closed
schools_closed = self.building_mixin.schools_closed
message = 'Default should not have any closed schools.'
self.assertEqual(schools_closed_default, 0, message)
message = 'Schools closed in scenario not as expected.'
self.assertEqual(schools_closed, 75, message)
def test_0006_hospitals_closed(self):
"""Test hospitals closed as expected."""
hospitals_closed_default = self.building_mixin_blank.hospitals_closed
hospitals_closed = self.building_mixin.hospitals_closed
message = 'Default should not have any closed hospitals.'
self.assertEqual(hospitals_closed_default, 0, message)
message = 'Hospitals closed in scenario not as expected.'
self.assertEqual(hospitals_closed, 0, message)
def test_0007_general_methods(self):
"""Test general methods."""
default_count_usage = self.building_mixin_blank._count_usage('School')
message = 'Default count is not as expected.'
self.assertEqual(default_count_usage, 0, message)
count_usage = self.building_mixin._count_usage('School')
message = 'Count is not as expected.'
self.assertEqual(count_usage, 75, message)
default_impact_breakdown = self.building_mixin_blank._impact_breakdown
message = 'The default impact breakdown should be empty.'
self.assertListEqual(default_impact_breakdown, [], message)
impact_breakdown = self.building_mixin._impact_breakdown
        message = 'The impact breakdown is not as expected.'
self.assertListEqual(impact_breakdown, ['Affected', 'Value'], message)
default_categories = self.building_mixin_blank._affected_categories
message = 'The default categories should be empty.'
self.assertListEqual(default_categories, [], message)
categories = self.building_mixin._affected_categories
message = 'The categories are not as expected.'
self.assertListEqual(
categories,
['Hazard Level 2', 'Hazard Level 1'],
message)
def test_0008_building_counts(self):
"""Test the building counts."""
default_affected = self.building_mixin_blank.total_affected_buildings
default_unaffected = (
self.building_mixin_blank.total_unaffected_buildings)
default_total = self.building_mixin_blank.total_buildings
message = 'Defaults counts should be 0.'
self.assertEqual(default_total, 0, message)
self.assertEqual(default_unaffected, 0, message)
self.assertEqual(default_affected, 0, message)
affected = self.building_mixin.total_affected_buildings
unaffected = self.building_mixin.total_unaffected_buildings
total = self.building_mixin.total_buildings
message = (
'The total number of buildings should equal the sum of affected '
'and unaffected.')
self.assertEqual(total, unaffected + affected, message)
message = 'The total number of buildings is not as expected.'
self.assertEqual(total, 20113, message)
message = 'The affected number of buildings is not as expected.'
self.assertEqual(affected, 13077, message)
message = 'The unaffected number of buildings is not as expected.'
self.assertEqual(unaffected, 7036, message)
def test_0009_consolidate_to_other(self):
"""Test consolidating smaller building types to 'other'."""
total_buildings_before = self.building_mixin.total_buildings
affected_other_before = self.building_mixin._count_usage('Other')
message = (
'There should be no affected buildings of type other before '
'consolidating.')
self.assertEqual(affected_other_before, 0, message)
other_in_buildings_before = (
'Other' in self.building_mixin.buildings.keys())
message = (
'There should be no buildings of type other before '
'consolidation.')
self.assertFalse(other_in_buildings_before, message)
self.building_mixin._consolidate_to_other()
total_buildings_after = self.building_mixin.total_buildings
message = (
'The total number of buildings should remain the same '
'even after consolidation.')
self.assertEqual(
total_buildings_before,
total_buildings_after,
message)
affected_other_after = self.building_mixin._count_usage('Other')
message = 'The affected other buildings are not as expected.'
self.assertEqual(affected_other_after, 2, message)
other_in_buildings_after = (
'Other' in self.building_mixin.buildings.keys())
message = 'The type other should be in buildings.'
self.assertTrue(other_in_buildings_after, message)
total_other_after = self.building_mixin.buildings['Other']
message = (
'The total number of other after consolidation is '
'not as expected.')
self.assertEqual(total_other_after, 13, message)
if __name__ == '__main__':
suite = unittest.makeSuite(BuildingExposureReportMixinTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
Jannes123/inasafe
|
safe/impact_reports/test/test_building_exposure_report_mixin.py
|
Python
|
gpl-3.0
| 13,175
|
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
#
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
import string
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickchill.helper.common import convert_size, try_int
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class SpeedCDProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
# Provider Init
TorrentProvider.__init__(self, "Speedcd")
# Credentials
self.username = None
self.password = None
# Torrent Stats
self.minseed = None
self.minleech = None
self.freeleech = False
# URLs
self.url = 'https://speed.cd'
self.urls = {
'login': urljoin(self.url, 'takeElogin.php'),
'search': urljoin(self.url, 'browse.php'),
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK']
# Cache
self.cache = tvcache.TVCache(self)
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
}
# Yay lets add another request to the process since they are unreasonable.
response = self.get_url(self.url, returns='text')
with BS4Parser(response, 'html5lib') as html:
form = html.find('form', id='loginform')
if form:
self.urls['login'] = urljoin(self.url, form['action'])
response = self.get_url(self.urls['login'], post_data=login_params, returns='text')
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if re.search('Incorrect username or Password. Please try again.', response):
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
results = []
if not self.login():
return results
# http://speed.cd/browse.php?c49=1&c50=1&c52=1&c41=1&c55=1&c2=1&c30=1&freeleech=on&search=arrow&d=on
# Search Params
search_params = {
'c30': 1, # Anime
'c41': 1, # TV/Packs
'c49': 1, # TV/HD
'c50': 1, # TV/Sports
'c52': 1, # TV/B-Ray
'c55': 1, # TV/Kids
'search': '',
}
# Units
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def process_column_header(td):
result = ''
img = td.find('img')
if img:
result = img.get('alt')
if not result:
result = td.get_text(strip=True)
return result
if self.freeleech:
search_params['freeleech'] = 'on'
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
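                # Python 2 str.translate with a deletion table: strip all
                # punctuation from the search term before querying.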
search_params['search'] = search_string.translate(None, string.punctuation)
data = self.get_url(self.urls['search'], params=search_params, returns='text')
if not data:
continue
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('div', class_='boxContent')
torrent_table = torrent_table.find('table') if torrent_table else []
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one Release is found
if len(torrent_rows) < 2:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
labels = [process_column_header(label) for label in torrent_rows[0]('th')]
# Skip column headers
for result in torrent_rows[1:]:
try:
cells = result('td')
title = cells[labels.index('Title')].find('a').get_text()
download_url = urljoin(self.url, cells[labels.index('Download') - 1].a['href'])
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('Seeders') - 1].get_text(strip=True))
leechers = try_int(cells[labels.index('Leechers') - 1].get_text(strip=True))
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(
"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
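                            # The size cell apparently concatenates value and unit
                            # (e.g. '752.8MB'); re-insert a space so convert_size
                            # can parse it.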
torrent_size = cells[labels.index('Size') - 1].get_text()
torrent_size = torrent_size[:-2] + ' ' + torrent_size[-2:]
size = convert_size(torrent_size, units=units) or -1
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
except StandardError:
continue
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = SpeedCDProvider()
|
eleonrk/SickRage
|
sickbeard/providers/speedcd.py
|
Python
|
gpl-3.0
| 7,217
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Marius Gedminas <marius@pov.lt>
# (c) 2016, Matthew Gamble <git@matthewgamble.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: git_config
author:
- "Matthew Gamble"
- "Marius Gedminas"
version_added: 2.1
requirements: ['git']
short_description: Read and write git configuration
description:
- The M(git_config) module changes git configuration by invoking 'git config'.
This is needed if you don't want to use M(template) for the entire git
config file (e.g. because you need to change just C(user.email) in
    /etc/.git/config). Solutions involving M(command) are cumbersome or
don't work correctly in check mode.
options:
list_all:
description:
- List all settings (optionally limited to a given I(scope))
required: false
choices: [ "yes", "no" ]
default: no
name:
description:
- The name of the setting. If no value is supplied, the value will
be read from the config if it has been set.
required: false
default: null
repo:
description:
- Path to a git repository for reading and writing values from a
specific repo.
required: false
default: null
scope:
description:
- Specify which scope to read/set values from. This is required
when setting config values. If this is set to local, you must
also specify the repo parameter. It defaults to system only when
not using I(list_all)=yes.
required: false
choices: [ "local", "global", "system" ]
default: null
value:
description:
- When specifying the name of a single setting, supply a value to
set that setting to the given value.
required: false
default: null
'''
EXAMPLES = '''
# Set some settings in ~/.gitconfig
- git_config:
name: alias.ci
scope: global
value: commit
- git_config:
name: alias.st
scope: global
value: status
# Or system-wide:
- git_config:
name: alias.remotev
scope: system
value: remote -v
- git_config:
name: core.editor
scope: global
value: vim
# scope=system is the default
- git_config:
name: alias.diffc
value: diff --cached
- git_config:
name: color.ui
value: auto
# Make etckeeper not complain when invoked by cron
- git_config:
name: user.email
repo: /etc
scope: local
value: 'root@{{ ansible_fqdn }}'
# Read individual values from git config
- git_config:
name: alias.ci
scope: global
# scope: system is also assumed when reading values, unless list_all=yes
- git_config:
name: alias.diffc
# Read all values from git config
- git_config:
list_all: yes
scope: global
# When list_all=yes and no scope is specified, you get configuration from all scopes
- git_config:
list_all: yes
# Specify a repository to include local settings
- git_config:
list_all: yes
repo: /path/to/repo.git
'''
RETURN = '''
---
config_value:
description: When list_all=no and value is not set, a string containing the value of the setting in name
returned: success
type: string
sample: "vim"
config_values:
description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
returned: success
type: dictionary
sample:
core.editor: "vim"
color.ui: "auto"
alias.diffc: "diff --cached"
alias.remotev: "remote -v"
'''
def main():
module = AnsibleModule(
argument_spec=dict(
list_all=dict(required=False, type='bool', default=False),
name=dict(type='str'),
repo=dict(type='path'),
scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
value=dict(required=False)
),
mutually_exclusive=[['list_all', 'name'], ['list_all', 'value']],
required_if=[('scope', 'local', ['repo'])],
required_one_of=[['list_all', 'name']],
supports_check_mode=True,
)
git_path = module.get_bin_path('git')
if not git_path:
module.fail_json(msg="Could not find git. Please ensure it is installed.")
params = module.params
# We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
# Set the locale to C to ensure consistent messages.
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
if params['name']:
name = params['name']
else:
name = None
if params['scope']:
scope = params['scope']
elif params['list_all']:
scope = None
else:
scope = 'system'
if params['value']:
new_value = params['value']
else:
new_value = None
args = [git_path, "config", "--includes"]
if params['list_all']:
args.append('-l')
if scope:
args.append("--" + scope)
if name:
args.append(name)
if scope == 'local':
dir = params['repo']
elif params['list_all'] and params['repo']:
# Include local settings from a specific repo when listing all available settings
dir = params['repo']
else:
# Run from root directory to avoid accidentally picking up any local config settings
dir = "/"
(rc, out, err) = module.run_command(' '.join(args), cwd=dir)
if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
# This just means nothing has been set at the given scope
module.exit_json(changed=False, msg='', config_values={})
elif rc >= 2:
# If the return code is 1, it just means the option hasn't been set yet, which is fine.
module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
if params['list_all']:
values = out.rstrip().splitlines()
config_values = {}
for value in values:
k, v = value.split('=', 1)
config_values[k] = v
module.exit_json(changed=False, msg='', config_values=config_values)
elif not new_value:
module.exit_json(changed=False, msg='', config_value=out.rstrip())
else:
old_value = out.rstrip()
if old_value == new_value:
module.exit_json(changed=False, msg="")
if not module.check_mode:
new_value_quoted = "'" + new_value + "'"
(rc, out, err) = module.run_command(' '.join(args + [new_value_quoted]), cwd=dir)
if err:
module.fail_json(rc=rc, msg=err, cmd=' '.join(args + [new_value_quoted]))
module.exit_json(
msg='setting changed',
diff=dict(
before_header=' '.join(args),
before=old_value + "\n",
after_header=' '.join(args),
after=new_value + "\n"
),
changed=True
)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
kbrebanov/ansible-modules-extras
|
source_control/git_config.py
|
Python
|
gpl-3.0
| 7,487
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2017, 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Snapcraft plugins drive different build systems
Each part has a build system . Most parts are built from source using one of
a range of build systems such as CMake or Scons. Some parts are pre-built
and just copied into place, for example parts that reuse existing binary
packages.
You tell snapcraft which build system it must drive by specifying the
snapcraft plugin for that part. Every part must specify a plugin explicitly
(when you see a part that does not specify a plugin, that's because the
actual part definition is in the cloud, where the plugin is specified!)
These plugins implement a lifecycle over the following steps:
- pull: retrieve the source for the part from the specified location
- build: drive the build system determined by the choice of plugin
- stage: consolidate desirable files from all the parts in one tree
- prime: distill down to only the files which will go into the snap
- snap: compress the prime tree into the installable snap file
These steps correspond to snapcraft commands. So when you initiate a
'snapcraft pull' you will invoke the respective plugin for each part in
the snap, in sequence, to handle the source pull. Each part will then have a
fully populated parts/<part-name>/src/ directory. Similarly, if you then say
'snapcraft build' you will invoke the plugin responsible for each part in
turn, to build the part.
# Snapcraft Lifecycle
## Pull
In this first step, source material is retrieved from the specified
location, whether that is a URL for a tarball, a local path to a source tree
inside the snap, a revision control reference to checkout, or something
specific to the plugin such as PyPI. The plugin might also download
necessary artifacts, such as the Java SDK, which are not specific to the
particular part but which are needed by the plugin to handle its type of
build system.
All the downloaded content for each part goes into the
`parts/<part-name>/src/` directory, which acts as a cache to prevent
re-fetching content. You can clean that cache out with 'snapcraft clean'.
## Build
Snapcraft calculates an appropriate sequence to build the parts, based on
explicit 'after' references and the order of the parts in the
snapcraft.yaml. Each part is built in the `parts/<part-name>/build`
directory and installed into `parts/<part-name>/install`.
Note the install step - we might actually want to use built artifacts from
one part in the build process of another, so the `parts/<part-name>/install`
directory is useful as a 'working fresh install' of the part.
Between the plugin, the part definition YAML, and the build system of the
part, it is expected that the part can be built and installed in the right
place.
At this point you have a tree under `parts/` with a subdirectory for every
part, and underneath those, separate src, build and install trees for each
part.
## Stage
We now need to start consolidating the important pieces of each part into a
single tree. We do this twice - once in a very sweeping way that will
produce a lot of extraneous materials but is useful for debugging. This is
the 'stage' step of the lifecycle, because we move a lot of the build output
from each part into a consolidated tree under `stage/` which has the
structure of a snap but has way too much extra information.
The important thing about the staging area is that it lets you get all the
shared libraries in one place and lets you find overlapping content in the
parts. You can also try this directory as if it were a snap, and you'll have
all the debugging information in the tree, which is useful for developers.
Each part describes its own staging content - the files that should be
staged. The part will often describe "chunks" of content, called filesets,
so that they can be referred to as a useful set rather than having to call
out individual files.
## Prime
It is useful to have a directory tree which exactly mirrors the structure of
the final snap. This is the `prime/` directory, and the lifecycle includes a
'prime' step which copies only that final, required content from the
`stage/` directory into the `prime/` directory.
So the `prime/` directory contains only the content that will be put into
the final snap, unlike the staging area which may include debug and
development files not destined for your snap.
The snap metadata will also be placed in `./prime/meta` during the prime
step, so this `./prime` directory is useful for inspecting exactly what is
going into your snap or to conduct any final post-processing on snapcraft's
output.
## Snap
The final step in the snapcraft lifecycle builds a snap out of the `prime/`
directory. It will be in the top level directory, alongside snapcraft.yaml,
called <name>-<version>-<arch>.snap
# Standard part definition keywords
There are several builtin keywords which can be used in any part regardless
of the choice of plugin.
- after: [part, part, part...]
Snapcraft will make sure that it builds all of the listed parts before
it tries to build this part. Essentially these listed dependencies for
this part, useful when the part needs a library or tool built by another
part.
If such a dependency part is not defined in this snapcraft.yaml, it must
be defined in the cloud parts library, and snapcraft will retrieve the
definition of the part from the cloud. In this way, a shared library of
parts is available to every snap author - just say 'after' and list the
parts you want that others have already defined.
- build-packages: [pkg, pkg, pkg...]
A list of packages to install on the build host before building
the part. The files from these packages typically will not go into the
final snap unless they contain libraries that are direct dependencies of
binaries within the snap (in which case they'll be discovered via `ldd`),
or they are explicitly described in stage-packages.
- stage-packages: YAML list
A set of packages to be downloaded and unpacked to join the part
before it's built. Note that these packages are not installed on the host.
Like the rest of the part, all files from these packages will make it into
the final snap unless filtered out via the `snap` keyword.
One may simply specify packages in a flat list, in which case the packages
will be fetched and unpacked regardless of build environment. In addition,
a specific grammar made up of sub-lists is supported here that allows one
to filter stage packages depending on various selectors (e.g. the target
arch), as well as specify optional packages. The grammar is made up of two
nestable statements: 'on' and 'try'.
Let's discuss `on`.
- on <selector>[,<selector>...]:
- ...
- else[ fail]:
- ...
The body of the 'on' clause is taken into account if every (AND, not OR)
selector is true for the target build environment. Currently the only
selectors supported are target architectures (e.g. amd64).
If the 'on' clause doesn't match and it's immediately followed by an 'else'
clause, the 'else' clause must be satisfied. An 'on' clause without an
'else' clause is considered satisfied even if no selector matched. The
'else fail' form allows erroring out if an 'on' clause was not matched.
For example, say you only wanted to stage `foo` if building for amd64 (and
not stage `foo` if otherwise):
- on amd64: [foo]
Building on that, say you wanted to stage `bar` if building on an arch
other than amd64:
- on amd64: [foo]
- else: [bar]
You can nest these for more complex behaviors:
- on amd64: [foo]
- else:
- on i386: [bar]
- on armhf: [baz]
If your project requires a package that is only available on amd64, you can
fail if you're not building for amd64:
- on amd64: [foo]
- else fail
Now let's discuss `try`:
- try:
- ...
- else:
- ...
The body of the 'try' clause is taken into account only when all packages
contained within it are valid. If not, if it's immediately followed by
'else' clauses they are tried in order, and one of them must be satisfied.
A 'try' clause with no 'else' clause is considered satisfied even if it
contains invalid packages.
For example, say you wanted to stage `foo`, but it wasn't available for all
architectures. Assuming your project builds without it, you can make it an
optional stage package:
- try: [foo]
You can also add alternatives:
- try: [foo]
- else: [bar]
Again, you can nest these for more complex behaviors:
- on amd64: [foo]
- else:
- try: [bar]
- organize: YAML
Snapcraft will rename files according to this YAML sub-section. The
content of the 'organize' section consists of old path keys, and their
new values after the renaming.
This can be used to avoid conflicts between parts that use the same
name, or to map content from different parts into a common conventional
file structure. For example:
organize:
usr/oldfilename: usr/newfilename
usr/local/share/: usr/share/
The key is the internal part filename, the value is the exposed filename
that will be used during the staging process. You can rename whole
subtrees of the part, or just specific files.
Note that the path is relative (even though it is "usr/local") because
it refers to content underneath parts/<part-name>/install which is going
to be mapped into the stage and prime areas.
- filesets: YAML
When we map files into the stage and prime areas on the way to putting
them into the snap, it is convenient to be able to refer to groups of
files as well as individual files. Snapcraft lets you name a fileset
and then use it later for inclusion or exclusion of those files from the
resulting snap.
      For example, consider man pages or header files. You might want them
      in, or you might want to leave them out, but you definitely don't want
      to have to repeatedly list all of them either way.
This section is thus a YAML map of fileset names (the keys) to a list of
filenames. The list is built up by adding individual files or whole
subdirectory paths (and all the files under that path) and wildcard
globs, and then pruning from those paths.
The wildcard * globs all files in that path. Exclusions are denoted by
an initial `-`.
For example you could add usr/local/* then remove usr/local/man/*:
filesets:
allbutman: [ usr/local/*, -usr/local/man/* ]
manpages: [ usr/local/man ]
Filenames are relative to the part install directory in
`parts/<part-name>/install`. If you have used 'organize' to rename files
then the filesets will be built up from the names after organization.
- stage: YAML file and fileset list
A list of files from a part install directory to copy into `stage/`.
Rules applying to the list here are the same as those of filesets.
Referencing of fileset keys is done with a $ prefixing the fileset key,
which will expand with the value of such key.
For example:
stage:
- usr/lib/* # Everything under parts/<part-name>/install/usr/lib
          - -usr/lib/libtest.so # Excluding libtest.so
- $manpages # Including the 'manpages' fileset
- snap: YAML file and fileset list
A list of files from a part install directory to copy into `prime/`.
This section takes exactly the same form as the 'stage' section but the
files identified here will go into the ultimate snap (because the
`prime/` directory reflects the file structure of the snap with no
extraneous content).
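      For example, mirroring the 'stage' example above (the paths and the
      'manpages' fileset are illustrative):
        snap:
          - usr/lib/*
          - -usr/share/doc/* # Excluding documentation
          - $manpages # Including the 'manpages' fileset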
- build-attributes: [attribute1, attribute2]
A list of special attributes that affect the build of this specific part.
Supported attributes:
- no-install:
Do not run the install target provided by the plugin's build system.
Supported by: kbuild
- debug:
Plugins that support the concept of build types build in Release mode
by default. Setting the 'debug' attribute requests that they instead
build in Debug mode.
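      For example, to build a kbuild-based part in Debug mode and skip its
      install target (which attributes apply depends on the plugin, as noted
      above):
        build-attributes: [no-install, debug]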
"""
from collections import OrderedDict # noqa
import pkg_resources # noqa
def _get_version():
import os as _os
if _os.environ.get("SNAP_NAME") == "snapcraft":
return _os.environ["SNAP_VERSION"]
try:
return pkg_resources.require("snapcraft")[0].version
except pkg_resources.DistributionNotFound:
return "devel"
# Set this early so that the circular imports aren't too painful
__version__ = _get_version()
# Workaround for potential import loops.
from snapcraft.internal import repo # noqa isort:skip
# For backwards compatibility with external plugins.
import snapcraft._legacy_loader # noqa: F401 isort:skip
from snapcraft.plugins.v1 import PluginV1 as BasePlugin # noqa: F401 isort:skip
from snapcraft import common # noqa
from snapcraft import extractors # noqa
from snapcraft import file_utils # noqa
from snapcraft import plugins # noqa
from snapcraft import shell_utils # noqa
from snapcraft import sources # noqa
# FIXME LP: #1662658
from snapcraft._store import ( # noqa
create_key,
download,
gated,
list_keys,
list_registered,
login,
register,
register_key,
sign_build,
status,
upload,
upload_metadata,
validate,
)
from snapcraft.project._project_options import ProjectOptions # noqa isort:skip
|
chipaca/snapcraft
|
snapcraft/__init__.py
|
Python
|
gpl-3.0
| 14,331
|
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors, constants
import os
import urllib2
try:
import json
except ImportError:
import simplejson as json
class etcd():
def __init__(self, url='http://127.0.0.1:4001'):
self.url = url
self.baseurl = '%s/v1/keys' % (self.url)
def get(self, key):
url = "%s/%s" % (self.baseurl, key)
data = None
value = ""
try:
r = urllib2.urlopen(url)
data = r.read()
except:
return value
try:
# {"action":"get","key":"/name","value":"Jane Jolie","index":5}
item = json.loads(data)
if 'value' in item:
value = item['value']
if 'errorCode' in item:
value = "ENOENT"
        except:
            # JSON decoding failed; re-raise so the caller sees the real error.
            raise
return value
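# Illustrative usage of the etcd helper above (not part of the lookup plugin's
# public interface); it assumes an etcd v1 server listening on the default URL:
#
#   client = etcd()                      # defaults to http://127.0.0.1:4001
#   value = client.get('cluster/name')   # '' if unreachable, 'ENOENT' on errorCode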
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
self.etcd = etcd(constants.ANSIBLE_ETCD_URL)
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if isinstance(terms, basestring):
terms = [ terms ]
ret = []
for term in terms:
key = term.split()[0]
value = self.etcd.get(key)
ret.append(value)
return ret
|
JensRantil/ansible
|
lib/ansible/runner/lookup_plugins/etcd.py
|
Python
|
gpl-3.0
| 2,083
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import django_filters
import django_tables2 as tables
from django_tables2.utils import A
from .models import Software, SoftwareLicenseAgreement
class SoftwareFilter(django_filters.FilterSet):
description = django_filters.CharFilter(lookup_expr="icontains")
    begin_last_used = django_filters.DateFilter(
field_name="softwareversion__last_used",
lookup_expr="gte")
end_last_used = django_filters.DateFilter(
field_name="softwareversion__last_used",
lookup_expr="lte")
class Meta:
model = Software
fields = ('name', 'description', 'group', 'category', 'academic_only',
'restricted',)
class SoftwareTable(tables.Table):
name = tables.LinkColumn('kg_software_detail', args=[A('pk')])
group = tables.LinkColumn('kg_group_detail', args=[A('group__name')])
softwareversion__last_used = tables.Column(verbose_name="Last used")
class Meta:
model = Software
fields = ('name', 'description', 'group', 'category',
'softwareversion__last_used')
empty_text = "No items"
class SoftwareLicenseAgreementTable(tables.Table):
software = tables.LinkColumn(
'kg_software_detail', accessor="license__software",
args=[A('license__software__pk')])
license = tables.LinkColumn(
'kg_software_license_detail', args=[A('license__pk')])
person = tables.LinkColumn(
'kg_person_detail', args=[A('person__username')])
class Meta:
model = SoftwareLicenseAgreement
fields = ("software", "license", "person", "date")
empty_text = "No items"
|
brianmay/karaage
|
karaage/plugins/kgsoftware/tables.py
|
Python
|
gpl-3.0
| 2,366
|
# Tests invocation of the interpreter with various command line arguments
# All tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
import test.support, unittest
import os
import sys
import subprocess
def _spawn_python(*args):
cmd_line = [sys.executable]
# When testing -S, we need PYTHONPATH to work (see test_site_flag())
if '-S' not in args:
cmd_line.append('-E')
cmd_line.extend(args)
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def _kill_python(p):
return _kill_python_and_exit_code(p)[0]
def _kill_python_and_exit_code(p):
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
# try to cleanup the child so we don't appear to leak when running
# with regrtest -R. This should be a no-op on Windows.
subprocess._cleanup()
returncode = p.wait()
return data, returncode
class CmdLineTest(unittest.TestCase):
def start_python(self, *args):
return self.start_python_and_exit_code(*args)[0]
def start_python_and_exit_code(self, *args):
p = _spawn_python(*args)
return _kill_python_and_exit_code(p)
def exit_code(self, *args):
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
with open(os.devnull, 'w') as devnull:
return subprocess.call(cmd_line, stdout=devnull,
stderr=subprocess.STDOUT)
def test_directories(self):
self.assertNotEqual(self.exit_code('.'), 0)
self.assertNotEqual(self.exit_code('< .'), 0)
def verify_valid_flag(self, cmd_line):
data = self.start_python(cmd_line)
self.assertTrue(data == b'' or data.endswith(b'\n'))
self.assertTrue(b'Traceback' not in data)
def test_optimize(self):
self.verify_valid_flag('-O')
self.verify_valid_flag('-OO')
def test_q(self):
self.verify_valid_flag('-Qold')
self.verify_valid_flag('-Qnew')
self.verify_valid_flag('-Qwarn')
self.verify_valid_flag('-Qwarnall')
def test_site_flag(self):
if os.name == 'posix':
# Workaround bug #586680 by adding the extension dir to PYTHONPATH
from distutils.util import get_platform
s = "./build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
p = os.environ.get('PYTHONPATH', '')
if p:
p += ':'
os.environ['PYTHONPATH'] = p + s
self.verify_valid_flag('-S')
def test_usage(self):
self.assertTrue(b'usage' in self.start_python('-h'))
def test_version(self):
version = ('Python %d.%d' % sys.version_info[:2]).encode("ascii")
self.assertTrue(self.start_python('-V').startswith(version))
def test_verbose(self):
# -v causes imports to write to stderr. If the write to
# stderr itself causes an import to happen (for the output
# codec), a recursion loop can occur.
data, rc = self.start_python_and_exit_code('-v')
self.assertEqual(rc, 0)
self.assertTrue(b'stack overflow' not in data)
data, rc = self.start_python_and_exit_code('-vv')
self.assertEqual(rc, 0)
self.assertTrue(b'stack overflow' not in data)
def test_run_module(self):
# Test expected operation of the '-m' switch
# Switch needs an argument
self.assertNotEqual(self.exit_code('-m'), 0)
# Check we get an error for a nonexistent module
self.assertNotEqual(
self.exit_code('-m', 'fnord43520xyz'),
0)
# Check the runpy module also gives an error for
# a nonexistent module
self.assertNotEqual(
self.exit_code('-m', 'runpy', 'fnord43520xyz'),
0)
# All good if module is located and run successfully
self.assertEqual(
self.exit_code('-m', 'timeit', '-n', '1'),
0)
def test_run_module_bug1764407(self):
# -m and -i need to play well together
# Runs the timeit module and checks the __main__
# namespace has been populated appropriately
p = _spawn_python('-i', '-m', 'timeit', '-n', '1')
p.stdin.write(b'Timer\n')
p.stdin.write(b'exit()\n')
data = _kill_python(p)
self.assertTrue(data.find(b'1 loop') != -1)
self.assertTrue(data.find(b'__main__.Timer') != -1)
def test_run_code(self):
# Test expected operation of the '-c' switch
# Switch needs an argument
self.assertNotEqual(self.exit_code('-c'), 0)
# Check we get an error for an uncaught exception
self.assertNotEqual(
self.exit_code('-c', 'raise Exception'),
0)
# All good if execution is successful
self.assertEqual(
self.exit_code('-c', 'pass'),
0)
# Test handling of non-ascii data
if sys.getfilesystemencoding() != 'ascii':
command = "assert(ord('\xe9') == 0xe9)"
self.assertEqual(
self.exit_code('-c', command),
0)
def test_unbuffered_output(self):
# Test expected operation of the '-u' switch
for stream in ('stdout', 'stderr'):
# Binary is unbuffered
code = ("import os, sys; sys.%s.buffer.write(b'x'); os._exit(0)"
% stream)
data, rc = self.start_python_and_exit_code('-u', '-c', code)
self.assertEqual(rc, 0)
self.assertEqual(data, b'x', "binary %s not unbuffered" % stream)
# Text is line-buffered
code = ("import os, sys; sys.%s.write('x\\n'); os._exit(0)"
% stream)
data, rc = self.start_python_and_exit_code('-u', '-c', code)
self.assertEqual(rc, 0)
self.assertEqual(data.strip(), b'x',
"text %s not line-buffered" % stream)
def test_unbuffered_input(self):
# sys.stdin still works with '-u'
code = ("import sys; sys.stdout.write(sys.stdin.read(1))")
p = _spawn_python('-u', '-c', code)
p.stdin.write(b'x')
p.stdin.flush()
data, rc = _kill_python_and_exit_code(p)
self.assertEqual(rc, 0)
self.assertTrue(data.startswith(b'x'), data)
def test_large_PYTHONPATH(self):
with test.support.EnvironmentVarGuard() as env:
path1 = "ABCDE" * 100
path2 = "FGHIJ" * 100
env['PYTHONPATH'] = path1 + os.pathsep + path2
code = """
import sys
path = ":".join(sys.path)
path = path.encode("ascii", "backslashreplace")
sys.stdout.buffer.write(path)"""
code = code.strip().splitlines()
code = '; '.join(code)
p = _spawn_python('-S', '-c', code)
stdout, _ = p.communicate()
p.stdout.close()
self.assertTrue(path1.encode('ascii') in stdout)
self.assertTrue(path2.encode('ascii') in stdout)
def test_main():
test.support.run_unittest(CmdLineTest)
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
mancoast/CPythonPyc_test
|
fail/314_test_cmd_line.py
|
Python
|
gpl-3.0
| 7,293
|
# tempfile.py unit tests.
import tempfile
import errno
import io
import os
import signal
import sys
import re
import warnings
import contextlib
import weakref
import unittest
from test import support, script_helper
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform.startswith('openbsd'):
TEST_FILES = 48
else:
TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class BaseTestCase(unittest.TestCase):
str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
def setUp(self):
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings("ignore", category=RuntimeWarning,
message="mktemp", module=__name__)
def tearDown(self):
self._warnings_manager.__exit__(None, None, None)
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
# check for equality of the absolute paths!
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
% nbase)
class TestExports(BaseTestCase):
def test_exports(self):
# There are no surprising symbols in the tempfile module
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempdir" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1,
"TemporaryDirectory" : 1,
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
class TestRandomNameSequence(BaseTestCase):
"""Test the internal iterator object _RandomNameSequence."""
def setUp(self):
self.r = tempfile._RandomNameSequence()
super().setUp()
def test_get_six_char_str(self):
# _RandomNameSequence returns a six-character string
s = next(self.r)
self.nameCheck(s, '', '', '')
def test_many(self):
# _RandomNameSequence returns no duplicate strings (stochastic)
dict = {}
r = self.r
for i in range(TEST_FILES):
s = next(r)
self.nameCheck(s, '', '', '')
self.assertNotIn(s, dict)
dict[s] = 1
def supports_iter(self):
# _RandomNameSequence supports the iterator protocol
i = 0
r = self.r
for s in r:
i += 1
if i == 20:
break
@unittest.skipUnless(hasattr(os, 'fork'),
"os.fork is required for this test")
def test_process_awareness(self):
# ensure that the random source differs between
# child and parent.
read_fd, write_fd = os.pipe()
pid = None
try:
pid = os.fork()
if not pid:
os.close(read_fd)
os.write(write_fd, next(self.r).encode("ascii"))
os.close(write_fd)
# bypass the normal exit handlers- leave those to
# the parent.
os._exit(0)
parent_value = next(self.r)
child_value = os.read(read_fd, len(parent_value)).decode("ascii")
finally:
if pid:
# best effort to ensure the process can't bleed out
# via any bugs above
try:
os.kill(pid, signal.SIGKILL)
except EnvironmentError:
pass
os.close(read_fd)
os.close(write_fd)
self.assertNotEqual(child_value, parent_value)
class TestCandidateTempdirList(BaseTestCase):
"""Test the internal function _candidate_tempdir_list."""
def test_nonempty_list(self):
# _candidate_tempdir_list returns a nonempty list of strings
cand = tempfile._candidate_tempdir_list()
self.assertFalse(len(cand) == 0)
for c in cand:
self.assertIsInstance(c, str)
def test_wanted_dirs(self):
# _candidate_tempdir_list contains the expected directories
# Make sure the interesting environment variables are all set.
with support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
env[envname] = os.path.abspath(envname)
cand = tempfile._candidate_tempdir_list()
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname: raise ValueError
self.assertIn(dirname, cand)
try:
dirname = os.getcwd()
except (AttributeError, os.error):
dirname = os.curdir
self.assertIn(dirname, cand)
# Not practical to try to verify the presence of OS-specific
# paths in this list.
# We test _get_default_tempdir some more by testing gettempdir.
class TestGetDefaultTempdir(BaseTestCase):
"""Test _get_default_tempdir()."""
def test_no_files_left_behind(self):
# use a private empty directory
with tempfile.TemporaryDirectory() as our_temp_directory:
# force _get_default_tempdir() to consider our empty directory
def our_candidate_list():
return [our_temp_directory]
with support.swap_attr(tempfile, "_candidate_tempdir_list",
our_candidate_list):
# verify our directory is empty after _get_default_tempdir()
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
def raise_OSError(*args, **kwargs):
raise OSError()
with support.swap_attr(io, "open", raise_OSError):
# test again with failing io.open()
with self.assertRaises(FileNotFoundError):
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
open = io.open
def bad_writer(*args, **kwargs):
fp = open(*args, **kwargs)
fp.write = raise_OSError
return fp
with support.swap_attr(io, "open", bad_writer):
# test again with failing write()
with self.assertRaises(FileNotFoundError):
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
class TestGetCandidateNames(BaseTestCase):
"""Test the internal function _get_candidate_names."""
def test_retval(self):
# _get_candidate_names returns a _RandomNameSequence object
obj = tempfile._get_candidate_names()
self.assertIsInstance(obj, tempfile._RandomNameSequence)
def test_same_thing(self):
# _get_candidate_names always returns the same object
a = tempfile._get_candidate_names()
b = tempfile._get_candidate_names()
self.assertTrue(a is b)
@contextlib.contextmanager
def _inside_empty_temp_dir():
dir = tempfile.mkdtemp()
try:
with support.swap_attr(tempfile, 'tempdir', dir):
yield
finally:
support.rmtree(dir)
def _mock_candidate_names(*names):
return support.swap_attr(tempfile,
'_get_candidate_names',
lambda: iter(names))
class TestMkstempInner(BaseTestCase):
"""Test the internal function _mkstemp_inner."""
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre="", suf="", bin=1):
if dir is None:
dir = tempfile.gettempdir()
file = self.mkstemped(dir, pre, suf, bin)
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# _mkstemp_inner can create files
self.do_create().write(b"blat")
self.do_create(pre="a").write(b"blat")
self.do_create(suf="b").write(b"blat")
self.do_create(pre="a", suf="b").write(b"blat")
self.do_create(pre="aa", suf=".txt").write(b"blat")
def test_basic_many(self):
# _mkstemp_inner can create many files (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
# _mkstemp_inner can create files in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write(b"blat")
finally:
os.rmdir(dir)
@unittest.skipUnless(has_stat, 'os.stat not available')
def test_file_mode(self):
# _mkstemp_inner creates files with the proper mode
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0o600
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
@unittest.skipUnless(has_spawnl, 'os.spawnl not available')
def test_noinherit(self):
# _mkstemp_inner file handles are not inherited by child processes
if support.verbose:
v="v"
else:
v="q"
file = self.do_create()
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
# but an arg with embedded spaces should be decorated with double
# quotes on each end
if sys.platform in ('win32',):
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
@unittest.skipUnless(has_textmode, "text mode not available")
def test_textmode(self):
# _mkstemp_inner can create files in text mode
# A text file is truncated at the first Ctrl+Z byte
f = self.do_create(bin=0)
f.write(b"blat\x1a")
f.write(b"extra\n")
os.lseek(f.fd, 0, os.SEEK_SET)
self.assertEqual(os.read(f.fd, 20), b"blat")
def default_mkstemp_inner(self):
return tempfile._mkstemp_inner(tempfile.gettempdir(),
tempfile.template,
'',
tempfile._bin_openflags)
def test_collision_with_existing_file(self):
# _mkstemp_inner tries another name when a file with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
(fd1, name1) = self.default_mkstemp_inner()
os.close(fd1)
self.assertTrue(name1.endswith('aaa'))
(fd2, name2) = self.default_mkstemp_inner()
os.close(fd2)
self.assertTrue(name2.endswith('bbb'))
def test_collision_with_existing_directory(self):
# _mkstemp_inner tries another name when a directory with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
dir = tempfile.mkdtemp()
self.assertTrue(dir.endswith('aaa'))
(fd, name) = self.default_mkstemp_inner()
os.close(fd)
self.assertTrue(name.endswith('bbb'))
class TestGetTempPrefix(BaseTestCase):
"""Test gettempprefix()."""
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertIsInstance(p, str)
self.assertTrue(len(p) > 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
fd = os.open(p, os.O_RDWR | os.O_CREAT)
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
class TestGetTempDir(BaseTestCase):
"""Test gettempdir()."""
def test_directory_exists(self):
# gettempdir returns a directory which exists
dir = tempfile.gettempdir()
self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
"%s is not an absolute path" % dir)
self.assertTrue(os.path.isdir(dir),
"%s is not a directory" % dir)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
file = tempfile.NamedTemporaryFile()
file.write(b"blat")
file.close()
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
self.assertTrue(a is b)
class TestMkstemp(BaseTestCase):
"""Test mkstemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_choose_directory(self):
# mkstemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
finally:
os.rmdir(dir)
class TestMkdtemp(BaseTestCase):
"""Test mkdtemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = list(range(TEST_FILES))
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
if(isinstance(i, str)):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
finally:
os.rmdir(dir)
@unittest.skipUnless(has_stat, 'os.stat not available')
def test_mode(self):
# mkdtemp creates directories with the proper mode
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0o777 # Mask off sticky bits inherited from /tmp
expected = 0o700
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
def test_collision_with_existing_file(self):
# mkdtemp tries another name when a file with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
file = tempfile.NamedTemporaryFile(delete=False)
file.close()
self.assertTrue(file.name.endswith('aaa'))
dir = tempfile.mkdtemp()
self.assertTrue(dir.endswith('bbb'))
def test_collision_with_existing_directory(self):
# mkdtemp tries another name when a directory with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
dir1 = tempfile.mkdtemp()
self.assertTrue(dir1.endswith('aaa'))
dir2 = tempfile.mkdtemp()
self.assertTrue(dir2.endswith('bbb'))
class TestMktemp(BaseTestCase):
"""Test mktemp()."""
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
def setUp(self):
self.dir = tempfile.mkdtemp()
super().setUp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
super().tearDown()
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
# Create the file. This will raise an exception if it's
# mysteriously appeared in the meanwhile.
os.close(os.open(self.name, self._bflags, 0o600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
file = self.mktemped(self.dir, pre, suf)
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
# mktemp can choose usable file names
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_many(self):
# mktemp can choose many usable file names (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
## def test_warning(self):
## # mktemp issues a warning when used
## warnings.filterwarnings("error",
## category=RuntimeWarning,
## message="mktemp")
## self.assertRaises(RuntimeWarning,
## tempfile.mktemp, dir=self.dir)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class TestNamedTemporaryFile(BaseTestCase):
"""Test NamedTemporaryFile()."""
def do_create(self, dir=None, pre="", suf="", delete=True):
if dir is None:
dir = tempfile.gettempdir()
file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
delete=delete)
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# NamedTemporaryFile can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_method_lookup(self):
# Issue #18879: Looking up a temporary file method should keep it
# alive long enough.
f = self.do_create()
wr = weakref.ref(f)
write = f.write
write2 = f.write
del f
write(b'foo')
del write
write2(b'bar')
del write2
if support.check_impl_detail(cpython=True):
# No reference cycle was created.
self.assertIsNone(wr())
def test_creates_named(self):
# NamedTemporaryFile creates files with names
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
# A NamedTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.NamedTemporaryFile(dir=dir)
f.write(b'blat')
f.close()
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
# Tests that delete-on-close can be disabled
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write(b'blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
# A NamedTemporaryFile can be closed many times without error
f = tempfile.NamedTemporaryFile()
f.write(b'abc\n')
f.close()
f.close()
f.close()
def test_context_manager(self):
# A NamedTemporaryFile can be used as a context manager
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
# How to test the mode and bufsize parameters?
class TestSpooledTemporaryFile(BaseTestCase):
"""Test SpooledTemporaryFile()."""
def do_create(self, max_size=0, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
return file
def test_basic(self):
# SpooledTemporaryFile can create files
f = self.do_create()
self.assertFalse(f._rolled)
f = self.do_create(max_size=100, pre="a", suf=".txt")
self.assertFalse(f._rolled)
def test_del_on_close(self):
# A SpooledTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
self.assertFalse(f._rolled)
f.write(b'blat ' * 5)
self.assertTrue(f._rolled)
filename = f.name
f.close()
self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
"SpooledTemporaryFile %s exists after close" % filename)
finally:
os.rmdir(dir)
def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple times within the max_size
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
for i in range(5):
f.seek(0, 0)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
def test_write_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_writelines(self):
# Verify writelines with a SpooledTemporaryFile
f = self.do_create()
f.writelines((b'x', b'y', b'z'))
f.seek(0)
buf = f.read()
self.assertEqual(buf, b'xyz')
def test_writelines_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=35)
f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_sparse(self):
# A SpooledTemporaryFile that is written late in the file will extend
# when that occurs
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.seek(100, 0)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_fileno(self):
# A SpooledTemporaryFile should roll over to a real file on fileno()
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
self.assertTrue(f.fileno() > 0)
self.assertTrue(f._rolled)
def test_multiple_close_before_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile()
f.write(b'abc\n')
self.assertFalse(f._rolled)
f.close()
f.close()
f.close()
def test_multiple_close_after_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
self.assertTrue(f._rolled)
f.close()
f.close()
f.close()
def test_bound_methods(self):
# It should be OK to steal a bound method from a SpooledTemporaryFile
# and use it independently; when the file rolls over, those bound
# methods should continue to function
f = self.do_create(max_size=30)
read = f.read
write = f.write
seek = f.seek
write(b"a" * 35)
write(b"b" * 35)
seek(0, 0)
self.assertEqual(read(70), b'a'*35 + b'b'*35)
def test_properties(self):
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+b')
self.assertIsNone(f.name)
with self.assertRaises(AttributeError):
f.newlines
with self.assertRaises(AttributeError):
f.encoding
f.write(b'x')
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'rb+')
self.assertIsNotNone(f.name)
with self.assertRaises(AttributeError):
f.newlines
with self.assertRaises(AttributeError):
f.encoding
def test_text_mode(self):
# Creating a SpooledTemporaryFile with a text mode should produce
# a file object reading and writing (Unicode) text strings.
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
f.write("abc\n")
f.seek(0)
self.assertEqual(f.read(), "abc\n")
f.write("def\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\n")
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNone(f.name)
self.assertIsNone(f.newlines)
self.assertIsNone(f.encoding)
f.write("xyzzy\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
# Check that Ctrl+Z doesn't truncate the file
f.write("foo\x1abar\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNotNone(f.name)
self.assertEqual(f.newlines, os.linesep)
self.assertIsNotNone(f.encoding)
def test_text_newline_and_encoding(self):
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
newline='', encoding='utf-8')
f.write("\u039B\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n")
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNone(f.name)
self.assertIsNone(f.newlines)
self.assertIsNone(f.encoding)
f.write("\u039B" * 20 + "\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNotNone(f.name)
self.assertIsNotNone(f.newlines)
self.assertEqual(f.encoding, 'utf-8')
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_during_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_after_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
with f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_truncate_with_size_parameter(self):
# A SpooledTemporaryFile can be truncated to zero size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.seek(0)
f.truncate()
self.assertFalse(f._rolled)
self.assertEqual(f._file.getvalue(), b'')
# A SpooledTemporaryFile can be truncated to a specific size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.truncate(4)
self.assertFalse(f._rolled)
self.assertEqual(f._file.getvalue(), b'abcd')
# A SpooledTemporaryFile rolls over if truncated to large size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.truncate(20)
self.assertTrue(f._rolled)
if has_stat:
self.assertEqual(os.fstat(f.fileno()).st_size, 20)
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
class TestTemporaryFile(BaseTestCase):
"""Test TemporaryFile()."""
def test_basic(self):
# TemporaryFile can create files
# No point in testing the name params - the file has no name.
tempfile.TemporaryFile()
def test_has_no_name(self):
# TemporaryFile creates files with no names (on this system)
dir = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=dir)
f.write(b'blat')
# Sneaky: because this file has no name, it should not prevent
# us from removing the directory it was created in.
try:
os.rmdir(dir)
except:
# cleanup
f.close()
os.rmdir(dir)
raise
def test_multiple_close(self):
# A TemporaryFile can be closed many times without error
f = tempfile.TemporaryFile()
f.write(b'abc\n')
f.close()
f.close()
f.close()
# How to test the mode and bufsize parameters?
def test_mode_and_encoding(self):
def roundtrip(input, *args, **kwargs):
with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
fileobj.write(input)
fileobj.seek(0)
self.assertEqual(input, fileobj.read())
roundtrip(b"1234", "w+b")
roundtrip("abdc\n", "w+")
roundtrip("\u039B", "w+", encoding="utf-16")
roundtrip("foo\r\n", "w+", newline="")
# Helper for test_del_on_shutdown
class NulledModules:
def __init__(self, *modules):
self.refs = [mod.__dict__ for mod in modules]
self.contents = [ref.copy() for ref in self.refs]
def __enter__(self):
for d in self.refs:
for key in d:
d[key] = None
def __exit__(self, *exc_info):
for d, c in zip(self.refs, self.contents):
d.clear()
d.update(c)
class TestTemporaryDirectory(BaseTestCase):
"""Test TemporaryDirectory()."""
def do_create(self, dir=None, pre="", suf="", recurse=1):
if dir is None:
dir = tempfile.gettempdir()
tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
self.nameCheck(tmp.name, dir, pre, suf)
# Create a subdirectory and some files
if recurse:
d1 = self.do_create(tmp.name, pre, suf, recurse-1)
d1.name = None
with open(os.path.join(tmp.name, "test.txt"), "wb") as f:
f.write(b"Hello world!")
return tmp
def test_mkdtemp_failure(self):
# Check no additional exception if mkdtemp fails
# Previously would raise AttributeError instead
# (noted as part of Issue #10188)
with tempfile.TemporaryDirectory() as nonexistent:
pass
with self.assertRaises(FileNotFoundError) as cm:
tempfile.TemporaryDirectory(dir=nonexistent)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_explicit_cleanup(self):
# A TemporaryDirectory is deleted when cleaned up
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
self.assertTrue(os.path.exists(d.name),
"TemporaryDirectory %s does not exist" % d.name)
d.cleanup()
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after cleanup" % d.name)
finally:
os.rmdir(dir)
@support.skip_unless_symlink
def test_cleanup_with_symlink_to_a_directory(self):
# cleanup() should not follow symlinks to directories (issue #12464)
d1 = self.do_create()
d2 = self.do_create(recurse=0)
# Symlink d1/foo -> d2
os.symlink(d2.name, os.path.join(d1.name, "foo"))
# This call to cleanup() should not follow the "foo" symlink
d1.cleanup()
self.assertFalse(os.path.exists(d1.name),
"TemporaryDirectory %s exists after cleanup" % d1.name)
self.assertTrue(os.path.exists(d2.name),
"Directory pointed to by a symlink was deleted")
self.assertEqual(os.listdir(d2.name), ['test.txt'],
"Contents of the directory pointed to by a symlink "
"were deleted")
d2.cleanup()
@support.cpython_only
def test_del_on_collection(self):
# A TemporaryDirectory is deleted when garbage collected
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
name = d.name
del d # Rely on refcounting to invoke __del__
self.assertFalse(os.path.exists(name),
"TemporaryDirectory %s exists after __del__" % name)
finally:
os.rmdir(dir)
def test_del_on_shutdown(self):
# A TemporaryDirectory may be cleaned up during shutdown
with self.do_create() as dir:
for mod in ('os', 'shutil', 'sys', 'tempfile', 'warnings'):
code = """if True:
import os
import shutil
import sys
import tempfile
import warnings
tmp = tempfile.TemporaryDirectory(dir={dir!r})
sys.stdout.buffer.write(tmp.name.encode())
tmp2 = os.path.join(tmp.name, 'test_dir')
os.mkdir(tmp2)
with open(os.path.join(tmp2, "test.txt"), "w") as f:
f.write("Hello world!")
{mod}.tmp = tmp
warnings.filterwarnings("always", category=ResourceWarning)
""".format(dir=dir, mod=mod)
rc, out, err = script_helper.assert_python_ok("-c", code)
tmp_name = out.decode().strip()
self.assertFalse(os.path.exists(tmp_name),
"TemporaryDirectory %s exists after cleanup" % tmp_name)
err = err.decode('utf-8', 'backslashreplace')
self.assertNotIn("Exception ", err)
def test_warnings_on_cleanup(self):
# ResourceWarning will be triggered by __del__
with self.do_create() as dir:
d = self.do_create(dir=dir, recurse=3)
name = d.name
# Check for the resource warning
with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
warnings.filterwarnings("always", category=ResourceWarning)
del d
support.gc_collect()
self.assertFalse(os.path.exists(name),
"TemporaryDirectory %s exists after __del__" % name)
def test_multiple_close(self):
# Can be cleaned-up many times without error
d = self.do_create()
d.cleanup()
d.cleanup()
d.cleanup()
def test_context_manager(self):
# Can be used as a context manager
d = self.do_create()
with d as name:
self.assertTrue(os.path.exists(name))
self.assertEqual(name, d.name)
self.assertFalse(os.path.exists(name))
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
mancoast/CPythonPyc_test
|
fail/335_test_tempfile.py
|
Python
|
gpl-3.0
| 40,804
|
# Authors:
# Rob Crittenden <rcritten@redhat.com>
# Pavel Zuna <pzuna@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipaserver/plugins/automount.py' module.
"""
import textwrap
import tempfile
import shutil
import pytest
from ipalib import api
from ipalib import errors
from ipapython.dn import DN
import six
from nose.tools import raises, assert_raises # pylint: disable=E0611
from ipatests.test_xmlrpc.xmlrpc_test import XMLRPC_test, assert_attr_equal
from ipatests.util import assert_deepequal
if six.PY3:
unicode = str
class MockTextui(list):
"""Collects output lines"""
# Extend the mock object if other textui methods are called
def print_plain(self, line):
self.append(unicode(line))
class AutomountTest(XMLRPC_test):
"""Provides common functionality for automount tests"""
locname = u'testlocation'
tofiles_output = '' # To be overridden
def check_tofiles(self):
"""Check automountlocation_tofiles output against self.tofiles_output
"""
res = api.Command['automountlocation_tofiles'](self.locname)
mock_ui = MockTextui()
command = api.Command['automountlocation_tofiles']
command.output_for_cli(mock_ui, res, self.locname, version=u'2.88')
expected_output = self.tofiles_output
assert_deepequal(expected_output, u'\n'.join(mock_ui))
def check_import_roundtrip(self):
"""Check automountlocation_tofiles/automountlocation_import roundtrip
Loads self.tofiles_output (which should correspond to
automountlocation_tofiles output), then checks the resulting map
against tofiles_output again.
Do not use this if the test creates maps that aren't connected to
auto.master -- these can't be imported successfully.
"""
conf_directory = tempfile.mkdtemp()
# Parse the tofiles_output into individual files, replace /etc/ by
# our temporary directory name
current_file = None
for line in self.tofiles_output.splitlines():
line = line.replace('/etc/', '%s/' % conf_directory)
if line.startswith(conf_directory) and line.endswith(':'):
current_file = open(line.rstrip(':'), 'w')
elif '--------' in line:
current_file.close()
elif line.startswith('maps not connected to '):
break
else:
current_file.write(line + '\n')
        assert current_file is not None, ('The input file does not contain any '
                                          'records of files to be opened.')
current_file.close()
self.failsafe_add(api.Object.automountlocation, self.locname)
try:
# Feed the files to automountlocation_import & check
master_file = u'%s/auto.master' % conf_directory
automountlocation_import = api.Command['automountlocation_import']
res = automountlocation_import(self.locname, master_file,
version=u'2.88')
assert_deepequal(dict(
result=dict(
keys=lambda k: k,
maps=lambda m: m,
skipped=(),
duplicatemaps=(),
duplicatekeys=(),
)), res) # pylint: disable=used-before-assignment
self.check_tofiles()
finally:
res = api.Command['automountlocation_del'](self.locname)['result']
assert res
assert not res['failed']
# Success; delete the temporary directory
shutil.rmtree(conf_directory)
@pytest.mark.tier1
class test_automount(AutomountTest):
"""
Test the `automount` plugin.
"""
mapname = u'testmap'
keyname = u'testkey'
keyname_rename = u'testkey_rename'
keyname2 = u'testkey2'
description = u'description of map'
info = u'ro'
newinfo = u'rw'
map_kw = {'automountmapname': mapname, 'description': description, 'raw': True}
key_kw = {'automountkey': keyname, 'automountinformation': info, 'raw': True}
key_kw2 = {'automountkey': keyname2, 'automountinformation': info, 'raw': True}
tofiles_output = textwrap.dedent(u"""
/etc/auto.master:
/-\t/etc/auto.direct
---------------------------
/etc/auto.direct:
maps not connected to /etc/auto.master:
---------------------------
/etc/testmap:
testkey2\tro
""").strip()
def test_0_automountlocation_add(self):
"""
Test adding a location `xmlrpc.automountlocation_add` method.
"""
ret = self.failsafe_add(
api.Object.automountlocation, self.locname
)
entry = ret['result']
assert_attr_equal(entry, 'cn', self.locname)
def test_1_automountmap_add(self):
"""
Test adding a map `xmlrpc.automountmap_add` method.
"""
res = api.Command['automountmap_add'](self.locname, **self.map_kw)['result']
assert res
assert_attr_equal(res, 'automountmapname', self.mapname)
def test_2_automountkey_add(self):
"""
Test adding a key using `xmlrpc.automountkey_add` method.
"""
res = api.Command['automountkey_add'](self.locname, self.mapname, **self.key_kw2)['result']
assert res
assert_attr_equal(res, 'automountkey', self.keyname2)
def test_3_automountkey_add(self):
"""
Test adding a key using `xmlrpc.automountkey_add` method.
"""
res = api.Command['automountkey_add'](self.locname, self.mapname, **self.key_kw)['result']
assert res
assert_attr_equal(res, 'automountkey', self.keyname)
@raises(errors.DuplicateEntry)
def test_4_automountkey_add(self):
"""
Test adding a duplicate key using `xmlrpc.automountkey_add` method.
"""
api.Command['automountkey_add'](
self.locname, self.mapname, **self.key_kw)
def test_5_automountmap_show(self):
"""
Test the `xmlrpc.automountmap_show` method.
"""
res = api.Command['automountmap_show'](self.locname, self.mapname, raw=True)['result']
assert res
assert_attr_equal(res, 'automountmapname', self.mapname)
def test_6_automountmap_find(self):
"""
Test the `xmlrpc.automountmap_find` method.
"""
res = api.Command['automountmap_find'](self.locname, self.mapname, raw=True)['result']
assert_attr_equal(res[0], 'automountmapname', self.mapname)
def test_7_automountkey_show(self):
"""
Test the `xmlrpc.automountkey_show` method.
"""
showkey_kw={'automountkey': self.keyname, 'automountinformation' : self.info, 'raw': True}
res = api.Command['automountkey_show'](self.locname, self.mapname, **showkey_kw)['result']
assert res
assert_attr_equal(res, 'automountkey', self.keyname)
assert_attr_equal(res, 'automountinformation', self.info)
def test_8_automountkey_find(self):
"""
Test the `xmlrpc.automountkey_find` method.
"""
res = api.Command['automountkey_find'](self.locname, self.mapname, raw=True)['result']
assert res
assert len(res) == 2
assert_attr_equal(res[0], 'automountkey', self.keyname)
assert_attr_equal(res[0], 'automountinformation', self.info)
def test_9_automountkey_mod(self):
"""
Test the `xmlrpc.automountkey_mod` method.
"""
self.key_kw['newautomountinformation'] = self.newinfo
self.key_kw['rename'] = self.keyname_rename
res = api.Command['automountkey_mod'](self.locname, self.mapname, **self.key_kw)['result']
assert res
assert_attr_equal(res, 'automountinformation', self.newinfo)
assert_attr_equal(res, 'automountkey', self.keyname_rename)
def test_a1_automountmap_mod(self):
"""
Test the `xmlrpc.automountmap_mod` method.
"""
mod_kw = {'description': u'new description'}
res = api.Command['automountmap_mod'](self.locname, self.mapname, **mod_kw)['result']
assert res
assert_attr_equal(res, 'description', 'new description')
def test_a2_automountmap_tofiles(self):
"""
Test the `automountlocation_tofiles` command.
"""
res = api.Command['automountlocation_tofiles'](self.locname,
version=u'2.88')
assert_deepequal(dict(
result=dict(
keys={'auto.direct': ()},
orphanmaps=(dict(
dn=DN(('automountmapname', self.mapname),
('cn', self.locname),
('cn', 'automount'), api.env.basedn),
description=(u'new description',),
automountmapname=(u'testmap',)),),
orphankeys=[(
dict(
dn=DN(('description', self.keyname2),
('automountmapname', 'testmap'),
('cn', self.locname),
('cn', 'automount'), api.env.basedn),
automountkey=(self.keyname2,),
description=(self.keyname2,),
automountinformation=(u'ro',),
),
dict(
dn=DN(('description', self.keyname_rename),
('automountmapname', 'testmap'),
('cn', self.locname),
('cn', 'automount'), api.env.basedn),
automountkey=(self.keyname_rename,),
description=(self.keyname_rename,),
automountinformation=(u'rw',),
))],
maps=(
dict(
dn=DN(('description', '/- auto.direct'),
('automountmapname', 'auto.master'),
('cn', self.locname),
('cn', 'automount'), api.env.basedn),
automountkey=(u'/-',),
description=(u'/- auto.direct',),
automountinformation=(u'auto.direct',)
),
))), res)
# Also check the CLI output
self.check_tofiles()
def test_b_automountkey_del(self):
"""
Test the `xmlrpc.automountkey_del` method.
"""
delkey_kw={'automountkey': self.keyname_rename, 'automountinformation' : self.newinfo}
res = api.Command['automountkey_del'](self.locname, self.mapname, **delkey_kw)['result']
assert res
assert not res['failed']
# Verify that it is gone
with assert_raises(errors.NotFound):
api.Command['automountkey_show'](self.locname, self.mapname, **delkey_kw)
def test_c_automountlocation_del(self):
"""
Test the `xmlrpc.automountlocation_del` method.
"""
res = api.Command['automountlocation_del'](self.locname)['result']
assert res
assert not res['failed']
# Verify that it is gone
with assert_raises(errors.NotFound):
api.Command['automountlocation_show'](self.locname)
def test_d_automountmap_del(self):
"""
Test that the `xmlrpc.automountlocation_del` method removes all maps and keys
"""
# Verify that the second key we added is gone
key_kw = {'automountkey': self.keyname2, 'automountinformation': self.info, 'raw': True}
with assert_raises(errors.NotFound):
api.Command['automountkey_show'](self.locname, self.mapname, **key_kw)
@pytest.mark.tier1
class test_automount_direct(AutomountTest):
"""
    Test the `automount` plugin direct map functionality.
"""
mapname = u'auto.direct2'
keyname = u'/-'
direct_kw = { 'key' : keyname }
tofiles_output = textwrap.dedent(u"""
/etc/auto.master:
/-\t/etc/auto.direct
/-\t/etc/auto.direct2
---------------------------
/etc/auto.direct:
---------------------------
/etc/auto.direct2:
maps not connected to /etc/auto.master:
""").strip()
def test_0_automountlocation_add(self):
"""
Test adding a location.
"""
res = api.Command['automountlocation_add'](self.locname, raw=True)['result']
assert res
assert_attr_equal(res, 'cn', self.locname)
def test_1_automountmap_add_direct(self):
"""
Test adding a second direct map with a different info
"""
res = api.Command['automountmap_add_indirect'](self.locname, self.mapname, **self.direct_kw)['result']
assert res
assert_attr_equal(res, 'automountmapname', self.mapname)
@raises(errors.DuplicateEntry)
def test_2_automountmap_add_duplicate(self):
"""
Test adding a duplicate direct map.
"""
api.Command['automountmap_add_indirect'](
self.locname, self.mapname, **self.direct_kw)
def test_2a_automountmap_tofiles(self):
"""Test the `automountmap_tofiles` command"""
self.check_tofiles()
def test_3_automountlocation_del(self):
"""
Remove the location.
"""
res = api.Command['automountlocation_del'](self.locname)['result']
assert res
assert not res['failed']
# Verify that it is gone
with assert_raises(errors.NotFound):
api.Command['automountlocation_show'](self.locname)
def test_z_import_roundtrip(self):
"""Check automountlocation_tofiles/automountlocation_import roundtrip
"""
self.check_import_roundtrip()
@pytest.mark.tier1
class test_automount_indirect(AutomountTest):
"""
Test the `automount` plugin indirect map functionality.
"""
mapname = u'auto.home'
keyname = u'/home'
parentmap = u'auto.master'
map_kw = {'key': keyname, 'parentmap': parentmap, 'raw': True}
key_kw = {'automountkey': keyname, 'automountinformation': mapname}
tofiles_output = textwrap.dedent(u"""
/etc/auto.master:
/-\t/etc/auto.direct
/home\t/etc/auto.home
---------------------------
/etc/auto.direct:
---------------------------
/etc/auto.home:
maps not connected to /etc/auto.master:
""").strip()
def test_0_automountlocation_add(self):
"""
Test adding a location.
"""
res = api.Command['automountlocation_add'](self.locname, raw=True)['result']
assert res
assert_attr_equal(res, 'cn', self.locname)
def test_1_automountmap_add_indirect(self):
"""
Test adding an indirect map.
"""
res = api.Command['automountmap_add_indirect'](self.locname, self.mapname, **self.map_kw)['result']
assert res
assert_attr_equal(res, 'automountmapname', self.mapname)
@raises(errors.DuplicateEntry)
def test_1a_automountmap_add_indirect(self):
"""
Test adding a duplicate indirect map.
"""
api.Command['automountmap_add_indirect'](self.locname, self.mapname, **self.map_kw)
def test_2_automountmap_show(self):
"""
Test the `xmlrpc.automountmap_show` method.
"""
res = api.Command['automountmap_show'](self.locname, self.mapname, raw=True)['result']
assert res
assert_attr_equal(res, 'automountmapname', self.mapname)
def test_2a_automountmap_tofiles(self):
"""Test the `automountmap_tofiles` command"""
self.check_tofiles()
def test_3_automountkey_del(self):
"""
Remove the indirect key /home.
"""
res = api.Command['automountkey_del'](self.locname, self.parentmap, **self.key_kw)['result']
assert res
assert not res['failed']
# Verify that it is gone
with assert_raises(errors.NotFound):
api.Command['automountkey_show'](self.locname, self.parentmap, **self.key_kw)
def test_4_automountmap_del(self):
"""
Remove the indirect map for auto.home.
"""
res = api.Command['automountmap_del'](self.locname, self.mapname)['result']
assert res
assert not res['failed']
# Verify that it is gone
with assert_raises(errors.NotFound):
api.Command['automountmap_show'](self.locname, self.mapname)
def test_5_automountlocation_del(self):
"""
Remove the location.
"""
res = api.Command['automountlocation_del'](self.locname)['result']
assert res
assert not res['failed']
# Verify that it is gone
with assert_raises(errors.NotFound):
api.Command['automountlocation_show'](self.locname)
def test_z_import_roundtrip(self):
"""Check automountlocation_tofiles/automountlocation_import roundtrip
"""
self.check_import_roundtrip()
@pytest.mark.tier1
class test_automount_indirect_no_parent(AutomountTest):
"""
Test the `automount` plugin indirect map functionality without an explicit parent map.
"""
mapname = u'auto.home'
keyname = u'/home'
mapname2 = u'auto.direct2'
keyname2 = u'direct2'
parentmap = u'auto.master'
map_kw = {'key': keyname, 'raw': True}
map_kw2 = {'key': keyname2, 'raw': True}
tofiles_output = textwrap.dedent(u"""
/etc/auto.master:
/-\t/etc/auto.direct
/home\t/etc/auto.home
---------------------------
/etc/auto.direct:
---------------------------
/etc/auto.home:
direct2\t-fstype=autofs ldap:auto.direct2
maps not connected to /etc/auto.master:
---------------------------
/etc/auto.direct2:
""").strip()
def test_0_automountlocation_add(self):
"""
Test adding a location.
"""
res = api.Command['automountlocation_add'](self.locname, raw=True)['result']
assert res
assert_attr_equal(res, 'cn', self.locname)
def test_1_automountmap_add_indirect(self):
"""
Test adding an indirect map with default parent.
"""
res = api.Command['automountmap_add_indirect'](self.locname, self.mapname, **self.map_kw)['result']
assert res
assert_attr_equal(res, 'automountmapname', self.mapname)
def test_2_automountkey_show(self):
"""
Test the `xmlrpc.automountkey_show` method with default parent.
"""
showkey_kw = {'automountkey': self.keyname, 'automountinformation': self.mapname, 'raw': True}
res = api.Command['automountkey_show'](self.locname, self.parentmap, **showkey_kw)['result']
assert res
assert_attr_equal(res, 'automountkey', self.keyname)
def test_2a_automountmap_add_indirect(self):
"""
Test adding an indirect map with a non-default parent map.
"""
res = api.Command['automountmap_add_indirect'](self.locname,
u'auto.direct2', parentmap=self.mapname, **self.map_kw2)['result']
assert res
assert_attr_equal(res, 'automountmapname', self.mapname2)
def test_2b_automountmap_tofiles(self):
"""Test the `automountmap_tofiles` command"""
self.check_tofiles()
def test_3_automountkey_del(self):
"""
Remove the indirect key /home.
"""
delkey_kw={'automountkey': self.keyname, 'automountinformation': self.mapname}
res = api.Command['automountkey_del'](self.locname, self.parentmap, **delkey_kw)['result']
assert res
assert not res['failed']
# Verify that it is gone
with assert_raises(errors.NotFound):
api.Command['automountkey_show'](self.locname, self.parentmap, **delkey_kw)
def test_4_automountmap_del(self):
"""
Remove the indirect map for auto.home.
"""
res = api.Command['automountmap_del'](self.locname, self.mapname)['result']
assert res
assert not res['failed']
# Verify that it is gone
with assert_raises(errors.NotFound):
api.Command['automountmap_show'](self.locname, self.mapname)
def test_5_automountlocation_del(self):
"""
Remove the location.
"""
res = api.Command['automountlocation_del'](self.locname)['result']
assert res
assert not res['failed']
# Verify that it is gone
with assert_raises(errors.NotFound):
api.Command['automountlocation_show'](self.locname)
|
redhatrises/freeipa
|
ipatests/test_xmlrpc/test_automount_plugin.py
|
Python
|
gpl-3.0
| 21,461
|
from pypom import Region
from selenium.webdriver.common.by import By
from pages.base import BasePage
class DashboardPage(BasePage):
URL_TEMPLATE = '/{locale}/dashboards/revisions'
_revision_filter_form_locator = (By.ID, 'revision-filter')
_revision_page_input = (By.ID, 'revision-page')
_notification_tray_locator = (By.CSS_SELECTOR, '.notification-tray')
_first_notification_locator = (By.CSS_SELECTOR, '.notification-tray .notification:first-child')
_parent_locator = (By.ID, 'revision-replace-block')
_pagination_locator = (By.CSS_SELECTOR, '.pagination')
_page_two_link = (By.CSS_SELECTOR, '.pagination > li:first-child + li a')
_ip_toggle_locator = (By.ID, 'show_ips_btn')
_first_row_locator = (By.CSS_SELECTOR,
'.dashboard-table tbody .dashboard-row:first-child')
_first_details_locator = (By.CSS_SELECTOR,
'.dashboard-table tbody .dashboard-row:first-child + .dashboard-detail')
_first_details_diff_locator = (By.CSS_SELECTOR,
'.dashboard-table tbody .dashboard-row:first-child + .dashboard-detail .diff')
_details_locator = (By.CSS_SELECTOR, '.dashboard-detail')
@property
def is_ip_toggle_present(self):
try:
self.find_element(*self._ip_toggle_locator)
return True
except Exception:
return False
@property
def first_row(self):
first_row = self.find_element(*self._first_row_locator)
return DashboardRow(self, root=first_row)
@property
def first_row_id(self):
return self.find_element(*self._first_row_locator).get_attribute('data-revision-id')
@property
def details_items_length(self):
details_items = self.find_elements(*self._details_locator)
return len(details_items)
def open_first_details(self):
first_row = self.find_element(*self._first_row_locator)
first_row.click()
self.wait.until(lambda s: len(self.find_elements(*self._details_locator)) > 0)
@property
def is_first_details_displayed(self):
first_details = self.find_element(*self._first_details_locator)
return first_details.is_displayed()
@property
def is_first_details_diff_displayed(self):
first_details_diff = self.find_element(*self._first_details_diff_locator)
return first_details_diff.is_displayed()
def click_page_two(self):
revision_filter_form = self.find_element(*self._revision_filter_form_locator)
page_two_link = self.find_element(*self._page_two_link)
page_two_link.click()
# revision-page input updates to a value other than 1
self.wait.until(lambda s: int(self.find_element(*self._revision_page_input).get_attribute('value')) != 1)
# form is disabled while the ajax request is made
self.wait.until(lambda s: 'disabled' in revision_filter_form.get_attribute('class'))
# wait for tray to be added
self.wait.until(lambda s: len(self.find_elements(*self._notification_tray_locator)) > 0)
# wait for notification in tray
self.wait.until(lambda s: len(self.find_elements(*self._first_notification_locator)) > 0)
# form editable when ajax response arrives
self.wait.until(lambda s: 'disabled' not in revision_filter_form.get_attribute('class'))
# wait for notification to close
self.wait.until(lambda s: 'closed' in self.find_element(*self._first_notification_locator).get_attribute('class'))
# revision-page input updates back to 1
self.wait.until(lambda s: int(self.find_element(*self._revision_page_input).get_attribute('value')) == 1)
# opacity manipulation finishes
self.wait.until(lambda s: 'opacity' not in self.find_element(*self._parent_locator).get_attribute('style'))
@property
def dashboard_not_overflowing(self):
has_horizontal_scrollbar = self.selenium.execute_script("return document.documentElement.scrollWidth>document.documentElement.clientWidth;")
return not has_horizontal_scrollbar
class DashboardRow(Region):
_root_locator = (By.CSS_SELECTOR, '.dashboard-row')
_ban_ip_locator = (By.CSS_SELECTOR, '.dashboard-ban-ip-link')
_spam_ham_button_locator = (By.CSS_SELECTOR, '.spam-ham-button')
@property
def revision_id(self):
return self.root.get_attribute('data-revision-id')
@property
def is_ip_ban_present(self):
try:
self.find_element(*self._ban_ip_locator)
return True
except Exception:
return False
@property
def is_spam_ham_button_present(self):
try:
self.find_element(*self._spam_ham_button_locator)
return True
except Exception:
return False
class DashboardDetail(Region):
_root_locator = (By.CSS_SELECTOR, '.dashboard-detail')
_page_buttons_locator = (By.CSS_SELECTOR, '.page-buttons li a')
_diff_locator = (By.CSS_SELECTOR, '.diff')
_diff_rows_locator = (By.CSS_SELECTOR, '.diff tbody tr')
|
yfdyh000/kuma
|
tests/pages/dashboard.py
|
Python
|
mpl-2.0
| 5,047
|
from bedrock.redirects.util import (redirect, is_firefox_redirector,
platform_redirector, no_redirect)
def firefox_mobile_faq(request, *args, **kwargs):
qs = request.META.get('QUERY_STRING', '')
if 'os=firefox-os' in qs:
return 'https://support.mozilla.org/products/firefox-os'
return 'firefox.android.faq'
def firefox_channel(*args, **kwargs):
return platform_redirector('firefox.channel.desktop',
'firefox.channel.android',
'firefox.channel.ios')
redirectpatterns = (
# overrides
redirect(r'^firefox/aurora/all/?$', 'firefox.all', to_kwargs={'channel': 'developer'}),
# bug 831810 & 1142583 & 1239960, 1329931
redirect(r'^mwc/?$', 'https://support.mozilla.org/products/firefox-os', re_flags='i'),
# bug 748503
redirect(r'^projects/firefox/[^/]+a[0-9]+/firstrun(?P<p>.*)$',
'/firefox/nightly/firstrun{p}'),
# bug 1275483
redirect(r'^firefox/nightly/whatsnew/?', 'firefox.nightly_firstrun'),
# bug 840814
redirect(r'^projects/firefox'
r'(?P<version>/(?:\d+\.\d+\.?(?:\d+)?\.?(?:\d+)?(?:[a|b]?)(?:\d*)(?:pre)?(?:\d)?))'
r'(?P<page>/(?:firstrun|whatsnew))'
r'(?P<rest>/.*)?$', '/firefox{version}{page}{rest}'),
# bug 877165
redirect(r'^firefox/connect', 'mozorg.home'),
# bug 657049, 1238851
redirect(r'^firefox/accountmanager/?$', 'https://developer.mozilla.org/Persona'),
# Bug 1009247, 1101220, 1299947, 1314603, 1328409
redirect(r'^(firefox/)?beta/?$', firefox_channel(), cache_timeout=0, anchor='beta'),
redirect(r'^(firefox/)?aurora/?$', firefox_channel(), cache_timeout=0, anchor='aurora'),
redirect(r'^(firefox/)?nightly/?$', firefox_channel(), cache_timeout=0, anchor='nightly'),
redirect(r'^mobile/beta/?$', 'firefox.channel.android', anchor='beta'),
redirect(r'^mobile/aurora/?$', 'firefox.channel.android', anchor='aurora'),
redirect(r'^mobile/nightly/?$', 'firefox.channel.android', anchor='nightly'),
# bug 988044
redirect(r'^firefox/unsupported-systems\.html$', 'firefox.unsupported-systems'),
# bug 736934, 860865, 1101220, 1153351
redirect(r'^mobile/(?P<channel>(?:beta|aurora)/)?notes/?$',
'/firefox/android/{channel}notes/'),
redirect(r'^firefox/(?P<channel>(?:beta|aurora|organizations)/)?system-requirements(\.html)?$',
'/firefox/{channel}system-requirements/'),
# bug 1155870
redirect(r'^firefox/os/(releases|notes)/?$',
'https://developer.mozilla.org/Firefox_OS/Releases'),
redirect(r'^firefox/os/(?:release)?notes/(?P<v>[^/]+)/?$',
'https://developer.mozilla.org/Firefox_OS/Releases/{v}'),
# bug 878871
redirect(r'^firefoxos', '/firefox/os/'),
# Bug 1006616
redirect(r'^download/?$', 'firefox.new'),
# bug 837883
redirect(r'^firefox/firefox\.exe$', 'mozorg.home', re_flags='i'),
# bug 821006
redirect(r'^firefox/all(\.html)?$', 'firefox.all'),
# bug 727561
redirect(r'^firefox/search(?:\.html)?$', 'firefox.new'),
# bug 860865, 1101220
redirect(r'^firefox/all-(?:beta|rc)(?:/|\.html)?$', 'firefox.all',
to_kwargs={'channel': 'beta'}),
redirect(r'^firefox/all-aurora(?:/|\.html)?$', 'firefox.all',
to_kwargs={'channel': 'developer'}),
redirect(r'^firefox/aurora/(?P<page>all|notes|system-requirements)/?$',
'/firefox/developer/{page}/'),
redirect(r'^firefox/organizations/all\.html$', 'firefox.all',
to_kwargs={'channel': 'organizations'}),
# bug 729329
redirect(r'^mobile/sync', 'firefox.sync'),
# bug 882845
redirect(r'^firefox/toolkit/download-to-your-devices', 'firefox.new'),
# bug 1014823
redirect(r'^(products/)?firefox/releases/whatsnew/?$', 'firefox.whatsnew'),
# bug 929775
redirect(r'^firefox/update', 'firefox.new', query={
'utm_source': 'firefox-browser',
'utm_medium': 'firefox-browser',
'utm_campaign': 'firefox-update-redirect',
}),
# Bug 868182, 986174
redirect(r'^(m|(firefox/)?mobile)/features/?$', 'firefox.android.index'),
redirect(r'^(m|(firefox/)?mobile)/faq/?$', firefox_mobile_faq, query=False),
# bug 884933
redirect(r'^(m|(firefox/)?mobile)/platforms/?$',
'https://support.mozilla.org/kb/will-firefox-work-my-mobile-device'),
redirect(r'^m/?$', 'firefox.new'),
# Bug 730488 deprecate /firefox/all-older.html
redirect(r'^firefox/all-older\.html$', 'firefox.new'),
# bug 1120658
redirect(r'^seamonkey-transition\.html$',
'http://www-archive.mozilla.org/seamonkey-transition.html'),
# Bug 1186373
redirect(r'^firefox/hello/npssurvey/?$',
'https://www.surveygizmo.com/s3/2227372/Firefox-Hello-Product-Survey',
permanent=False),
# Bug 1221739
redirect(r'^firefox/hello/feedbacksurvey/?$',
'https://www.surveygizmo.com/s3/2319863/d2b7dc4b5687',
permanent=False),
# bug 1148127
redirect(r'^products/?$', 'firefox.family.index'),
# Bug 1110927
redirect(r'^(products/)?firefox/start/central\.html$', 'firefox.new'),
redirect(r'^firefox/sync/firstrun\.html$', 'firefox.sync'),
# Bug 920212
redirect(r'^firefox/fx/?$', 'firefox.new'),
# Bug 979531, 1003727, 979664, 979654, 979660
redirect(r'^firefox/customize/?$', 'firefox.desktop.customize'),
redirect(r'^firefox/(?:performance|happy|speed|memory)/?$', 'firefox.desktop.fast'),
redirect(r'^firefox/security/?$', 'firefox.desktop.trust'),
redirect(r'^firefox/technology/?$', 'https://developer.mozilla.org/docs/Tools'),
# Bug 979527
redirect(r'^(products/)?firefox/central(/|\.html|-lite\.html)?$', is_firefox_redirector(
'https://support.mozilla.org/kb/get-started-firefox-overview-main-features',
'firefox.new'), cache_timeout=0),
# bug 868169
redirect(r'^mobile/android-download\.html$',
'https://play.google.com/store/apps/details',
query={'id': 'org.mozilla.firefox'}, merge_query=True),
redirect(r'^mobile/android-download-beta\.html$',
'https://play.google.com/store/apps/details',
query={'id': 'org.mozilla.firefox_beta'}, merge_query=True),
# bug 675031
redirect(r'^projects/fennec(?P<page>/[\/\w\.-]+)?',
'http://website-archive.mozilla.org/www.mozilla.org/fennec_releasenotes/projects/fennec{page}'),
# bug 876581
redirect(r'^firefox/phishing-protection(/?)$',
'https://support.mozilla.org/kb/how-does-phishing-and-malware-protection-work'),
# bug 1006079
redirect(r'^mobile/home/?(?:index.html)?$',
'https://blog.mozilla.org/services/2012/08/31/retiring-firefox-home/'),
# bug 949562
redirect(r'^mobile/home/1\.0/releasenotes(?:/(?:index.html)?)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/1.0/releasenotes/'),
redirect(r'^mobile/home/1\.0\.2/releasenotes(?:/(?:index.html)?)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/1.0.2/releasenotes/'),
redirect(r'^mobile/home/faq(?:/(?:index.html)?)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/faq/'),
# bug 960064
redirect(r'^firefox/(?P<num>vpat-[.1-5]+)(?:\.html)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_vpat/firefox-{num}.html'),
redirect(r'^firefox/vpat(?:\.html)?',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_vpat/firefox-vpat-3.html'),
# bug 1017564
redirect(r'^mobile/.+/system-requirements/?$',
'https://support.mozilla.org/kb/will-firefox-work-my-mobile-device'),
# bug 858315
redirect(r'^projects/devpreview/firstrun(?:/(?:index.html)?)?$', '/firefox/firstrun/'),
redirect(r'^projects/devpreview/(?P<page>[\/\w\.-]+)?$',
'http://website-archive.mozilla.org/www.mozilla.org/devpreview_releasenotes/projects/devpreview/{page}'),
# bug 1001238, 1025056
no_redirect(r'^firefox/(24\.[5678]\.\d|28\.0)/releasenotes/?$'),
# bug 1235082
no_redirect(r'^firefox/23\.0(\.1)?/releasenotes/?$'),
# bug 947890, 1069902
redirect(r'^firefox/releases/(?P<v>[01]\.(?:.*))$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US/firefox/releases/{v}'),
redirect(r'^(?P<path>(?:firefox|mobile)/(?:\d)\.(?:.*)/releasenotes(?:.*))$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US/{path}'),
#
# bug 988746, 989423, 994186, 1153351
redirect(r'^mobile/(?P<v>2[38]\.0(?:\.\d)?|29\.0(?:beta|\.\d)?)/releasenotes/?$',
'/firefox/android/{v}/releasenotes/'),
redirect(r'^mobile/(?P<v>[3-9]\d\.\d(?:a2|beta|\.\d)?)/(?P<p>aurora|release)notes/?$',
'/firefox/android/{v}/{p}notes/'),
# bug 1041712, 1069335, 1069902
redirect(r'^(?P<prod>firefox|mobile)/(?P<vers>([0-9]|1[0-9]|2[0-8])\.(\d+(?:beta|a2|\.\d+)?))'
r'/(?P<channel>release|aurora)notes/(?P<page>[\/\w\.-]+)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US'
'/{prod}/{vers}/{channel}notes/{page}'),
# bug 767614 superseded by bug 957711 and 1003718 and 1239960
redirect(r'^(mobile|fennec)/?$', 'firefox.family.index'),
# bug 876668
redirect(r'^mobile/customize(?:/.*)?$', '/firefox/android/'),
# bug 1211907
redirect(r'^firefox/independent/?$', 'firefox.new'),
redirect(r'^firefox/personal/?$', 'firefox.new'),
# bug 845983
redirect(r'^metrofirefox(?P<path>/.*)?$', '/firefox{path}'),
# bug 1003703, 1009630
redirect(r'^firefox(?P<vers>/.+)/firstrun/eu/?$', '/firefox{vers}/firstrun/', query={
'utm_source': 'direct',
'utm_medium': 'none',
'utm_campaign': 'redirect',
'utm_content': 'eu-firstrun-redirect',
}),
# bug 960543
redirect(r'^firefox/(?P<vers>[23])\.0/eula', '/legal/eula/firefox-{vers}/'),
# bug 1224060
redirect(
r'^ja/firefox/ios/(?P<vers>[0-9]+(\.[0-9]+)*)/(?P<file>releasenotes|system-requirements)',
'https://www.mozilla.jp/firefox/ios/{vers}/{file}/', locale_prefix=False),
# bug 1150713
redirect(r'^firefox/sms(?:/.*)?$', 'firefox.family.index'),
# Redirects for SeaMonkey project website, now living at seamonkey-project.org
redirect(r'^projects/seamonkey/$', 'http://www.seamonkey-project.org/'),
redirect(r'^projects/seamonkey/artwork\.html$',
'http://www.seamonkey-project.org/dev/artwork'),
redirect(r'^projects/seamonkey/community\.html$',
'http://www.seamonkey-project.org/community'),
redirect(r'^projects/seamonkey/get-involved\.html$',
'http://www.seamonkey-project.org/dev/get-involved'),
redirect(r'^projects/seamonkey/index\.html$', 'http://www.seamonkey-project.org/'),
redirect(r'^projects/seamonkey/news\.html$', 'http://www.seamonkey-project.org/news'),
redirect(r'^projects/seamonkey/project-areas\.html$',
'http://www.seamonkey-project.org/dev/project-areas'),
redirect(r'^projects/seamonkey/releases/$', 'http://www.seamonkey-project.org/releases/'),
redirect(r'^projects/seamonkey/releases/index\.html$',
'http://www.seamonkey-project.org/releases/'),
redirect(r'^projects/seamonkey/review-and-flags\.html$',
'http://www.seamonkey-project.org/dev/review-and-flags'),
redirect(r'^projects/seamonkey/releases/(?P<vers>1\..*)\.html$',
'http://www.seamonkey-project.org/releases/{vers}'),
redirect(r'^projects/seamonkey/releases/seamonkey(?P<x>.*)/index.html$',
'http://www.seamonkey-project.org/releases/seamonkey{x}/'),
redirect(r'^projects/seamonkey/releases/seamonkey(?P<x>.*/.*).html$',
'http://www.seamonkey-project.org/releases/seamonkey{x}'),
redirect(r'^projects/seamonkey/releases/updates/(?P<x>.*)$',
'http://www.seamonkey-project.org/releases/updates/{x}'),
redirect(r'^projects/seamonkey/start/$', 'http://www.seamonkey-project.org/start/'),
# Bug 638948 redirect beta privacy policy link
redirect(r'^firefox/beta/feedbackprivacypolicy/?$', '/privacy/firefox/'),
# Bug 1238248
redirect(r'^firefox/push/?$',
'https://support.mozilla.org/kb/push-notifications-firefox'),
# Bug 1239960
redirect(r'^firefox/partners/?$', 'https://support.mozilla.org/products/firefox-os'),
# Bug 1243060
redirect(r'^firefox/tiles/?$',
'https://support.mozilla.org/kb/about-tiles-new-tab'),
# Bug 1239863, 1329931
redirect(r'^firefox/os(/.*)?$', 'https://support.mozilla.org/products/firefox-os'),
# Bug 1252332
redirect(r'^sync/?$', 'firefox.sync'),
# Bug 424204
redirect(r'^firefox/help/?$', 'https://support.mozilla.org/'),
redirect(r'^fxandroid/?$', 'firefox.android.index'),
# Bug 1255882
redirect(r'^firefox/personal', 'firefox.new'),
redirect(r'^firefox/upgrade', 'firefox.new'),
redirect(r'^firefox/ie', 'firefox.new'),
# must go above the bug 1255882 stuff below
redirect('^projects/xul/joy-of-xul\.html$',
'https://developer.mozilla.org/docs/Mozilla/Tech/XUL/The_Joy_of_XUL'),
redirect('^projects/xul/xre(old)?\.html$',
'https://developer.mozilla.org/docs/Archive/Mozilla/XULRunner'),
redirect('^projects/xslt/js-interface\.html$',
'https://developer.mozilla.org/docs/'
'Web/XSLT/Using_the_Mozilla_JavaScript_interface_to_XSL_Transformations'),
redirect('^projects/xslt/faq\.html$',
'https://developer.mozilla.org/docs/'
'Web/API/XSLTProcessor/XSL_Transformations_in_Mozilla_FAQ'),
redirect('^projects/xslt/standalone\.html$',
'https://developer.mozilla.org/docs/'
'Archive/Mozilla/Building_TransforMiiX_standalone'),
redirect('^projects/plugins/first-install-problem\.html$',
'https://developer.mozilla.org/Add-ons/Plugins/The_First_Install_Problem'),
redirect('^projects/plugins/install-scheme\.html$',
'https://developer.mozilla.org/docs/'
'Installing_plugins_to_Gecko_embedding_browsers_on_Windows'),
redirect('^projects/plugins/npruntime-sample-in-visual-studio\.html$',
'https://developer.mozilla.org/docs/'
'Compiling_The_npruntime_Sample_Plugin_in_Visual_Studio'),
redirect('^projects/plugins/npruntime\.html$',
'https://developer.mozilla.org/docs/Plugins/Guide/Scripting_plugins'),
redirect('^projects/plugins/plugin-host-control\.html$',
'https://developer.mozilla.org/docs/'
'Archive/Mozilla/ActiveX_Control_for_Hosting_Netscape_Plug-ins_in_IE'),
redirect('^projects/plugins/xembed-plugin-extension\.html$',
'https://developer.mozilla.org/Add-ons/Plugins/XEmbed_Extension_for_Mozilla_Plugins'),
redirect('^projects/netlib/http/http-debugging\.html$',
'https://developer.mozilla.org/docs/Mozilla/Debugging/HTTP_logging'),
redirect('^projects/netlib/integrated-auth\.html$',
'https://developer.mozilla.org/docs/Mozilla/Integrated_authentication'),
redirect('^projects/netlib/Link_Prefetching_FAQ\.html$',
'https://developer.mozilla.org/docs/Web/HTTP/Link_prefetching_FAQ'),
redirect(r'^projects/embedding/GRE\.html$',
'https://developer.mozilla.org/docs/Archive/Mozilla/GRE'),
redirect(r'^projects/embedding/windowAPIs\.html$',
'https://developer.mozilla.org/docs/Mozilla/Tech/Embedded_Dialog_API'),
redirect(r'^projects/embedding/howto/config\.html$',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Roll_your_own_browser'),
redirect(r'^projects/embedding/howto/Initializations\.html$',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Roll_your_own_browser'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasicsTOC\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#toc'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics2\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#Why_Gecko'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics3\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#What_You_Need_to_Embed'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics4\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Getting_the_Code'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics5\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Understanding_the_Coding_Environment'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics6\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XPCOM'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics7\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XPIDL'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics8\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#XPConnect_and_XPT_files'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics9\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#String_classes'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics10\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XUL.2FXBL'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics11\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Choosing_Additional_Functionalities'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics12\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#What_Gecko_Provides'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics13\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#What_You_Provide'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics14\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Common_Embedding_Tasks'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics16\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Appendix:_Data_Flow_Inside_Gecko'),
redirect(r'^projects/embedding/examples/',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Roll_your_own_browser'),
# Bug 1255882
redirect(r'^projects/bonecho/anti-phishing/?$',
'https://support.mozilla.org/kb/how-does-phishing-and-malware-protection-work'),
redirect(r'^projects/bonecho(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/bonsai(/.*)?$', 'https://wiki.mozilla.org/Bonsai'),
redirect(r'^projects/camino(/.*)?$', 'http://caminobrowser.org/'),
redirect(r'^projects/cck(/.*)?$', 'https://wiki.mozilla.org/CCK'),
redirect(r'^projects/chimera(/.*)?$', 'http://caminobrowser.org/'),
redirect(r'^projects/deerpark(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/embedding/faq\.html$',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/FAQ/How_do_I...'),
redirect(r'^projects/embedding(/.*)?$',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla'),
redirect(r'^projects/granparadiso(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/inspector/faq\.html$',
'https://developer.mozilla.org/docs/Tools/Add-ons/DOM_Inspector/DOM_Inspector_FAQ'),
redirect(r'^projects/inspector(/.*)?$',
'https://developer.mozilla.org/docs/Tools/Add-ons/DOM_Inspector'),
redirect(r'^projects/javaconnect(/.*)?$',
'http://developer.mozilla.org/en/JavaXPCOM'),
redirect(r'^projects/minefield(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/minimo(/.*)?$', 'https://wiki.mozilla.org/Mobile'),
redirect(r'^projects/namoroka(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/nspr(?:/.*)?$', 'https://developer.mozilla.org/docs/NSPR'),
redirect(r'^projects/netlib(/.*)?$',
'https://developer.mozilla.org/docs/Mozilla/Projects/Necko'),
redirect(r'^projects/plugins(/.*)?$', 'https://developer.mozilla.org/Add-ons/Plugins'),
redirect(r'^projects/rt-messaging(/.*)?$', 'http://chatzilla.hacksrus.com/'),
redirect(r'^projects/shiretoko(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/string(/.*)?$',
'https://developer.mozilla.org/en/XPCOM_string_guide'),
redirect(r'^projects/tech-evangelism(/.*)?$',
'https://wiki.mozilla.org/Evangelism'),
redirect(r'^projects/venkman(/.*)?$',
'https://developer.mozilla.org/docs/Archive/Mozilla/Venkman'),
redirect(r'^projects/webservices/examples/babelfish-wsdl(/.*)?$',
'https://developer.mozilla.org/docs/SOAP_in_Gecko-based_Browsers'),
redirect(r'^projects/xbl(/.*)?$', 'https://developer.mozilla.org/docs/Mozilla/Tech/XBL'),
redirect(r'^projects/xforms(/.*)?$', 'https://developer.mozilla.org/docs/Archive/Web/XForms'),
redirect(r'^projects/xpcom(/.*)?$', 'https://developer.mozilla.org/docs/Mozilla/Tech/XPCOM'),
redirect(r'^projects/xpinstall(/.*)?$',
'https://developer.mozilla.org/docs/Archive/Mozilla/XPInstall'),
redirect(r'^projects/xslt(/.*)?$', 'https://developer.mozilla.org/docs/Web/XSLT'),
redirect(r'^projects/xul(/.*)?$', 'https://developer.mozilla.org/docs/Mozilla/Tech/XUL'),
redirect(r'^quality/help(/.*)?$', 'http://quality.mozilla.org/get-involved'),
redirect(r'^quality(/.*)?$', 'http://quality.mozilla.org/'),
# Bug 654614 /blocklist -> addons.m.o/blocked
redirect(r'^blocklist(/.*)?$', 'https://addons.mozilla.org/blocked/'),
redirect('^products/firebird$', 'firefox.family.index'),
redirect('^products/firebird/download/$', 'firefox.new'),
redirect('^products/firefox/add-engines\.html$',
'https://addons.mozilla.org/search-engines.php'),
redirect('^products/firefox/all$', '/firefox/all/'),
redirect('^products/firefox/all\.html$', '/firefox/all/'),
redirect('^products/firefox/banners\.html$', '/contribute/friends/'),
redirect('^products/firefox/buttons\.html$', '/contribute/friends/'),
redirect('^products/firefox/download', 'firefox.new'),
redirect('^products/firefox/get$', 'firefox.new'),
redirect('^products/firefox/$', 'firefox.family.index'),
redirect('^products/firefox/live-bookmarks', '/firefox/features/'),
redirect('^products/firefox/mirrors\.html$', 'http://www-archive.mozilla.org/mirrors.html'),
redirect('^products/firefox/releases/$', '/firefox/releases/'),
redirect('^products/firefox/releases/0\.9\.2\.html$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes'
'/en-US/firefox/releases/0.9.1.html'),
redirect('^products/firefox/releases/0\.10\.1\.html$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes'
'/en-US/firefox/releases/0.10.html'),
redirect('^products/firefox/search', '/firefox/features/'),
redirect('^products/firefox/shelf\.html$', 'https://blog.mozilla.org/press/awards/'),
redirect('^products/firefox/smart-keywords\.html$',
'https://support.mozilla.org/en-US/kb/Smart+keywords'),
redirect('^products/firefox/support/$', 'https://support.mozilla.org/'),
redirect('^products/firefox/switch', 'firefox.new'),
redirect('^products/firefox/system-requirements', '/firefox/system-requirements/'),
redirect('^products/firefox/tabbed-browsing', 'firefox.desktop.index'),
redirect('^products/firefox/text-zoom\.html$',
'https://support.mozilla.org/kb/font-size-and-zoom-increase-size-of-web-pages'),
redirect('^products/firefox/themes$', 'https://addons.mozilla.org/themes/'),
redirect('^products/firefox/themes\.html$', 'https://addons.mozilla.org/themes/'),
redirect('^products/firefox/ui-customize\.html$',
'https://support.mozilla.org/kb/customize-firefox-controls-buttons-and-toolbars'),
redirect('^products/firefox/upgrade', 'firefox.new'),
redirect('^products/firefox/why/$', 'firefox.desktop.index'),
# bug 857246 redirect /products/firefox/start/ to start.mozilla.org
redirect(r'^products/firefox/start/?$', 'http://start.mozilla.org'),
redirect(r'^products/firefox', 'firefox.family.index'),
# bug 1260423
redirect(r'^firefox/choose/?$', 'firefox.new'),
# bug 1283397
redirect(r'^firefox/pocket/?$', 'https://getpocket.com/firefox/'),
# bug 1288552 - redirect /secondrun/ traffic from funnelcake test
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/secondrun(?:/.*)?',
'firefox.mobile-download', query=False),
# bug 1293539
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/tour/?$',
'https://support.mozilla.org/kb/get-started-firefox-overview-main-features'),
# bug 1295332
redirect(r'^hello/?$', 'https://support.mozilla.org/kb/hello-status'),
redirect(r'^firefox/hello/?$', 'https://support.mozilla.org/kb/hello-status'),
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/hello/start/?$', 'https://support.mozilla.org/kb/hello-status'),
# bug 1299947, 1326383
redirect(r'^firefox/channel/?$', firefox_channel(), cache_timeout=0),
# Bug 1277196
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/firstrun/learnmore/?$', 'firefox.features'),
redirect(r'^firefox/windows-10/welcome/?$', 'https://support.mozilla.org/kb/how-change-your-default-browser-windows-10'),
)
|
CSCI-462-01-2017/bedrock
|
bedrock/firefox/redirects.py
|
Python
|
mpl-2.0
| 26,579
|
import base64
import hashlib
from six.moves.http_client import HTTPConnection
import io
import json
import os
import threading
import traceback
import socket
from six.moves.urllib.parse import urljoin, urlsplit, urlunsplit
from abc import ABCMeta, abstractmethod
from ..testrunner import Stop
from .protocol import Protocol, BaseProtocolPart
here = os.path.split(__file__)[0]
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type == "reftest":
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshot(data):
"""Computes the sha1 checksum of a base64-encoded screenshot."""
return hashlib.sha1(base64.b64decode(data)).hexdigest()
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
if not isinstance(item, dict):
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshot(item["screenshot"])
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TestExecutor(object):
__metaclass__ = ABCMeta
test_type = None
convert_result = None
supports_testdriver = False
supports_jsshell = False
def __init__(self, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
@property
def logger(self):
"""StructuredLogger for this executor"""
if self.runner is not None:
return self.runner.logger
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
self.logger.warning(traceback.format_exc())
result = self.result_from_exception(test, e)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol):
return "%s://%s:%s" % (protocol,
self.server_config["browser_host"],
self.server_config["ports"][protocol][0])
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"]), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = unicode(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc()
return test.result_cls(status, message), []
def wait(self):
self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.executor.screenshot(test, viewport_size, dpi)
if not success:
return False, data
screenshot = data
hash_value = hash_screenshot(data)
self.screenshot_cache[key] = (hash_value, screenshot)
rv = (hash_value, screenshot)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def reset(self):
self.screenshot_cache.clear()
def is_pass(self, hashes, screenshots, relation, fuzzy):
assert relation in ("==", "!=")
if not fuzzy or fuzzy == ((0,0), (0,0)):
equal = hashes[0] == hashes[1]
else:
max_per_channel, pixels_different = self.get_differences(screenshots)
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
return equal if relation == "==" else not equal
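# Added worked example (comment only, not in the original source): the fuzzy
# annotation is a pair of ranges ((min, max) per-channel difference,
# (min, max) differing-pixel count). With fuzzy = ((0, 2), (0, 100)), two
# screenshots that differ in 37 pixels by at most 2 per channel satisfy the
# range clause above, so is_pass(...) is True for relation "==" and False for
# "!=". With fuzzy of None or ((0, 0), (0, 0)), only an exact hash match
# counts as equal.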
def get_differences(self, screenshots):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s" %
(count, per_channel))
return per_channel, count
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
self.message = []
# Depth-first search of reference tree, with the goal
# of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
while stack:
hashes = [None, None]
screenshots = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
if self.is_pass(hashes, screenshots, relation, fuzzy):
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
else:
# We passed
return {"status":"PASS", "message": None}
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
if success:
screenshots[i] = screenshot
log_data = [
{"url": nodes[0].url, "screenshot": screenshots[0], "hash": hashes[0]},
relation,
{"url": nodes[1].url, "screenshot": screenshots[1], "hash": hashes[1]},
]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
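# Added note (comment only, not in the original source): the lookup above is
# override-first, then most-specific key. A fuzzy_override entry keyed by the
# reference URL wins over the test's own fuzzy annotation, and within either
# source a (test_url, ref_url, relation) key wins over a plain ref_url key,
# which wins over the wildcard None key.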
def retake_screenshot(self, node, viewport_size, dpi):
success, data = self.executor.screenshot(node, viewport_size, dpi)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
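# Hedged sketch (added for illustration, not part of the original module): the
# same "run a callable in a worker thread and give up after a timeout" pattern
# that WdspecRun implements above, reduced to a standalone helper. The name
# _run_with_timeout_sketch is illustrative only; it is never called here.
def _run_with_timeout_sketch(func, timeout):
    result = {}
    done = threading.Event()
    def target():
        # Record either the function's value or the exception it raised,
        # then signal the waiting thread.
        try:
            result["value"] = (True, func())
        except Exception as e:
            result["value"] = (False, e)
        finally:
            done.set()
    threading.Thread(target=target).start()
    # Block for at most `timeout` seconds; a missing value means the worker
    # is still running, mirroring WdspecRun's EXTERNAL-TIMEOUT status.
    done.wait(timeout)
    return result.get("value", (False, "EXTERNAL-TIMEOUT"))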
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def execute_script(self, script, asynchronous=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
pass
def set_window(self, handle):
pass
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WebDriverProtocol(Protocol):
server_cls = None
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are more different to WebDriver may need to create a
fully custom implementation."""
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {
"click": ClickAction(self.logger, self.protocol),
"send_keys": SendKeysAction(self.logger, self.protocol),
"action_sequence": ActionSequenceAction(self.logger, self.protocol),
"generate_test_report": GenerateTestReportAction(self.logger, self.protocol)
}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
result = action_handler(payload)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message("complete", "error")
raise
else:
self.logger.debug("Action %s completed with result %s" % (action, result))
return_message = {"result": result}
self._send_message("complete", "success", json.dumps(return_message))
return False, None
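# Added illustration (comment only, not in the original source): a testdriver
# "action" callback arrives as a (url, "action", payload) tuple, e.g.
# payload = {"action": "click", "selector": "#submit-button"} (selector value
# hypothetical), which the dispatch above routes to ClickAction; unknown
# action names raise ValueError.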
def _send_message(self, message_type, status, message=None):
self.protocol.testdriver.send_message(message_type, status, message=message)
class ClickAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
element = self.protocol.select.element_by_selector(selector)
self.logger.debug("Clicking element: %s" % selector)
self.protocol.click.element(element)
class SendKeysAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
keys = payload["keys"]
element = self.protocol.select.element_by_selector(selector)
self.logger.debug("Sending keys to element: %s" % selector)
self.protocol.send_keys.send_keys(element, keys)
class ActionSequenceAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
# TODO: some sort of shallow error checking
actions = payload["actions"]
for actionSequence in actions:
if actionSequence["type"] == "pointer":
for action in actionSequence["actions"]:
if (action["type"] == "pointerMove" and
isinstance(action["origin"], dict)):
action["origin"] = self.get_element(action["origin"]["selector"], action["frame"]["frame"])
self.protocol.action_sequence.send_actions({"actions": actions})
def get_element(self, element_selector, frame):
element = self.protocol.select.element_by_selector(element_selector, frame)
return element
class GenerateTestReportAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
message = payload["message"]
self.logger.debug("Generating test report: %s" % message)
self.protocol.generate_test_report.generate_test_report(message)
|
larsbergstrom/servo
|
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/base.py
|
Python
|
mpl-2.0
| 23,868
|
import datetime
import hashlib
import logging
import os
from itertools import islice
from smtplib import SMTPServerDisconnected
from urllib.parse import parse_qsl, ParseResult, urlparse, urlsplit, urlunsplit
import requests
from babel import dates, localedata
from celery import chain, chord
from django.conf import settings
from django.core.mail import EmailMultiAlternatives, get_connection
from django.core.paginator import EmptyPage, InvalidPage, Paginator
from django.http import QueryDict
from django.shortcuts import _get_queryset, redirect
from django.utils.cache import patch_cache_control
from django.utils.encoding import force_text, smart_bytes
from django.utils.http import urlencode
from django.utils.translation import gettext_lazy as _
from polib import pofile
from pyquery import PyQuery as pq
from pytz import timezone
from redo import retrying
from requests.adapters import HTTPAdapter
from taggit.utils import split_strip
from urllib3.util.retry import Retry
from .exceptions import DateTimeFormatError
log = logging.getLogger("kuma.core.utils")
def to_html(pq):
"""
Return valid HTML for the given PyQuery instance.
It uses "method='html'" when calling the "html" method on the given
PyQuery instance in order to prevent the improper closure of some empty
HTML elements. For example, without "method='html'" the output of an empty
"iframe" element would be "<iframe/>", which is illegal in HTML, instead of
"<iframe></iframe>".
"""
return pq.html(method="html")
def is_wiki(request):
return request.get_host() == settings.WIKI_HOST
def redirect_to_wiki(request, permanent=True):
request.META["HTTP_HOST"] = settings.WIKI_HOST
return redirect(request.build_absolute_uri(), permanent=permanent)
def is_untrusted(request):
return request.get_host() in (settings.ATTACHMENT_ORIGIN, settings.ATTACHMENT_HOST,)
def paginate(request, queryset, per_page=20):
"""Get a Paginator, abstracting some common paging actions."""
paginator = Paginator(queryset, per_page)
# Get the page from the request, make sure it's an int.
try:
page = int(request.GET.get("page", 1))
except ValueError:
page = 1
# Get a page of results, or the first page if there's a problem.
try:
paginated = paginator.page(page)
except (EmptyPage, InvalidPage):
paginated = paginator.page(1)
base = request.build_absolute_uri(request.path)
items = [
(k, v) for k in request.GET if k != "page" for v in request.GET.getlist(k) if v
]
qsa = urlencode(items)
paginated.url = f"{base}?{qsa}"
return paginated
def smart_int(string, fallback=0):
"""Convert a string to int, with fallback for invalid strings or types."""
try:
return int(float(string))
except (ValueError, TypeError, OverflowError):
return fallback
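# Added illustration (not part of the original module): expected behaviour of
# smart_int for a few representative inputs, wrapped in a helper so nothing
# runs at import time. The helper name is illustrative only.
def _smart_int_examples():
    assert smart_int("3.14") == 3  # numeric strings are truncated via float()
    assert smart_int("not a number") == 0  # invalid strings use the default fallback
    assert smart_int(None, fallback=-1) == -1  # invalid types use the given fallback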
def strings_are_translated(strings, locale):
# http://stackoverflow.com/a/24339946/571420
pofile_path = os.path.join(
settings.ROOT, "locale", locale, "LC_MESSAGES", "django.po"
)
try:
po = pofile(pofile_path)
except IOError: # in case the file doesn't exist or couldn't be parsed
return False
all_strings_translated = True
for string in strings:
if not any(
e
for e in po
if e.msgid == string
and (e.translated() and "fuzzy" not in e.flags)
and not e.obsolete
):
all_strings_translated = False
return all_strings_translated
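# Added note (comment only, not in the original source): strings_are_translated
# returns True only when every msgid in `strings` has a non-fuzzy, non-obsolete
# translation in locale/<locale>/LC_MESSAGES/django.po; a missing or unparsable
# .po file yields False.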
def generate_filename_and_delete_previous(ffile, name, before_delete=None):
"""Generate a new filename for a file upload field; delete the previously
uploaded file."""
new_filename = ffile.field.generate_filename(ffile.instance, name)
try:
# HACK: Speculatively re-fetching the original object makes me feel
# wasteful and dirty. But, I can't think of another way to get
# to the original field's value. Should be cached, though.
# see also - http://code.djangoproject.com/ticket/11663#comment:10
orig_instance = ffile.instance.__class__.objects.get(id=ffile.instance.id)
orig_field_file = getattr(orig_instance, ffile.field.name)
orig_filename = orig_field_file.name
if orig_filename and new_filename != orig_filename:
if before_delete:
before_delete(orig_field_file)
orig_field_file.delete()
except ffile.instance.__class__.DoesNotExist:
pass
return new_filename
def get_object_or_none(klass, *args, **kwargs):
"""
A tool like Django's get_object_or_404 but returns None in case
of a DoesNotExist exception.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
return None
def parse_tags(tagstring, sorted=True):
"""
Parses tag input, with multiple word input being activated and
delineated by commas and double quotes. Quotes take precedence, so
they may contain commas.
Returns a sorted list of unique tag names, unless sorted=False.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
if not tagstring:
return []
tagstring = force_text(tagstring)
# Special case - if there are no commas or double quotes in the
# input, we don't *do* a recall... I mean, we know we only need to
# split on spaces.
if "," not in tagstring and '"' not in tagstring:
words = list(split_strip(tagstring, " "))
if sorted:
words.sort()
return words
words = []
buffer = []
# Defer splitting of non-quoted sections until we know if there are
# any unquoted commas.
to_be_split = []
saw_loose_comma = False
open_quote = False
i = iter(tagstring)
try:
while True:
c = next(i)
if c == '"':
if buffer:
to_be_split.append("".join(buffer))
buffer = []
# Find the matching quote
open_quote = True
c = next(i)
while c != '"':
buffer.append(c)
c = next(i)
if buffer:
word = "".join(buffer).strip()
if word:
words.append(word)
buffer = []
open_quote = False
else:
if not saw_loose_comma and c == ",":
saw_loose_comma = True
buffer.append(c)
except StopIteration:
# If we were parsing an open quote which was never closed treat
# the buffer as unquoted.
if buffer:
if open_quote and "," in buffer:
saw_loose_comma = True
to_be_split.append("".join(buffer))
if to_be_split:
if saw_loose_comma:
delimiter = ","
else:
delimiter = " "
for chunk in to_be_split:
words.extend(split_strip(chunk, delimiter))
    words = list(set(words))  # de-duplicate, as the docstring promises unique names
if sorted:
words.sort()
return words
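# Editor's note: an illustrative usage sketch; not part of the original module.
def _example_parse_tags():
    """Show space-, comma-, and quote-delimited tag parsing."""
    assert parse_tags("banana apple") == ["apple", "banana"]
    assert parse_tags('"apple pie", banana') == ["apple pie", "banana"]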
def chunked(iterable, n):
"""Return chunks of n length of iterable.
If ``len(iterable) % n != 0``, then the last chunk will have
length less than n.
Example:
    >>> list(chunked([1, 2, 3, 4, 5], 2))
[(1, 2), (3, 4), (5,)]
:arg iterable: the iterable
:arg n: the chunk length
:returns: generator of chunks from the iterable
"""
iterable = iter(iterable)
while True:
t = tuple(islice(iterable, n))
if t:
yield t
else:
return
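# Editor's note: an illustrative usage sketch; not part of the original module.
def _example_chunked():
    """chunked() yields tuples; the final chunk may be shorter than n."""
    assert list(chunked([1, 2, 3, 4, 5], 2)) == [(1, 2), (3, 4), (5,)]
    assert list(chunked([], 3)) == []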
def chord_flow(pre_task, tasks, post_task):
if settings.CELERY_TASK_ALWAYS_EAGER:
# Eager mode and chords don't get along. So we serialize
# the tasks as a workaround.
tasks.insert(0, pre_task)
tasks.append(post_task)
return chain(*tasks)
else:
return chain(pre_task, chord(header=tasks, body=post_task))
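# Editor's note: an illustrative sketch of how chord_flow() might be wired up;
# it is not part of the original module, and `prepare`, `process` and
# `finalize` are hypothetical Celery tasks.
#
#     flow = chord_flow(
#         prepare.si(),
#         [process.si(item_id) for item_id in (1, 2, 3)],
#         finalize.si(),
#     )
#     flow.apply_async()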
def get_unique(
content_type,
object_pk,
name=None,
request=None,
ip=None,
user_agent=None,
user=None,
):
"""Extract a set of unique identifiers from the request.
This set will be made up of one of the following combinations, depending
on what's available:
* user, None, None, unique_MD5_hash
* None, ip, user_agent, unique_MD5_hash
"""
if request:
if request.user.is_authenticated:
user = request.user
ip = user_agent = None
else:
user = None
ip = request.META.get("REMOTE_ADDR", "")
user_agent = request.META.get("HTTP_USER_AGENT", "")[:255]
# HACK: Build a hash of the fields that should be unique, let MySQL
# chew on that for a unique index. Note that any changes to this algo
# will create all new unique hashes that don't match any existing ones.
    hash_text = "\n".join(
        str(x)
        for x in (
            content_type.pk,
            object_pk,
            name or "",
            ip,
            user_agent,
            user.pk if user else "None",
        )
    )
unique_hash = hashlib.md5(hash_text.encode()).hexdigest()
return (user, ip, user_agent, unique_hash)
def urlparams(url_, fragment=None, query_dict=None, **query):
"""
Add a fragment and/or query parameters to a URL.
    New query params will be appended to existing parameters, except duplicate
names, which will be replaced.
"""
url_ = urlparse(url_)
fragment = fragment if fragment is not None else url_.fragment
q = url_.query
new_query_dict = (
QueryDict(smart_bytes(q), mutable=True) if q else QueryDict("", mutable=True)
)
if query_dict:
for k, l in query_dict.lists():
new_query_dict[k] = None # Replace, don't append.
for v in l:
new_query_dict.appendlist(k, v)
for k, v in query.items():
# Replace, don't append.
if isinstance(v, list):
new_query_dict.setlist(k, v)
else:
new_query_dict[k] = v
query_string = urlencode(
[(k, v) for k, l in new_query_dict.lists() for v in l if v is not None]
)
new = ParseResult(
url_.scheme, url_.netloc, url_.path, url_.params, query_string, fragment
)
return new.geturl()
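# Editor's note: an illustrative usage sketch; not part of the original module.
# It assumes Django settings are configured, as they are within this project.
def _example_urlparams():
    """Append a parameter, replace a duplicate, and add a fragment."""
    assert urlparams("/search?q=a", page=2) == "/search?q=a&page=2"
    assert urlparams("/search?q=a", q="b") == "/search?q=b"
    assert urlparams("/doc", fragment="top") == "/doc#top"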
def format_date_time(request, value, format="shortdatetime"):
"""
Returns date/time formatted using babel's locale settings. Uses the
timezone from settings.py
"""
if not isinstance(value, datetime.datetime):
if isinstance(value, datetime.date):
# Turn a date into a datetime
value = datetime.datetime.combine(value, datetime.datetime.min.time())
else:
# Expecting datetime value
raise ValueError
default_tz = timezone(settings.TIME_ZONE)
tzvalue = default_tz.localize(value)
user = request.user
try:
if user.is_authenticated and user.timezone:
user_tz = timezone(user.timezone)
tzvalue = user_tz.normalize(tzvalue.astimezone(user_tz))
except AttributeError:
pass
locale = _get_request_locale(request)
try:
formatted = format_date_value(value, tzvalue, locale, format)
except KeyError:
# Babel sometimes stumbles over missing formatters in some locales
# e.g. bug #1247086
# we fall back formatting the value with the default language code
formatted = format_date_value(
value, tzvalue, language_to_locale(settings.LANGUAGE_CODE), format
)
return formatted, tzvalue
def _get_request_locale(request):
"""Return locale from the request, falling back to a default if invalid."""
locale = request.LANGUAGE_CODE
if not localedata.exists(locale):
locale = settings.LANGUAGE_CODE
return language_to_locale(locale)
def format_date_value(value, tzvalue, locale, format):
if format == "shortdatetime":
# Check if the date is today
if value.toordinal() == datetime.date.today().toordinal():
formatted = dates.format_time(tzvalue, format="short", locale=locale)
return _("Today at %s") % formatted
else:
return dates.format_datetime(tzvalue, format="short", locale=locale)
elif format == "longdatetime":
return dates.format_datetime(tzvalue, format="long", locale=locale)
elif format == "date":
return dates.format_date(tzvalue, locale=locale)
elif format == "time":
return dates.format_time(tzvalue, locale=locale)
elif format == "datetime":
return dates.format_datetime(tzvalue, locale=locale)
else:
# Unknown format
raise DateTimeFormatError
def language_to_locale(language_code):
"""
Convert language codes to locale names used by Babel, Django
Kuma uses a dash for regions, like en-US, zh-CN.
Babel and Django use underscore, like en_US, zh_CN.
The codes are identical when there is no region, like fr, es.
https://docs.djangoproject.com/en/1.11/topics/i18n/#definitions
"""
return language_code.replace("-", "_")
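# Editor's note: an illustrative usage sketch; not part of the original module.
def _example_language_to_locale():
    """Region codes switch from dash to underscore; plain codes pass through."""
    assert language_to_locale("en-US") == "en_US"
    assert language_to_locale("fr") == "fr"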
def add_shared_cache_control(response, **kwargs):
"""
Adds a Cache-Control header for shared caches, like CDNs, to the
provided response.
Default settings (which can be overridden or extended):
- max-age=0 - Don't use browser cache without asking if still valid
- s-maxage=CACHE_CONTROL_DEFAULT_SHARED_MAX_AGE - Cache in the shared
      cache for the default period of time
- public - Allow intermediate proxies to cache response
"""
nocache = response.has_header("Cache-Control") and (
"no-cache" in response["Cache-Control"]
or "no-store" in response["Cache-Control"]
)
if nocache:
return
# Set the default values.
cc_kwargs = {
"public": True,
"max_age": 0,
"s_maxage": settings.CACHE_CONTROL_DEFAULT_SHARED_MAX_AGE,
}
# Override the default values and/or add new ones.
cc_kwargs.update(kwargs)
patch_cache_control(response, **cc_kwargs)
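# Editor's note: an illustrative sketch; not part of the original module. It
# assumes Django settings (including CACHE_CONTROL_DEFAULT_SHARED_MAX_AGE)
# are configured, as they are within this project.
def _example_add_shared_cache_control():
    """Stamp a response with the shared-cache defaults, overriding s-maxage."""
    from django.http import HttpResponse
    response = HttpResponse("ok")
    add_shared_cache_control(response, s_maxage=3600)
    return response["Cache-Control"]  # e.g. "max-age=0, public, s-maxage=3600"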
def order_params(original_url):
"""Standardize order of query parameters."""
bits = urlsplit(original_url)
qs = sorted(parse_qsl(bits.query, keep_blank_values=True))
new_qs = urlencode(qs)
new_url = urlunsplit((bits.scheme, bits.netloc, bits.path, new_qs, bits.fragment))
return new_url
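# Editor's note: an illustrative usage sketch; not part of the original module.
def _example_order_params():
    """Query parameters come back in alphabetical order."""
    assert order_params("https://example.com/p?b=2&a=1") == "https://example.com/p?a=1&b=2"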
def requests_retry_session(
retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504),
):
"""Opinionated wrapper that creates a requests session with a
HTTPAdapter that sets up a Retry policy that includes connection
retries.
    If you do the more naive retry by simply setting a number, e.g.::
        adapter = HTTPAdapter(max_retries=3)
    then it will raise immediately on any connection errors.
    Retrying on connection errors guards better against unpredictable networks.
From http://docs.python-requests.org/en/master/api/?highlight=retries#requests.adapters.HTTPAdapter
it says: "By default, Requests does not retry failed connections."
The backoff_factor is documented here:
https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.retry.Retry
A default of retries=3 and backoff_factor=0.3 means it will sleep like::
[0.3, 0.6, 1.2]
""" # noqa
session = requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
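# Editor's note: an illustrative usage sketch; not part of the original module.
# example.com is a placeholder host.
def _example_requests_retry_session():
    """Fetch a URL with connection retries and exponential backoff."""
    session = requests_retry_session(retries=5, backoff_factor=0.2)
    response = session.get("https://example.com", timeout=5)
    return response.status_code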
def safer_pyquery(*args, **kwargs):
"""
PyQuery is magically clumsy in how it handles its arguments. A more
ideal and explicit constructor would be:
>>> from pyquery import PyQuery as pq
>>> parsed = pq(html=my_html_string)
>>> parsed = pq(url=definitely_a_url_string)
But instead, you're expected to use it like this:
>>> from pyquery import PyQuery as pq
>>> parsed = pq(my_html_string)
>>> parsed = pq(definitely_a_url_string)
...and PyQuery attempts to be smart and look at that first argument
and if it looks like a URL, it first calls `requests.get()` on it.
    This function is a thin wrapper on that constructor that prevents
    that dangerous code from ever getting a chance to run.
    NOTE! As of May 10 2019, this risk exists in the latest release of
    PyQuery. Hopefully it will be fixed, but it would be a massively disruptive
    change and thus unlikely to happen any time soon.
NOTE 2! Unlikely to be fixed by core pyquery team any time soon
https://github.com/gawel/pyquery/issues/203
"""
# This "if" statement is exactly what PyQuery's constructor does.
# We'll run it ourselves once and if it matches, "ruin" it by
# injecting that extra space.
if (
len(args) >= 1
and isinstance(args[0], str)
and args[0].split("://", 1)[0] in ("http", "https")
):
args = (f" {args[0]}",) + args[1:]
return pq(*args, **kwargs)
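# Editor's note: an illustrative usage sketch; not part of the original module.
def _example_safer_pyquery():
    """Markup parses normally; URL-looking strings are never fetched."""
    parsed = safer_pyquery("<p>Hello <b>world</b></p>")
    assert parsed("b").text() == "world"
    safer_pyquery("https://example.com")  # treated as text, no HTTP request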
def send_mail_retrying(
subject,
message,
from_email,
recipient_list,
fail_silently=False,
auth_user=None,
auth_password=None,
connection=None,
html_message=None,
attachment=None,
**kwargs,
):
"""Copied verbatim from django.core.mail.send_mail but with the override
that we're using our EmailMultiAlternativesRetrying class instead.
See its doc string for its full documentation.
The only difference is that this function allows for setting your
own custom 'retrying' keyword argument.
"""
connection = connection or get_connection(
username=auth_user, password=auth_password, fail_silently=fail_silently,
)
mail = EmailMultiAlternativesRetrying(
subject, message, from_email, recipient_list, connection=connection
)
if html_message:
mail.attach_alternative(html_message, "text/html")
if attachment:
mail.attach(attachment["name"], attachment["bytes"], attachment["mime"])
return mail.send(**kwargs)
class EmailMultiAlternativesRetrying(EmailMultiAlternatives):
"""
Thin wrapper on django.core.mail.EmailMultiAlternatives that adds
a retrying functionality. By default, the only override is that
    we're very explicit about the types of exceptions we treat as retryable.
    The exceptions that trigger a retry are:
* smtplib.SMTPServerDisconnected
Only list exceptions that have been known to happen and are safe.
"""
def send(self, *args, retry_options=None, **kwargs):
# See https://github.com/mozilla-releng/redo
# for a list of the default options to the redo.retry function
# which the redo.retrying context manager wraps.
retry_options = retry_options or {
"retry_exceptions": (SMTPServerDisconnected,),
# The default in redo is 60 seconds. Let's tone that down.
"sleeptime": 3,
}
parent_method = super(EmailMultiAlternativesRetrying, self).send
with retrying(parent_method, **retry_options) as method:
return method(*args, **kwargs)
|
Elchi3/kuma
|
kuma/core/utils.py
|
Python
|
mpl-2.0
| 19,446
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt, cint
from frappe import _
import frappe.defaults
from erpnext.stock.utils import update_bin
from erpnext.controllers.buying_controller import BuyingController
class PurchaseReceipt(BuyingController):
tname = 'Purchase Receipt Item'
fname = 'purchase_receipt_details'
def __init__(self, arg1, arg2=None):
super(PurchaseReceipt, self).__init__(arg1, arg2)
self.status_updater = [{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Purchase Order Item',
'join_field': 'prevdoc_detail_docname',
'target_field': 'received_qty',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'prevdoc_docname',
}]
def onload(self):
billed_qty = frappe.db.sql("""select sum(ifnull(qty, 0)) from `tabPurchase Invoice Item`
where purchase_receipt=%s""", self.name)
if billed_qty:
total_qty = sum((item.qty for item in self.get("purchase_receipt_details")))
self.set("__billing_complete", billed_qty[0][0] == total_qty)
def validate(self):
super(PurchaseReceipt, self).validate()
self.po_required()
if not self.status:
self.status = "Draft"
from erpnext.utilities import validate_status
validate_status(self.status, ["Draft", "Submitted", "Cancelled"])
self.validate_with_previous_doc()
self.validate_rejected_warehouse()
self.validate_accepted_rejected_qty()
self.validate_inspection()
self.validate_uom_is_integer("uom", ["qty", "received_qty"])
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_challan_no()
pc_obj = frappe.get_doc('Purchase Common')
pc_obj.validate_for_items(self)
self.check_for_stopped_status(pc_obj)
# sub-contracting
self.validate_for_subcontracting()
self.update_raw_materials_supplied("pr_raw_material_details")
self.update_valuation_rate("purchase_receipt_details")
def validate_rejected_warehouse(self):
for d in self.get("purchase_receipt_details"):
if flt(d.rejected_qty) and not d.rejected_warehouse:
d.rejected_warehouse = self.rejected_warehouse
if not d.rejected_warehouse:
frappe.throw(_("Rejected Warehouse is mandatory against regected item"))
# validate accepted and rejected qty
def validate_accepted_rejected_qty(self):
for d in self.get("purchase_receipt_details"):
if not flt(d.received_qty) and flt(d.qty):
d.received_qty = flt(d.qty) - flt(d.rejected_qty)
elif not flt(d.qty) and flt(d.rejected_qty):
d.qty = flt(d.received_qty) - flt(d.rejected_qty)
elif not flt(d.rejected_qty):
d.rejected_qty = flt(d.received_qty) - flt(d.qty)
# Check Received Qty = Accepted Qty + Rejected Qty
if ((flt(d.qty) + flt(d.rejected_qty)) != flt(d.received_qty)):
frappe.throw(_("Accepted + Rejected Qty must be equal to Received quantity for Item {0}").format(d.item_code))
def validate_challan_no(self):
"Validate if same challan no exists for same supplier in a submitted purchase receipt"
if self.challan_no:
exists = frappe.db.sql_list("""select name from `tabPurchase Receipt`
where docstatus=1 and name!=%s and supplier=%s and challan_no=%s
				and fiscal_year=%s""", (self.name, self.supplier, self.challan_no, self.fiscal_year))
if exists:
frappe.throw(_("Supplier delivery number duplicate in {0}").format(exists))
def validate_with_previous_doc(self):
super(PurchaseReceipt, self).validate_with_previous_doc(self.tname, {
"Purchase Order": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Order Item": {
"ref_dn_field": "prevdoc_detail_docname",
"compare_fields": [["project_name", "="], ["uom", "="], ["item_code", "="]],
"is_child_table": True
}
})
if cint(frappe.defaults.get_global_default('maintain_same_rate')):
super(PurchaseReceipt, self).validate_with_previous_doc(self.tname, {
"Purchase Order Item": {
"ref_dn_field": "prevdoc_detail_docname",
"compare_fields": [["rate", "="]],
"is_child_table": True
}
})
def po_required(self):
if frappe.db.get_value("Buying Settings", None, "po_required") == 'Yes':
for d in self.get('purchase_receipt_details'):
if not d.prevdoc_docname:
frappe.throw(_("Purchase Order number required for Item {0}").format(d.item_code))
def update_stock(self):
sl_entries = []
stock_items = self.get_stock_items()
for d in self.get('purchase_receipt_details'):
if d.item_code in stock_items and d.warehouse:
pr_qty = flt(d.qty) * flt(d.conversion_factor)
if pr_qty:
sl_entries.append(self.get_sl_entries(d, {
"actual_qty": flt(pr_qty),
"serial_no": cstr(d.serial_no).strip(),
"incoming_rate": d.valuation_rate
}))
if flt(d.rejected_qty) > 0:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": d.rejected_warehouse,
"actual_qty": flt(d.rejected_qty) * flt(d.conversion_factor),
"serial_no": cstr(d.rejected_serial_no).strip(),
"incoming_rate": d.valuation_rate
}))
self.bk_flush_supp_wh(sl_entries)
self.make_sl_entries(sl_entries)
def update_ordered_qty(self):
stock_items = self.get_stock_items()
for d in self.get("purchase_receipt_details"):
if d.item_code in stock_items and d.warehouse \
and cstr(d.prevdoc_doctype) == 'Purchase Order':
already_received_qty = self.get_already_received_qty(d.prevdoc_docname,
d.prevdoc_detail_docname)
po_qty, ordered_warehouse = self.get_po_qty_and_warehouse(d.prevdoc_detail_docname)
if not ordered_warehouse:
frappe.throw(_("Warehouse is missing in Purchase Order"))
if already_received_qty + d.qty > po_qty:
ordered_qty = - (po_qty - already_received_qty) * flt(d.conversion_factor)
else:
ordered_qty = - flt(d.qty) * flt(d.conversion_factor)
update_bin({
"item_code": d.item_code,
"warehouse": ordered_warehouse,
"posting_date": self.posting_date,
"ordered_qty": flt(ordered_qty) if self.docstatus==1 else -flt(ordered_qty)
})
def get_already_received_qty(self, po, po_detail):
qty = frappe.db.sql("""select sum(qty) from `tabPurchase Receipt Item`
where prevdoc_detail_docname = %s and docstatus = 1
and prevdoc_doctype='Purchase Order' and prevdoc_docname=%s
and parent != %s""", (po_detail, po, self.name))
return qty and flt(qty[0][0]) or 0.0
def get_po_qty_and_warehouse(self, po_detail):
po_qty, po_warehouse = frappe.db.get_value("Purchase Order Item", po_detail,
["qty", "warehouse"])
return po_qty, po_warehouse
def bk_flush_supp_wh(self, sl_entries):
for d in self.get('pr_raw_material_details'):
# negative quantity is passed as raw material qty has to be decreased
# when PR is submitted and it has to be increased when PR is cancelled
sl_entries.append(self.get_sl_entries(d, {
"item_code": d.rm_item_code,
"warehouse": self.supplier_warehouse,
"actual_qty": -1*flt(d.consumed_qty),
"incoming_rate": 0
}))
def validate_inspection(self):
for d in self.get('purchase_receipt_details'): #Enter inspection date for all items that require inspection
ins_reqd = frappe.db.sql("select inspection_required from `tabItem` where name = %s",
(d.item_code,), as_dict = 1)
ins_reqd = ins_reqd and ins_reqd[0]['inspection_required'] or 'No'
if ins_reqd == 'Yes' and not d.qa_no:
frappe.msgprint(_("Quality Inspection required for Item {0}").format(d.item_code))
# Check for Stopped status
def check_for_stopped_status(self, pc_obj):
		check_list = []
for d in self.get('purchase_receipt_details'):
if d.meta.get_field('prevdoc_docname') and d.prevdoc_docname and d.prevdoc_docname not in check_list:
check_list.append(d.prevdoc_docname)
pc_obj.check_for_stopped_status( d.prevdoc_doctype, d.prevdoc_docname)
# on submit
def on_submit(self):
purchase_controller = frappe.get_doc("Purchase Common")
# Check for Approving Authority
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.grand_total)
# Set status as Submitted
frappe.db.set(self, 'status', 'Submitted')
self.update_prevdoc_status()
self.update_ordered_qty()
self.update_stock()
from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "purchase_receipt_details")
purchase_controller.update_last_purchase_rate(self, 1)
self.make_gl_entries()
def check_next_docstatus(self):
submit_rv = frappe.db.sql("""select t1.name
from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2
where t1.name = t2.parent and t2.purchase_receipt = %s and t1.docstatus = 1""",
(self.name))
if submit_rv:
frappe.throw(_("Purchase Invoice {0} is already submitted").format(self.submit_rv[0][0]))
def on_cancel(self):
pc_obj = frappe.get_doc('Purchase Common')
self.check_for_stopped_status(pc_obj)
# Check if Purchase Invoice has been submitted against current Purchase Order
submitted = frappe.db.sql("""select t1.name
from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2
where t1.name = t2.parent and t2.purchase_receipt = %s and t1.docstatus = 1""",
self.name)
if submitted:
frappe.throw(_("Purchase Invoice {0} is already submitted").format(submitted[0][0]))
frappe.db.set(self,'status','Cancelled')
self.update_ordered_qty()
self.update_stock()
self.update_prevdoc_status()
pc_obj.update_last_purchase_rate(self, 0)
self.make_cancel_gl_entries()
def get_current_stock(self):
for d in self.get('pr_raw_material_details'):
if self.supplier_warehouse:
bin = frappe.db.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.rm_item_code, self.supplier_warehouse), as_dict = 1)
d.current_stock = bin and flt(bin[0]['actual_qty']) or 0
def get_rate(self,arg):
return frappe.get_doc('Purchase Common').get_rate(arg,self)
def get_gl_entries(self, warehouse_account=None):
against_stock_account = self.get_company_default("stock_received_but_not_billed")
gl_entries = super(PurchaseReceipt, self).get_gl_entries(warehouse_account, against_stock_account)
return gl_entries
@frappe.whitelist()
def make_purchase_invoice(source_name, target_doc=None):
from frappe.model.mapper import get_mapped_doc
invoiced_qty_map = get_invoiced_qty_map(source_name)
def set_missing_values(source, target):
if len(target.get("entries")) == 0:
frappe.throw(_("All items have already been invoiced"))
doc = frappe.get_doc(target)
doc.run_method("set_missing_values")
doc.run_method("calculate_taxes_and_totals")
def update_item(source_doc, target_doc, source_parent):
target_doc.qty = source_doc.qty - invoiced_qty_map.get(source_doc.name, 0)
doclist = get_mapped_doc("Purchase Receipt", source_name, {
"Purchase Receipt": {
"doctype": "Purchase Invoice",
"validation": {
"docstatus": ["=", 1],
}
},
"Purchase Receipt Item": {
"doctype": "Purchase Invoice Item",
"field_map": {
"name": "pr_detail",
"parent": "purchase_receipt",
"prevdoc_detail_docname": "po_detail",
"prevdoc_docname": "purchase_order",
},
"postprocess": update_item,
"filter": lambda d: d.qty - invoiced_qty_map.get(d.name, 0)<=0
},
"Purchase Taxes and Charges": {
"doctype": "Purchase Taxes and Charges",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doclist
def get_invoiced_qty_map(purchase_receipt):
"""returns a map: {pr_detail: invoiced_qty}"""
invoiced_qty_map = {}
for pr_detail, qty in frappe.db.sql("""select pr_detail, qty from `tabPurchase Invoice Item`
where purchase_receipt=%s and docstatus=1""", purchase_receipt):
if not invoiced_qty_map.get(pr_detail):
invoiced_qty_map[pr_detail] = 0
invoiced_qty_map[pr_detail] += qty
return invoiced_qty_map
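# Editor's note: an illustrative sketch (not part of the original file) of how
# the whitelisted mapper above is typically called from server-side code; the
# receipt name "PREC-00001" is a placeholder.
#
#     pi = make_purchase_invoice("PREC-00001")
#     pi.insert()  # saves the mapped draft Purchase Invoice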
|
gangadhar-kadam/hrerp
|
erpnext/stock/doctype/purchase_receipt/purchase_receipt.py
|
Python
|
agpl-3.0
| 12,110
|
#
# Copyright (c) 2006, 2007 Canonical
#
# Written by Gustavo Niemeyer <gustavo@niemeyer.net>
#
# This file is part of Storm Object Relational Mapper.
#
# Storm is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Storm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Manage database shemas.
The L{Schema} class can be used to create, drop, clean and upgrade database
schemas.
A database L{Schema} is defined by the series of SQL statements that should be
used to create, drop and clear the schema, respectively, and by a patch module
used to upgrade it (see also L{PatchApplier}).
For example:
>>> creates = ['CREATE TABLE person (id INTEGER, name TEXT)']
>>> drops = ['DROP TABLE person']
>>> deletes = ['DELETE FROM person']
>>> import patch_module
>>> Schema(creates, drops, deletes, patch_module)
where patch_module is a Python module containing database patches used to
upgrade the schema over time.
"""
from storm.locals import StormError
from storm.schema.patch import PatchApplier
class Schema(object):
"""Create, drop, clean and patch table schemas.
@param creates: A list of C{CREATE TABLE} statements.
@param drops: A list of C{DROP TABLE} statements.
@param deletes: A list of C{DELETE FROM} statements.
@param patch_package: The Python package containing patch modules to apply.
@param committer: Optionally a committer to pass to the L{PatchApplier}.
@see: L{PatchApplier}.
"""
_create_patch = "CREATE TABLE patch (version INTEGER NOT NULL PRIMARY KEY)"
_drop_patch = "DROP TABLE IF EXISTS patch"
_autocommit = True
def __init__(self, creates, drops, deletes, patch_package, committer=None):
self._creates = creates
self._drops = drops
self._deletes = deletes
self._patch_package = patch_package
self._committer = committer
def _execute_statements(self, store, statements):
"""Execute the given statements in the given store."""
for statement in statements:
try:
store.execute(statement)
except Exception:
print "Error running %s" % statement
raise
if self._autocommit:
store.commit()
def autocommit(self, flag):
"""Control whether to automatically commit/rollback schema changes.
The default is C{True}, if set to C{False} it's up to the calling code
to handle commits and rollbacks.
@note: In case of rollback the exception will just be propagated, and
no rollback on the store will be performed.
"""
self._autocommit = flag
def create(self, store):
"""Run C{CREATE TABLE} SQL statements with C{store}."""
self._execute_statements(store, [self._create_patch])
self._execute_statements(store, self._creates)
def drop(self, store):
"""Run C{DROP TABLE} SQL statements with C{store}."""
self._execute_statements(store, self._drops)
self._execute_statements(store, [self._drop_patch])
def delete(self, store):
"""Run C{DELETE FROM} SQL statements with C{store}."""
self._execute_statements(store, self._deletes)
def upgrade(self, store):
"""Upgrade C{store} to have the latest schema.
If a schema isn't present a new one will be created. Unapplied
patches will be applied to an existing schema.
"""
class NoopCommitter(object):
commit = lambda _: None
rollback = lambda _: None
committer = self._committer if self._autocommit else NoopCommitter()
patch_applier = PatchApplier(store, self._patch_package, committer)
try:
store.execute("SELECT * FROM patch WHERE version=0")
except StormError:
# No schema at all. Create it from the ground.
store.rollback()
self.create(store)
patch_applier.mark_applied_all()
if self._autocommit:
store.commit()
else:
patch_applier.apply_all()
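# Editor's note: an illustrative sketch; not part of the original module. The
# database URI and patch package are placeholders, and the creates/drops/
# deletes lists are as described in the module docstring above.
#
#     from storm.database import create_database
#     from storm.store import Store
#     store = Store(create_database("sqlite:"))
#     schema = Schema(creates, drops, deletes, patch_module)
#     schema.upgrade(store)  # creates the schema on first run, patches it later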
|
nwokeo/supysonic
|
venv/lib/python2.7/site-packages/storm/schema/schema.py
|
Python
|
agpl-3.0
| 4,591
|
"""
This module contains all signals.
"""
from django.dispatch import Signal
# Signal that fires when a user is graded
COURSE_GRADE_CHANGED = Signal(providing_args=["user", "course_grade", "course_key", "deadline"])
# Signal that fires when a user is awarded a certificate in a course (in the certificates django app)
# TODO: runtime coupling between apps will be reduced if this event is changed to carry a username
# rather than a User object; however, this will require changes to the milestones and badges APIs
COURSE_CERT_AWARDED = Signal(providing_args=["user", "course_key", "mode", "status"])
# Signal that indicates that a user has passed a course.
COURSE_GRADE_NOW_PASSED = Signal(
providing_args=[
'user', # user object
'course_key', # course.id
]
)
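# Editor's note: an illustrative sketch; not part of the original module. It
# shows how a receiver would typically subscribe to one of these signals; the
# handler name is hypothetical.
from django.dispatch import receiver
@receiver(COURSE_GRADE_NOW_PASSED)
def _example_on_course_passed(sender, user=None, course_key=None, **kwargs):
    """Hypothetical receiver invoked when a learner passes a course."""
    # A real handler might award a badge or emit an analytics event here.
    pass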
|
miptliot/edx-platform
|
openedx/core/djangoapps/signals/signals.py
|
Python
|
agpl-3.0
| 791
|
#file for building up and organizing the output
from collections import defaultdict
from datetime import timedelta, datetime
from FileQuery import GetTimes
class Builder:
""" class for building up the data to be displayed by the display class
"""
def __init__(self):
self.GetTimes = GetTimes()
def make_app_list(self):
finalized = {}
time_data = []
todays_apps = self.get_todays_apps()
time_list = self.make_time_list(todays_apps)
present_list = self.make_present_list(time_list)
for names in todays_apps:
finalized[names] = time_data
for time, present in zip(time_list, present_list):
time_data = [time[1] , present[1]]
finalized[time[0]] = time_data
return finalized
def get_todays_apps(self):
#transforms the list of times and names into a dict sorted by name
todays_apps_all = self.GetTimes.get_day()
todays_apps_sorted = defaultdict(list)
for app in todays_apps_all:
            if app[0] != '':  # the screen saver is logged as an empty string
todays_apps_sorted[app[0]].append(app[1])
return todays_apps_sorted
def make_time_list(self, todays_apps):
#returns the raw time for each app in a list of tups with time delta
#Object to rep mins
raw_hours = []
for entry in todays_apps:
time_segs = len(todays_apps[entry])
mins = timedelta(minutes=time_segs)
raw_hours.append((entry, mins))
return raw_hours
def make_present_list(self,time_list):
raw_present = []
total_time = 0
#add all time from the list
for times in time_list:
total_time = total_time + times[1].total_seconds()
total_time = total_time / 60
# get avg per entry
for time in time_list:
avg = (time[1].total_seconds() / 60) / total_time * 100
raw_present.append((time[0],round(avg,0)))
return raw_present
|
adamRogerson/TerminalHours
|
newHours/old/old-Builder.py
|
Python
|
agpl-3.0
| 2,116
|
"""
This module provides date summary blocks for the Course Info
page. Each block gives information about a particular
course-run-specific date which will be displayed to the user.
"""
import datetime
import crum
from babel.dates import format_timedelta
from django.conf import settings
from django.urls import reverse
from django.utils.formats import date_format
from django.utils.functional import cached_property
from django.utils.translation import get_language, to_locale
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from lazy import lazy
from pytz import utc
from common.djangoapps.course_modes.models import CourseMode, get_cosmetic_verified_display_price
from lms.djangoapps.certificates.api import get_active_web_certificate, can_show_certificate_available_date_field
from lms.djangoapps.courseware.utils import verified_upgrade_deadline_link, can_show_verified_upgrade
from lms.djangoapps.verify_student.models import VerificationDeadline
from lms.djangoapps.verify_student.services import IDVerificationService
from openedx.core.djangoapps.agreements.toggles import is_integrity_signature_enabled
from openedx.core.djangolib.markup import HTML, Text
from openedx.features.course_duration_limits.access import get_user_course_expiration_date
from openedx.features.course_experience import RELATIVE_DATES_FLAG, UPGRADE_DEADLINE_MESSAGE, CourseHomeMessages
from common.djangoapps.student.models import CourseEnrollment
from .context_processor import user_timezone_locale_prefs
class DateSummary:
"""Base class for all date summary blocks."""
# A consistent representation of the current time.
_current_time = None
@property
def current_time(self):
"""
Returns a consistent current time.
"""
if self._current_time is None:
self._current_time = datetime.datetime.now(utc)
return self._current_time
@property
def css_class(self):
"""
The CSS class of this summary. Indicates the type of information
this summary block contains, and its urgency.
"""
return ''
@property
def date_type(self):
return 'event'
@property
def title(self):
"""The title of this summary."""
return ''
@property
def title_html(self):
"""The title as html for this summary."""
return ''
@property
def description(self):
"""The detail text displayed by this summary."""
return ''
@property
def extra_info(self):
"""Extra detail to display as a tooltip."""
return None
def register_alerts(self, request, course):
"""
Registers any relevant course alerts given the current request.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@property
def date(self):
"""This summary's date."""
return None
@property
def date_format(self):
"""
The format to display this date in. By default, displays like Jan
01, 2015.
"""
return '%b %d, %Y'
@property
def link(self):
"""The location to link to for more information."""
return ''
@property
def link_text(self):
"""The text of the link."""
return ''
def __init__(self, course, user, course_id=None):
self.course = course
self.user = user
self.course_id = course_id or self.course.id
@property
def relative_datestring(self):
"""
Return this block's date in a human-readable format. If the date
is None, returns the empty string.
"""
if self.date is None:
return ''
locale = to_locale(get_language())
delta = self.date - self.current_time
try:
relative_date = format_timedelta(delta, locale=locale)
# Babel doesn't have translations for Esperanto, so we get
# a KeyError when testing translations with
# ?preview-lang=eo. This should not happen with any other
# languages. See https://github.com/python-babel/babel/issues/107
except KeyError:
relative_date = format_timedelta(delta)
date_has_passed = delta.days < 0
# Translators: 'absolute' is a date such as "Jan 01,
# 2020". 'relative' is a fuzzy description of the time until
# 'absolute'. For example, 'absolute' might be "Jan 01, 2020",
        # and if today were December 5th, 2019, 'relative' would be "1
# month".
date_format = _("{relative} ago - {absolute}") if date_has_passed else _("in {relative} - {absolute}") # lint-amnesty, pylint: disable=redefined-outer-name
return date_format.format(
relative=relative_date,
absolute='{date}',
)
@lazy
def is_allowed(self):
"""
Whether or not this summary block is applicable or active for its course.
For example, a DateSummary might only make sense for a self-paced course, and
you could restrict it here.
You should not make time-sensitive checks here. That sort of thing belongs in
is_enabled.
"""
return True
@property
def is_enabled(self):
"""
Whether or not this summary block should be shown.
By default, the summary is only shown if its date is in the
future.
"""
return (
self.date is not None and
self.current_time.date() <= self.date.date()
)
def deadline_has_passed(self):
"""
Return True if a deadline (the date) exists, and has already passed.
Returns False otherwise.
"""
deadline = self.date
return deadline is not None and deadline <= self.current_time
@property
def time_remaining_string(self):
"""
Returns the time remaining as a localized string.
"""
locale = to_locale(get_language())
return format_timedelta(self.date - self.current_time, locale=locale)
def date_html(self, date_format='shortDate'): # lint-amnesty, pylint: disable=redefined-outer-name
"""
Returns a representation of the date as HTML.
Note: this returns a span that will be localized on the client.
"""
user_timezone = user_timezone_locale_prefs(crum.get_current_request())['user_timezone']
return HTML(
'<span class="date localized-datetime" data-format="{date_format}" data-datetime="{date_time}"'
' data-timezone="{user_timezone}" data-language="{user_language}">'
'</span>'
).format(
date_format=date_format,
date_time=self.date,
user_timezone=user_timezone,
user_language=get_language(),
)
@property
def long_date_html(self):
"""
Returns a long representation of the date as HTML.
Note: this returns a span that will be localized on the client.
"""
return self.date_html(date_format='shortDate')
@property
def short_time_html(self):
"""
Returns a short representation of the time as HTML.
Note: this returns a span that will be localized on the client.
"""
return self.date_html(date_format='shortTime')
def __repr__(self):
return 'DateSummary: "{title}" {date} is_enabled={is_enabled}'.format(
title=self.title,
date=self.date,
is_enabled=self.is_enabled
)
class TodaysDate(DateSummary):
"""
Displays today's date.
"""
css_class = 'todays-date'
is_enabled = True
# The date is shown in the title, no need to display it again.
def get_context(self):
context = super().get_context() # lint-amnesty, pylint: disable=no-member, super-with-arguments
context['date'] = ''
return context
@property
def date(self):
return self.current_time
@property
def date_type(self):
return 'todays-date'
@property
def title(self):
return 'current_datetime'
class CourseStartDate(DateSummary):
"""
Displays the start date of the course.
"""
css_class = 'start-date'
@property
def date(self):
if not self.course.self_paced:
return self.course.start
else:
enrollment = CourseEnrollment.get_enrollment(self.user, self.course_id)
return max(enrollment.created, self.course.start) if enrollment else self.course.start
@property
def date_type(self):
return 'course-start-date'
@property
def title(self):
enrollment = CourseEnrollment.get_enrollment(self.user, self.course_id)
if enrollment and self.course.end and enrollment.created > self.course.end:
return gettext_lazy('Enrollment Date')
return gettext_lazy('Course starts')
def register_alerts(self, request, course):
"""
Registers an alert if the course has not started yet.
"""
is_enrolled = CourseEnrollment.get_enrollment(request.user, course.id)
if not course.start or not is_enrolled:
return
days_until_start = (course.start - self.current_time).days
if course.start > self.current_time:
if days_until_start > 0:
CourseHomeMessages.register_info_message(
request,
Text(_(
"Don't forget to add a calendar reminder!"
)),
title=Text(_("Course starts in {time_remaining_string} on {course_start_date}.")).format(
time_remaining_string=self.time_remaining_string,
course_start_date=self.long_date_html,
)
)
else:
CourseHomeMessages.register_info_message(
request,
Text(_("Course starts in {time_remaining_string} at {course_start_time}.")).format(
time_remaining_string=self.time_remaining_string,
course_start_time=self.short_time_html,
)
)
class CourseEndDate(DateSummary):
"""
Displays the end date of the course.
"""
css_class = 'end-date'
title = gettext_lazy('Course ends')
is_enabled = True
@property
def description(self):
"""
Returns a description for what experience changes a learner encounters when the course end date passes.
Note that this currently contains 4 scenarios:
1. End date is in the future and learner is enrolled in a certificate earning mode
2. End date is in the future and learner is not enrolled at all or not enrolled
in a certificate earning mode
3. End date is in the past
4. End date does not exist (and now neither does the description)
"""
if self.date and self.current_time <= self.date:
mode, is_active = CourseEnrollment.enrollment_mode_for_user(self.user, self.course_id)
if is_active and CourseMode.is_eligible_for_certificate(mode):
return _('After this date, the course will be archived, which means you can review the '
'course content but can no longer participate in graded assignments or work towards earning '
'a certificate.')
else:
return _('After the course ends, the course content will be archived and no longer active.')
elif self.date:
return _('This course is archived, which means you can review course content but it is no longer active.')
else:
return ''
@property
def date(self):
"""
Returns the course end date, if applicable.
For self-paced courses using Personalized Learner Schedules, the end date is only displayed
if it is within 365 days.
"""
if self.course.self_paced and RELATIVE_DATES_FLAG.is_enabled(self.course_id):
one_year = datetime.timedelta(days=365)
if self.course.end and self.course.end < (self.current_time + one_year):
return self.course.end
return None
return self.course.end
@property
def date_type(self):
return 'course-end-date'
def register_alerts(self, request, course):
"""
Registers an alert if the end date is approaching.
"""
is_enrolled = CourseEnrollment.get_enrollment(request.user, course.id)
if not course.start or self.current_time < course.start or not is_enrolled:
return
days_until_end = (course.end - self.current_time).days
if course.end > self.current_time and days_until_end <= settings.COURSE_MESSAGE_ALERT_DURATION_IN_DAYS:
if days_until_end > 0:
CourseHomeMessages.register_info_message(
request,
Text(self.description),
title=Text(_('This course is ending in {time_remaining_string} on {course_end_date}.')).format(
time_remaining_string=self.time_remaining_string,
course_end_date=self.long_date_html,
)
)
else:
CourseHomeMessages.register_info_message(
request,
Text(self.description),
title=Text(_('This course is ending in {time_remaining_string} at {course_end_time}.')).format(
time_remaining_string=self.time_remaining_string,
course_end_time=self.short_time_html,
)
)
class CourseAssignmentDate(DateSummary):
"""
Displays due dates for homework assignments with a link to the homework
assignment if the link is provided.
"""
css_class = 'assignment'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.assignment_date = None
self.assignment_link = ''
self.assignment_title = None
self.assignment_title_html = None
self.first_component_block_id = None
self.contains_gated_content = False
self.complete = None
self.past_due = None
self._extra_info = None
@property
def date(self):
return self.assignment_date
@date.setter
def date(self, date):
self.assignment_date = date
@property
def date_type(self):
return 'assignment-due-date'
@property
def link(self):
return self.assignment_link
@property
def extra_info(self):
return self._extra_info
@link.setter
def link(self, link):
self.assignment_link = link
@property
def title(self):
return self.assignment_title
@property
def title_html(self):
return self.assignment_title_html
def set_title(self, title, link=None):
""" Used to set the title_html and title properties for the assignment date block """
if link:
self.assignment_title_html = HTML(
'<a href="{assignment_link}">{assignment_title}</a>'
).format(assignment_link=link, assignment_title=title)
self.assignment_title = title
class CourseExpiredDate(DateSummary):
"""
Displays the course expiration date for Audit learners (if enabled)
"""
css_class = 'course-expired'
@property
def date(self):
return get_user_course_expiration_date(self.user, self.course)
@property
def date_type(self):
return 'course-expired-date'
@property
def description(self):
return _('You lose all access to this course, including your progress.')
@property
def title(self):
return _('Audit Access Expires')
@lazy
def is_allowed(self):
return RELATIVE_DATES_FLAG.is_enabled(self.course.id)
class CertificateAvailableDate(DateSummary):
"""
Displays the certificate available date of the course.
"""
css_class = 'certificate-available-date'
title = gettext_lazy('Certificate Available')
@lazy
def is_allowed(self):
return (
can_show_certificate_available_date_field(self.course) and
self.has_certificate_modes and
get_active_web_certificate(self.course)
)
@property
def description(self):
return _('Day certificates will become available for passing verified learners.')
@property
def date(self):
return self.course.certificate_available_date
@property
def date_type(self):
return 'certificate-available-date'
@property
def has_certificate_modes(self):
return any(
mode.slug for mode in CourseMode.modes_for_course(
course_id=self.course.id, include_expired=True
) if mode.slug != CourseMode.AUDIT
)
def register_alerts(self, request, course):
"""
Registers an alert close to the certificate delivery date.
"""
is_enrolled = CourseEnrollment.get_enrollment(request.user, course.id)
if not is_enrolled or not self.is_enabled or course.end > self.current_time:
return
if self.date > self.current_time:
CourseHomeMessages.register_info_message(
request,
Text(_(
'If you have earned a certificate, you will be able to access it {time_remaining_string}'
' from now. You will also be able to view your certificates on your {learner_profile_link}.'
)).format(
time_remaining_string=self.time_remaining_string,
learner_profile_link=HTML(
'<a href="{learner_profile_url}">{learner_profile_name}</a>'
).format(
learner_profile_url=reverse('learner_profile', kwargs={'username': request.user.username}),
learner_profile_name=_('Learner Profile'),
),
),
title=Text(_('We are working on generating course certificates.'))
)
class VerifiedUpgradeDeadlineDate(DateSummary):
"""
Displays the date before which learners must upgrade to the
Verified track.
"""
css_class = 'verified-upgrade-deadline'
link_text = gettext_lazy('Upgrade to Verified Certificate')
@property
def link(self):
return verified_upgrade_deadline_link(self.user, self.course, self.course_id)
@cached_property
def enrollment(self):
return CourseEnrollment.get_enrollment(self.user, self.course_id)
@lazy
def is_allowed(self):
return can_show_verified_upgrade(self.user, self.enrollment, self.course)
@lazy
def date(self): # lint-amnesty, pylint: disable=invalid-overridden-method
if self.enrollment:
return self.enrollment.upgrade_deadline
else:
return None
@property
def date_type(self):
return 'verified-upgrade-deadline'
@property
def title(self):
dynamic_deadline = self._dynamic_deadline()
if dynamic_deadline is not None:
return _('Upgrade to Verified Certificate')
return _('Verification Upgrade Deadline')
def _dynamic_deadline(self):
if not self.enrollment:
return None
return self.enrollment.dynamic_upgrade_deadline
@property
def description(self):
dynamic_deadline = self._dynamic_deadline()
if dynamic_deadline is not None:
return _('Don\'t miss the opportunity to highlight your new knowledge and skills by earning a verified'
' certificate.')
return _('You are still eligible to upgrade to a Verified Certificate! '
'Pursue it to highlight the knowledge and skills you gain in this course.')
@property
def relative_datestring(self):
dynamic_deadline = self._dynamic_deadline()
if dynamic_deadline is None:
return super().relative_datestring
if self.date is None or self.deadline_has_passed():
return ' '
# Translators: This describes the time by which the user
# should upgrade to the verified track. 'date' will be
# their personalized verified upgrade deadline formatted
# according to their locale.
return _('by {date}')
def register_alerts(self, request, course):
"""
Registers an alert if the verification deadline is approaching.
"""
upgrade_price = get_cosmetic_verified_display_price(course)
if not UPGRADE_DEADLINE_MESSAGE.is_enabled(course.id) or not self.is_enabled or not upgrade_price:
return
days_left_to_upgrade = (self.date - self.current_time).days
if self.date > self.current_time and days_left_to_upgrade <= settings.COURSE_MESSAGE_ALERT_DURATION_IN_DAYS:
upgrade_message = _(
"Don't forget, you have {time_remaining_string} left to upgrade to a Verified Certificate."
).format(time_remaining_string=self.time_remaining_string)
if self._dynamic_deadline() is not None:
upgrade_message = _(
"Don't forget to upgrade to a verified certificate by {localized_date}."
).format(localized_date=date_format(self.date))
CourseHomeMessages.register_info_message(
request,
Text(_(
'In order to qualify for a certificate, you must meet all course grading '
'requirements, upgrade before the course deadline, and successfully verify '
'your identity on {platform_name} if you have not done so already.{button_panel}'
)).format(
platform_name=settings.PLATFORM_NAME,
button_panel=HTML(
'<div class="message-actions">'
'<a id="certificate_upsell" class="btn btn-upgrade"'
'data-creative="original_message" data-position="course_message"'
'href="{upgrade_url}">{upgrade_label}</a>'
'</div>'
).format(
upgrade_url=self.link,
upgrade_label=Text(_('Upgrade ({upgrade_price})')).format(upgrade_price=upgrade_price),
)
),
title=Text(upgrade_message)
)
class VerificationDeadlineDate(DateSummary):
"""
Displays the date by which the user must complete the verification
process.
"""
is_enabled = True
@property
def css_class(self):
base_state = 'verification-deadline'
if self.deadline_has_passed():
return base_state + '-passed'
elif self.must_retry():
return base_state + '-retry'
else:
return base_state + '-upcoming'
@property
def link_text(self):
return self.link_table[self.css_class][0]
@property
def link(self):
return self.link_table[self.css_class][1]
@property
def link_table(self):
"""Maps verification state to a tuple of link text and location."""
return {
'verification-deadline-passed': (_('Learn More'), ''),
'verification-deadline-retry': (
_('Retry Verification'),
IDVerificationService.get_verify_location(),
),
'verification-deadline-upcoming': (
_('Verify My Identity'),
IDVerificationService.get_verify_location(self.course_id),
)
}
@property
def title(self):
if self.deadline_has_passed():
return _('Missed Verification Deadline')
return _('Verification Deadline')
@property
def description(self):
if self.deadline_has_passed():
return _(
"Unfortunately you missed this course's deadline for"
" a successful verification."
)
return _(
"You must successfully complete verification before"
" this date to qualify for a Verified Certificate."
)
@lazy
def date(self): # lint-amnesty, pylint: disable=invalid-overridden-method
return VerificationDeadline.deadline_for_course(self.course_id)
@property
def date_type(self):
return 'verification-deadline-date'
@lazy
def is_allowed(self):
mode, is_active = CourseEnrollment.enrollment_mode_for_user(self.user, self.course_id)
return (
is_active and
mode == 'verified' and
self.verification_status in ('expired', 'none', 'must_reverify') and
not is_integrity_signature_enabled(self.course_id)
)
@lazy
def verification_status(self):
"""Return the verification status for this user."""
verification_status = IDVerificationService.user_status(self.user)
return verification_status['status']
def must_retry(self):
"""Return True if the user must re-submit verification, False otherwise."""
return self.verification_status == 'must_reverify'
|
arbrandes/edx-platform
|
lms/djangoapps/courseware/date_summary.py
|
Python
|
agpl-3.0
| 25,568
|
"""
Views file for the Darklang Django App
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation import ugettext as _
from web_fragments.fragment import Fragment
from openedx.core.djangoapps.dark_lang import DARK_LANGUAGE_KEY
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference, set_user_preference
from openedx.core.djangoapps.util.user_messages import PageLevelMessages
LANGUAGE_INPUT_FIELD = 'preview_language'
class PreviewLanguageFragmentView(EdxFragmentView):
"""
View used when a user is attempting to change the preview language using Darklang.
Expected Behavior:
GET - returns a form for setting/resetting the user's dark language
POST - updates or clears the setting to the given dark language
"""
def render_to_fragment(self, request, course_id=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ, unused-argument
"""
Renders the language preview view as a fragment.
"""
html = render_to_string('dark_lang/preview-language-fragment.html', {})
return Fragment(html)
def create_base_standalone_context(self, request, fragment, **kwargs):
"""
Creates the base context for rendering a fragment as a standalone page.
"""
return {
'uses_bootstrap': True,
}
def standalone_page_title(self, request, fragment, **kwargs):
"""
Returns the page title for the standalone update page.
"""
return _('Preview Language Administration')
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
"""
Renders the fragment to control the preview language.
"""
if not self._user_can_preview_languages(request.user):
raise Http404
return super().get(request, *args, **kwargs)
@method_decorator(login_required)
def post(self, request, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
Accept requests to update the preview language.
"""
if not self._user_can_preview_languages(request.user):
raise Http404
action = request.POST.get('action', None)
if action == 'set_preview_language':
self._set_preview_language(request)
elif action == 'reset_preview_language':
self._clear_preview_language(request)
return redirect(request.path)
def _user_can_preview_languages(self, user):
"""
Returns true if the specified user can preview languages.
"""
if not DarkLangConfig.current().enabled:
return False
return user and not user.is_anonymous
def _set_preview_language(self, request):
"""
Sets the preview language for the current user.
"""
preview_language = request.POST.get(LANGUAGE_INPUT_FIELD, '')
if not preview_language.strip():
PageLevelMessages.register_error_message(request, _('Language not provided'))
return
set_user_preference(request.user, DARK_LANGUAGE_KEY, preview_language)
PageLevelMessages.register_success_message(
request,
_('Language set to {preview_language}').format(
preview_language=preview_language
)
)
def _clear_preview_language(self, request):
"""
Clears the preview language for the current user.
"""
delete_user_preference(request.user, DARK_LANGUAGE_KEY)
if LANGUAGE_SESSION_KEY in request.session:
del request.session[LANGUAGE_SESSION_KEY]
PageLevelMessages.register_success_message(
request,
_('Language reset to the default')
)
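# Editor's note: an illustrative sketch; not part of the original file. It
# shows how this fragment view might be wired into a urls.py; the path and
# URL name are placeholders.
#
#     from django.urls import path
#     urlpatterns = [
#         path('preview-lang', PreviewLanguageFragmentView.as_view(), name='preview_lang'),
#     ]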
|
eduNEXT/edunext-platform
|
openedx/core/djangoapps/dark_lang/views.py
|
Python
|
agpl-3.0
| 4,169
|
#!/usr/bin/python2.4
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
VERSION = "1.122.2-38.2791"
|
marcellodesales/svnedge-console
|
ext/solaris/pkg-toolkit/pkg/vendor-packages/pkg/__init__.py
|
Python
|
agpl-3.0
| 947
|
from django.contrib import messages
from django.contrib.auth import logout
from django.shortcuts import render_to_response, render
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.forms import ValidationError
from django.core.urlresolvers import reverse
from allauth.utils import email_address_exists, get_user_model
from allauth.account.utils import (perform_login, complete_signup,
user_email, user_username)
from allauth.account import app_settings as account_settings
from allauth.account.adapter import get_adapter as get_account_adapter
from allauth.exceptions import ImmediateHttpResponse
from .models import SocialLogin
from . import app_settings
from . import signals
from .adapter import get_adapter
User = get_user_model()
def _process_signup(request, sociallogin):
# If email is specified, check for duplicate and if so, no auto signup.
auto_signup = app_settings.AUTO_SIGNUP
email = user_email(sociallogin.account.user)
if auto_signup:
# Let's check if auto_signup is really possible...
if email:
if account_settings.UNIQUE_EMAIL:
if email_address_exists(email):
# Oops, another user already has this address. We
# cannot simply connect this social account to the
# existing user. The reason is that the email address may
# not be verified, meaning the user may be an attacker
# who has added your email address to their account in
# the hope that you fall into their trap. We cannot check
# on 'email_address.verified' either, because
# 'email_address' is not guaranteed to be verified.
auto_signup = False
# FIXME: We redirect to signup form -- user will
# see email address conflict only after posting
# whereas we detected it here already.
elif app_settings.EMAIL_REQUIRED:
# Nope, email is required and we don't have it yet...
auto_signup = False
if not auto_signup:
request.session['socialaccount_sociallogin'] = sociallogin.serialize()
url = reverse('socialaccount_signup')
ret = HttpResponseRedirect(url)
else:
# Ok, auto signup it is, at least the e-mail address is ok.
# We still need to check the username though...
if account_settings.USER_MODEL_USERNAME_FIELD:
username = user_username(sociallogin.account.user)
try:
get_account_adapter().clean_username(username)
except ValidationError:
# This username is no good ...
user_username(sociallogin.account.user, '')
# FIXME: This part contains a lot of duplicated logic
# ("closed" rendering, create user, send email, mark inactive,
# etc.)
try:
if not get_adapter().is_open_for_signup(request,
sociallogin):
return render(request,
"account/signup_closed.html")
except ImmediateHttpResponse as e:
return e.response
get_adapter().save_user(request, sociallogin, form=None)
ret = complete_social_signup(request, sociallogin)
return ret
def _login_social_account(request, sociallogin):
return perform_login(request, sociallogin.account.user,
email_verification=app_settings.EMAIL_VERIFICATION,
redirect_url=sociallogin.get_redirect_url(request),
signal_kwargs={"sociallogin": sociallogin})
def render_authentication_error(request, extra_context={}):
return render_to_response(
"socialaccount/authentication_error.html",
extra_context, context_instance=RequestContext(request))
def _add_social_account(request, sociallogin):
if request.user.is_anonymous():
# This should not happen. Simply redirect to the connections
# view (which requires login).
return HttpResponseRedirect(reverse('socialaccount_connections'))
level = messages.INFO
message = 'socialaccount/messages/account_connected.txt'
if sociallogin.is_existing:
if sociallogin.account.user != request.user:
# Social account of other user. For now, this scenario
# is not supported. Issue is that one cannot simply
# remove the social account from the other user, as
# that may render the account unusable.
level = messages.ERROR
message = 'socialaccount/messages/account_connected_other.txt'
else:
# This account is already connected -- let's play along
# and render the standard "account connected" message
# without actually doing anything.
pass
else:
# New account, let's connect
sociallogin.connect(request, request.user)
try:
signals.social_account_added.send(sender=SocialLogin,
request=request,
sociallogin=sociallogin)
except ImmediateHttpResponse as e:
return e.response
default_next = get_adapter() \
.get_connect_redirect_url(request,
sociallogin.account)
next_url = sociallogin.get_redirect_url(request) or default_next
get_account_adapter().add_message(request, level, message)
return HttpResponseRedirect(next_url)
def complete_social_login(request, sociallogin):
assert not sociallogin.is_existing
sociallogin.lookup()
try:
get_adapter().pre_social_login(request, sociallogin)
signals.pre_social_login.send(sender=SocialLogin,
request=request,
sociallogin=sociallogin)
except ImmediateHttpResponse as e:
return e.response
if sociallogin.state.get('process') == 'connect':
return _add_social_account(request, sociallogin)
else:
return _complete_social_login(request, sociallogin)
def _complete_social_login(request, sociallogin):
if request.user.is_authenticated():
logout(request)
if sociallogin.is_existing:
# Login existing user
ret = _login_social_account(request, sociallogin)
else:
# New social user
ret = _process_signup(request, sociallogin)
return ret
def complete_social_signup(request, sociallogin):
return complete_signup(request,
sociallogin.account.user,
app_settings.EMAIL_VERIFICATION,
sociallogin.get_redirect_url(request),
signal_kwargs={'sociallogin': sociallogin})
# TODO: Factor out callable importing functionality
# See: account.utils.user_display
def import_path(path):
modname, _, attr = path.rpartition('.')
m = __import__(modname, fromlist=[attr])
return getattr(m, attr)
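# A minimal sketch of import_path resolving a dotted path to a callable; only
# standard-library names are used, so nothing beyond the function above is assumed:
#
#   join = import_path('os.path.join')
#   join('a', 'b')   # -> 'a/b' on POSIX (the separator is OS-dependent)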
|
City-of-Bloomington/green-rental
|
allauth/socialaccount/helpers.py
|
Python
|
agpl-3.0
| 7,201
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'MailFeedRuleLine'
db.delete_table(u'core_mailfeedruleline')
# Adding field 'MailFeedRule.header_field'
db.add_column(u'core_mailfeedrule', 'header_field',
self.gf('django.db.models.fields.CharField')(default=u'any', max_length=10),
keep_default=False)
# Adding field 'MailFeedRule.other_header'
db.add_column(u'core_mailfeedrule', 'other_header',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'MailFeedRule.match_type'
db.add_column(u'core_mailfeedrule', 'match_type',
self.gf('django.db.models.fields.CharField')(default=u'contains', max_length=10),
keep_default=False)
# Adding field 'MailFeedRule.match_value'
db.add_column(u'core_mailfeedrule', 'match_value',
self.gf('django.db.models.fields.TextField')(default=u''),
keep_default=False)
# Adding field 'MailFeedRule.match_action'
db.add_column(u'core_mailfeedrule', 'match_action',
self.gf('django.db.models.fields.CharField')(default=u'store', max_length=10),
keep_default=False)
# Adding field 'MailFeedRule.scrap_refine'
db.add_column(u'core_mailfeedrule', 'scrap_refine',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'MailFeedRule.scrap_adblock'
db.add_column(u'core_mailfeedrule', 'scrap_adblock',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Adding field 'MailFeedRule.clone_of'
db.add_column(u'core_mailfeedrule', 'clone_of',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.MailFeedRule'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding model 'MailFeedRuleLine'
db.create_table(u'core_mailfeedruleline', (
('header_field', self.gf('django.db.models.fields.CharField')(default=u'any', max_length=10)),
('match_type', self.gf('django.db.models.fields.CharField')(default=u'contains', max_length=10)),
('match_value', self.gf('django.db.models.fields.TextField')(max_length=255)),
('rule', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.MailFeedRule'])),
('other_header', self.gf('django.db.models.fields.CharField')(max_length=255)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('core', ['MailFeedRuleLine'])
# Deleting field 'MailFeedRule.header_field'
db.delete_column(u'core_mailfeedrule', 'header_field')
# Deleting field 'MailFeedRule.other_header'
db.delete_column(u'core_mailfeedrule', 'other_header')
# Deleting field 'MailFeedRule.match_type'
db.delete_column(u'core_mailfeedrule', 'match_type')
# Deleting field 'MailFeedRule.match_value'
db.delete_column(u'core_mailfeedrule', 'match_value')
# Deleting field 'MailFeedRule.match_action'
db.delete_column(u'core_mailfeedrule', 'match_action')
# Deleting field 'MailFeedRule.scrap_refine'
db.delete_column(u'core_mailfeedrule', 'scrap_refine')
# Deleting field 'MailFeedRule.scrap_adblock'
db.delete_column(u'core_mailfeedrule', 'scrap_adblock')
# Deleting field 'MailFeedRule.clone_of'
db.delete_column(u'core_mailfeedrule', 'clone_of_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': '85bc89526f9b4cc58774cb225761e755'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.corepermissions': {
'Meta': {'object_name': 'CorePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.mailaccount': {
'Meta': {'unique_together': "(('user', 'hostname', 'username'),)", 'object_name': 'MailAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2007, 1, 1, 0, 0)'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.mailfeedrule': {
'Meta': {'object_name': 'MailFeedRule'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailAccount']", 'null': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'header_field': ('django.db.models.fields.CharField', [], {'default': "u'any'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeed']"}),
'match_action': ('django.db.models.fields.CharField', [], {'default': "u'store'", 'max_length': '10'}),
'match_type': ('django.db.models.fields.CharField', [], {'default': "u'contains'", 'max_length': '10'}),
'match_value': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'scrap_adblock': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrap_refine': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core']
|
WillianPaiva/1flow
|
oneflow/core/migrations/0012_auto__del_mailfeedruleline__add_field_mailfeedrule_header_field__add_f.py
|
Python
|
agpl-3.0
| 12,382
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'AlertTypes'
db.delete_table('frontend_alerttypes')
# Deleting model 'Alerts'
db.delete_table('frontend_alerts')
# Removing M2M table for field alert_types on 'UserProfile'
db.delete_table('frontend_userprofile_alert_types')
def backwards(self, orm):
# Adding model 'AlertTypes'
db.create_table('frontend_alerttypes', (
('name', self.gf('django.db.models.fields.CharField')(max_length=100, unique=True, blank=True)),
('applies_to', self.gf('django.db.models.fields.CharField')(max_length=100)),
('label', self.gf('django.db.models.fields.CharField')(max_length=500, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('frontend', ['AlertTypes'])
# Adding model 'Alerts'
db.create_table('frontend_alerts', (
('event_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='event_alerts_set', null=True, to=orm['contenttypes.ContentType'])),
('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('meta', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('historicalgroup', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('event_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('message_level', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
('message_type', self.gf('django.db.models.fields.CharField')(max_length=100)),
('message_value', self.gf('django.db.models.fields.CharField')(max_length=5000, null=True, blank=True)),
))
db.send_create_signal('frontend', ['Alerts'])
# Adding M2M table for field alert_types on 'UserProfile'
db.create_table('frontend_userprofile_alert_types', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('userprofile', models.ForeignKey(orm['frontend.userprofile'], null=False)),
('alerttypes', models.ForeignKey(orm['frontend.alerttypes'], null=False))
))
db.create_unique('frontend_userprofile_alert_types', ['userprofile_id', 'alerttypes_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'frontend.dataenquiry': {
'Meta': {'object_name': 'DataEnquiry'},
'application': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'broadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'columns': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'date_of_enquiry': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'urls': ('django.db.models.fields.TextField', [], {}),
'visualisation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'frontend.message': {
'Meta': {'object_name': 'Message'},
'finish': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'frontend.userprofile': {
'Meta': {'ordering': "('-created_at',)", 'object_name': 'UserProfile'},
'alert_frequency': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'alerts_last_sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'beta_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'frontend.usertouserrole': {
'Meta': {'object_name': 'UserToUserRole'},
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_user'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['frontend']
|
lkundrak/scraperwiki
|
web/frontend/migrations/0021_remove_old_alerts_models.py
|
Python
|
agpl-3.0
| 9,945
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import flt, cstr
@frappe.whitelist()
def enroll_student(source_name):
"""Creates a Student Record and returns a Program Enrollment.
:param source_name: Student Applicant.
"""
student = get_mapped_doc("Student Applicant", source_name,
{"Student Applicant": {
"doctype": "Student",
"field_map": {
"name": "student_applicant"
}
}}, ignore_permissions=True)
student.save()
program_enrollment = frappe.new_doc("Program Enrollment")
program_enrollment.student = student.name
program_enrollment.student_name = student.title
program_enrollment.program = frappe.db.get_value("Student Applicant", source_name, "program")
return program_enrollment
@frappe.whitelist()
def check_attendance_records_exist(course_schedule=None, student_batch=None, date=None):
"""Check if Attendance Records are made against the specified Course Schedule or Student Batch for given date.
:param course_schedule: Course Schedule.
:param student_batch: Student Batch.
:param date: Date.
"""
if course_schedule:
return frappe.get_list("Student Attendance", filters={"course_schedule": course_schedule})
else:
return frappe.get_list("Student Attendance", filters={"student_batch": student_batch, "date": date})
@frappe.whitelist()
def mark_attendance(students_present, students_absent, course_schedule=None, student_batch=None, date=None):
"""Creates Multiple Attendance Records.
:param students_present: Students Present JSON.
:param students_absent: Students Absent JSON.
:param course_schedule: Course Schedule.
:param student_batch: Student Batch.
:param date: Date.
"""
present = json.loads(students_present)
absent = json.loads(students_absent)
for d in present:
make_attendance_records(d["student"], d["student_name"], "Present", course_schedule, student_batch, date)
for d in absent:
make_attendance_records(d["student"], d["student_name"], "Absent", course_schedule, student_batch, date)
frappe.db.commit()
frappe.msgprint(_("Attendance has been marked successfully."))
def make_attendance_records(student, student_name, status, course_schedule=None, student_batch=None, date=None):
"""Creates Attendance Record.
:param student: Student.
:param student_name: Student Name.
:param course_schedule: Course Schedule.
:param status: Status (Present/Absent)
"""
student_attendance = frappe.new_doc("Student Attendance")
student_attendance.student = student
student_attendance.student_name = student_name
student_attendance.course_schedule = course_schedule
student_attendance.student_batch = student_batch
student_attendance.date = date
student_attendance.status = status
student_attendance.submit()
@frappe.whitelist()
def get_student_batch_students(student_batch):
"""Returns List of student, student_name, idx in Student Batch.
:param student_batch: Student Batch.
"""
students = frappe.get_list("Student Batch Student", fields=["student", "student_name", "idx"] ,
filters={"parent": student_batch, "active": 1}, order_by= "idx")
return students
@frappe.whitelist()
def get_student_group_students(student_group):
"""Returns List of student, student_name in Student Group.
:param student_group: Student Group.
"""
students = frappe.get_list("Student Group Student", fields=["student", "student_name"] ,
filters={"parent": student_group, "active": 1}, order_by= "idx")
return students
@frappe.whitelist()
def get_fee_structure(program, academic_term=None):
"""Returns Fee Structure.
:param program: Program.
:param academic_term: Academic Term.
"""
fee_structure = frappe.db.get_values("Fee Structure", {"program": program,
"academic_term": academic_term}, 'name', as_dict=True)
return fee_structure[0].name if fee_structure else None
@frappe.whitelist()
def get_fee_components(fee_structure):
"""Returns Fee Components.
:param fee_structure: Fee Structure.
"""
if fee_structure:
fs = frappe.get_list("Fee Component", fields=["fees_category", "amount"] , filters={"parent": fee_structure}, order_by= "idx")
return fs
@frappe.whitelist()
def get_fee_schedule(program, student_category=None):
"""Returns Fee Schedule.
:param program: Program.
:param student_category: Student Category
"""
fs = frappe.get_list("Program Fee", fields=["academic_term", "fee_structure", "due_date", "amount"] ,
filters={"parent": program, "student_category": student_category }, order_by= "idx")
return fs
@frappe.whitelist()
def collect_fees(fees, amt):
paid_amount = flt(amt) + flt(frappe.db.get_value("Fees", fees, "paid_amount"))
total_amount = flt(frappe.db.get_value("Fees", fees, "total_amount"))
frappe.db.set_value("Fees", fees, "paid_amount", paid_amount)
frappe.db.set_value("Fees", fees, "outstanding_amount", (total_amount - paid_amount))
return paid_amount
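# Worked example of the arithmetic above (illustrative figures only): for a Fees record
# with total_amount 1000 and paid_amount 250, collect_fees(fees, 250) stores
# paid_amount = 250 + 250 = 500, outstanding_amount = 1000 - 500 = 500, and returns 500.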
@frappe.whitelist()
def get_course_schedule_events(start, end, filters=None):
"""Returns events for Course Schedule Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Course Schedule", filters)
data = frappe.db.sql("""select name, course,
timestamp(schedule_date, from_time) as from_datetime,
timestamp(schedule_date, to_time) as to_datetime,
room, student_group, 0 as 'allDay'
from `tabCourse Schedule`
where ( schedule_date between %(start)s and %(end)s )
{conditions}""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
@frappe.whitelist()
def get_assessment_criteria(course):
"""Returns Assessmemt Criteria and their Weightage from Course Master.
:param Course: Course
"""
return frappe.get_list("Course Assessment Criteria", \
fields=["assessment_criteria", "weightage"], filters={"parent": course}, order_by= "idx")
@frappe.whitelist()
def get_assessment_students(assessment_plan, student_group=None, student_batch=None):
student_list = []
if student_group:
student_list = get_student_group_students(student_group)
elif student_batch:
student_list = get_student_batch_students(student_batch)
for i, student in enumerate(student_list):
result = get_result(student.student, assessment_plan)
if result:
student_result = {}
for d in result.details:
student_result.update({d.assessment_criteria: cstr(d.score) + " ("+ d.grade + ")"})
student_result.update({"total_score": cstr(result.total_score) + " (" + result.grade + ")"})
student.update({'assessment_details': student_result})
else:
student.update({'assessment_details': None})
return student_list
@frappe.whitelist()
def get_assessment_details(assessment_plan):
"""Returns Assessment Criteria and Maximum Score from Assessment Plan Master.
:param Assessment Plan: Assessment Plan
"""
return frappe.get_list("Assessment Plan Criteria", \
fields=["assessment_criteria", "maximum_score"], filters={"parent": assessment_plan}, order_by= "idx")
@frappe.whitelist()
def get_result(student, assessment_plan):
"""Returns Submitted Result of given student for specified Assessment Plan
:param Student: Student
:param Assessment Plan: Assessment Plan
"""
results = frappe.get_all("Assessment Result", filters={"student": student, "assessment_plan": assessment_plan, "docstatus": 1})
if results:
return frappe.get_doc("Assessment Result", results[0])
else:
return None
@frappe.whitelist()
def get_grade(grading_scale, percentage):
"""Returns Grade based on the Grading Scale and Score.
:param Grading Scale: Grading Scale
:param Percentage: Score Percentage Percentage
"""
grading_scale_intervals = {}
for d in frappe.get_all("Grading Scale Interval", fields=["grade_code", "threshold"], filters={"parent": grading_scale}):
grading_scale_intervals.update({d.threshold:d.grade_code})
intervals = sorted(grading_scale_intervals.keys(), key=float, reverse=True)
for interval in intervals:
if flt(percentage) >= interval:
grade = grading_scale_intervals.get(interval)
break
else:
grade = ""
return grade
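# Worked example of the threshold scan above, with a hypothetical grading scale of
# {85: 'A', 70: 'B', 50: 'C'}: thresholds are checked in descending order, so a
# percentage of 72 first satisfies 70 and yields 'B', while 40 satisfies none of
# them and yields ''.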
@frappe.whitelist()
def mark_assessment_result(student, assessment_plan, scores):
student_score = json.loads(scores)
details = []
for s in student_score.keys():
details.append({
"assessment_criteria": s,
"score": flt(student_score[s])
})
assessment_result = frappe.new_doc("Assessment Result")
assessment_result.update({
"student": student,
"student_name": frappe.db.get_value("Student", student, "title"),
"assessment_plan": assessment_plan,
"details": details
})
assessment_result.save()
assessment_result.submit()
return assessment_result
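# A minimal sketch of the JSON payloads mark_attendance expects, assuming a working
# Frappe site context; the student identifiers and batch name are hypothetical:
#
#   import json
#   present = json.dumps([{"student": "STU-0001", "student_name": "Jane Doe"}])
#   absent = json.dumps([{"student": "STU-0002", "student_name": "John Roe"}])
#   mark_attendance(present, absent, student_batch="Batch-A", date="2017-06-01")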
|
njmube/erpnext
|
erpnext/schools/api.py
|
Python
|
agpl-3.0
| 8,908
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('predict', '0008_auto_20170214_0945'),
]
operations = [
migrations.AlterField(
model_name='predictstrain',
name='file_one',
field=models.ForeignKey(related_name='link_a', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='uploads.UploadFile', null=True),
),
migrations.AlterField(
model_name='predictstrain',
name='file_two',
field=models.ForeignKey(related_name='link_b', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='uploads.UploadFile', null=True),
),
migrations.AlterField(
model_name='predictstrain',
name='piperun',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='pipeline.PipelineRun', null=True),
),
]
|
IQSS/gentb-site
|
apps/predict/migrations/0009_auto_20170305_1442.py
|
Python
|
agpl-3.0
| 1,055
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0015_merge'),
]
operations = [
migrations.AlterField(
model_name='areasoltura',
name='distancia',
field=models.FloatField(null=True, verbose_name='Distância até o CETAS mais próximo', blank=True),
),
migrations.AlterField(
model_name='asv',
name='area_ha',
field=models.FloatField(null=True, verbose_name='Área da Propriedade (ha)', blank=True),
),
]
|
igor-rodrigues01/casv
|
casv/core/migrations/0016_auto_20151201_1703.py
|
Python
|
agpl-3.0
| 659
|
#############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class NetlibLapack(Package):
"""LAPACK version 3.X is a comprehensive FORTRAN library that does
linear algebra operations including matrix inversions, least squared
solutions to linear sets of equations, eigenvector analysis, singular
value decomposition, etc. It is a very comprehensive and reputable
package that has found extensive use in the scientific community.
"""
homepage = "http://www.netlib.org/lapack/"
url = "http://www.netlib.org/lapack/lapack-3.5.0.tgz"
version('3.8.0', '96591affdbf58c450d45c1daa540dbd2',
url='http://www.netlib.org/lapack/lapack-3.8.0.tar.gz')
version('3.7.1', 'dcdeeed73de152c4643ccc5b1aeb453c')
version('3.7.0', '697bb8d67c7d336a0f339cc9dd0fa72f')
version('3.6.1', '421b2cb72e15f237e144428f9c460ee0')
version('3.6.0', 'f2f6c67134e851fe189bb3ca1fbb5101')
version('3.5.0', 'b1d3e3e425b2e44a06760ff173104bdf')
version('3.4.2', '61bf1a8a4469d4bdb7604f5897179478')
version('3.4.1', '44c3869c38c8335c2b9c2a8bb276eb55')
version('3.4.0', '02d5706ec03ba885fc246e5fa10d8c70')
version('3.3.1', 'd0d533ec9a5b74933c2a1e84eedc58b4')
variant('debug', default=False,
description='Activates the Debug build type')
variant('shared', default=True, description="Build shared library version")
variant('external-blas', default=False,
description='Build lapack with an external blas')
variant('lapacke', default=True,
description='Activates the build of the LAPACKE C interface')
variant('xblas', default=False,
description='Builds extended precision routines using XBLAS')
patch('ibm-xl.patch', when='@3.7: %xl')
patch('ibm-xl.patch', when='@3.7: %xl_r')
# virtual dependency
provides('blas', when='~external-blas')
provides('lapack')
depends_on('cmake', type='build')
depends_on('blas', when='+external-blas')
depends_on('netlib-xblas+fortran+plain_blas', when='+xblas')
def patch(self):
# Fix cblas CMakeLists.txt -- has wrong case for subdirectory name.
if self.spec.satisfies('@3.6.0:'):
filter_file(
'${CMAKE_CURRENT_SOURCE_DIR}/CMAKE/',
'${CMAKE_CURRENT_SOURCE_DIR}/cmake/',
'CBLAS/CMakeLists.txt', string=True)
@property
def blas_libs(self):
shared = True if '+shared' in self.spec else False
query_parameters = self.spec.last_query.extra_parameters
query2libraries = {
tuple(): ['libblas'],
('c', 'fortran'): [
'libcblas',
'libblas',
],
('c',): [
'libcblas',
],
('fortran',): [
'libblas',
]
}
key = tuple(sorted(query_parameters))
libraries = query2libraries[key]
return find_libraries(
libraries, root=self.prefix, shared=shared, recursive=True
)
@property
def lapack_libs(self):
shared = True if '+shared' in self.spec else False
query_parameters = self.spec.last_query.extra_parameters
query2libraries = {
tuple(): ['liblapack'],
('c', 'fortran'): [
'liblapacke',
'liblapack',
],
('c',): [
'liblapacke',
],
('fortran',): [
'liblapack',
]
}
key = tuple(sorted(query_parameters))
libraries = query2libraries[key]
return find_libraries(
libraries, root=self.prefix, shared=shared, recursive=True
)
@property
def headers(self):
include_dir = self.spec.prefix.include
cblas_h = join_path(include_dir, 'cblas.h')
lapacke_h = join_path(include_dir, 'lapacke.h')
return HeaderList([cblas_h, lapacke_h])
def install_one(self, spec, prefix, shared):
cmake_args = [
'-DBUILD_SHARED_LIBS:BOOL=%s' % ('ON' if shared else 'OFF'),
'-DCMAKE_BUILD_TYPE:STRING=%s' % (
'Debug' if '+debug' in spec else 'Release'),
'-DLAPACKE:BOOL=%s' % (
'ON' if '+lapacke' in spec else 'OFF'),
'-DLAPACKE_WITH_TMG:BOOL=%s' % (
'ON' if '+lapacke' in spec else 'OFF')]
if spec.satisfies('@3.6.0:'):
cmake_args.extend(['-DCBLAS=ON']) # always build CBLAS
if self.compiler.name == 'intel':
# Intel compiler finds serious syntax issues when trying to
# build CBLAS and LapackE
cmake_args.extend(['-DCBLAS=OFF'])
cmake_args.extend(['-DLAPACKE:BOOL=OFF'])
if self.compiler.name == 'xl' or self.compiler.name == 'xl_r':
# use F77 compiler if IBM XL
cmake_args.extend([
'-DCMAKE_Fortran_COMPILER=%s' % self.compiler.f77,
'-DCMAKE_Fortran_FLAGS=%s' % (
' '.join(self.spec.compiler_flags['fflags'])),
])
# Deprecated routines are commonly needed by, for example, SuiteSparse.
# Note that the OpenBLAS Spack package is also built with deprecated routines.
cmake_args.extend(['-DBUILD_DEPRECATED:BOOL=ON'])
if '+external-blas' in spec:
cmake_args.extend([
'-DUSE_OPTIMIZED_BLAS:BOOL=ON',
'-DBLAS_LIBRARIES:PATH=%s' % spec['blas'].libs.joined(';')
])
if spec.satisfies('+xblas'):
xblas_include_dir = spec['netlib-xblas'].prefix.include
xblas_library = spec['netlib-xblas'].libs.joined(';')
cmake_args.extend([
'-DXBLAS_INCLUDE_DIR={0}'.format(xblas_include_dir),
'-DXBLAS_LIBRARY={0}'.format(xblas_library)])
cmake_args.extend(std_cmake_args)
build_dir = 'spack-build' + ('-shared' if shared else '-static')
with working_dir(build_dir, create=True):
cmake('..', *cmake_args)
make()
make("install")
def install(self, spec, prefix):
# Always build static libraries.
self.install_one(spec, prefix, False)
# Build shared libraries if requested.
if '+shared' in spec:
self.install_one(spec, prefix, True)
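# A minimal sketch of how the variants above combine in a Spack spec (the version and
# variant selection are illustrative, not a recommendation):
#
#   spack install netlib-lapack@3.8.0+lapacke+xblas~external-blas
#
# With the default '+shared', install() first builds static libraries and then runs a
# second, shared build in a separate 'spack-build-shared' directory, as coded above.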
|
tmerrick1/spack
|
var/spack/repos/builtin/packages/netlib-lapack/package.py
|
Python
|
lgpl-2.1
| 7,557
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class G4ndl(Package):
"""Geant4 Neutron data files with thermal cross sections """
homepage = "http://geant4.web.cern.ch"
url = "http://geant4-data.web.cern.ch/geant4-data/datasets/G4NDL.4.5.tar.gz"
tags = ['hep']
maintainers = ['drbenmorgan']
version('4.6', sha256='9d287cf2ae0fb887a2adce801ee74fb9be21b0d166dab49bcbee9408a5145408')
version('4.5', sha256='cba928a520a788f2bc8229c7ef57f83d0934bb0c6a18c31ef05ef4865edcdf8e')
def install(self, spec, prefix):
mkdirp(join_path(prefix.share, 'data'))
install_path = join_path(prefix.share, 'data', 'G4NDL{0}'
.format(self.version))
install_tree(self.stage.source_path, install_path)
def setup_dependent_run_environment(self, env, dependent_spec):
install_path = join_path(self.prefix.share, 'data', 'G4NDL{0}'
.format(self.version))
env.set('G4NEUTRONHPDATA', install_path)
def url_for_version(self, version):
"""Handle version string."""
return ("http://geant4-data.web.cern.ch/geant4-data/datasets/G4NDL.%s.tar.gz" % version)
|
iulian787/spack
|
var/spack/repos/builtin/packages/g4ndl/package.py
|
Python
|
lgpl-2.1
| 1,363
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('crackdown_elite_sand_trooper_hard')
mobileTemplate.setLevel(87)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("imperial")
mobileTemplate.setAssistRange(0)
mobileTemplate.setStalker(False)
mobileTemplate.setFaction("imperial")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_dressed_stormtrooper_sandtrooper_m.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('elite_sandtrooper_commando_87', mobileTemplate)
return
|
ProjectSWGCore/NGECore2
|
scripts/mobiles/generic/faction/imperial/elite_sand_trooper_commando_87.py
|
Python
|
lgpl-3.0
| 1,453
|
# Launch it with py ../robotViewerLauncher.py +compensater.py +appli.py
import sys
import numpy as np
from numpy import *
from dynamic_graph import plug
import dynamic_graph.signal_base as dgsb
from dynamic_graph.sot.core import Stack_of_vector, MatrixHomoToPoseUTheta, OpPointModifier, Multiply_matrix_vector, MatrixHomoToPose, Selec_of_vector, Inverse_of_matrixHomo, Multiply_of_matrixHomo, MatrixHomoToPoseRollPitchYaw
from dynamic_graph.sot.application.state_observation import DGIMUModelBaseFlexEstimation, PositionStateReconstructor, Odometry, Filter, EstimatorInterface, DriftFromMocap
from dynamic_graph.sot.core.derivator import Derivator_of_Vector
from dynamic_graph.sot.core.matrix_util import matrixToTuple
from dynamic_graph.sot.application.state_observation import Calibrate
from dynamic_graph.sot.hrp2.dynamic_hrp2_14 import DynamicHrp2_14
from dynamic_graph.sot.core.feature_position import FeaturePosition
from dynamic_graph.sot.core import Task, FeatureGeneric
from dynamic_graph.sot.core import GainAdaptive
class HRP2ModelBaseFlexEstimatorIMUForceEncoders(DGIMUModelBaseFlexEstimation):
def __init__(self, robot, name='flextimatorEncoders'):
DGIMUModelBaseFlexEstimation.__init__(self,name)
self.setSamplingPeriod(0.005)
self.robot = robot
# Covariances
self.setProcessNoiseCovariance(matrixToTuple(np.diag((1e-8,)*12+(1e-4,)*3+(1e-4,)*3+(1e-4,)*3+(1e-4,)*3+(1.e-2,)*6+(1e-15,)*2+(1.e-8,)*3)))
self.setMeasurementNoiseCovariance(matrixToTuple(np.diag((1e-3,)*3+(1e-6,)*3)))
self.setUnmodeledForceVariance(1e-13)
self.setForceVariance(1e-4)
self.setAbsolutePosVariance(1e-4)
# Contact model definition
self.setContactModel(1)
self.setKfe(matrixToTuple(np.diag((40000,40000,40000))))
self.setKfv(matrixToTuple(np.diag((600,600,600))))
self.setKte(matrixToTuple(np.diag((600,600,600))))
self.setKtv(matrixToTuple(np.diag((60,60,60))))
#Estimator interface
self.interface=EstimatorInterface(name+"EstimatorInterface")
self.interface.setLeftHandSensorTransformation((0.,0.,1.57))
self.interface.setRightHandSensorTransformation((0.,0.,1.57))
self.interface.setFDInertiaDot(True)
# State and measurement definition
self.interface.setWithUnmodeledMeasurements(False)
self.interface.setWithModeledForces(True)
self.interface.setWithAbsolutePose(False)
self.setWithComBias(False)
# Contacts forces
plug (self.robot.device.forceLLEG,self.interface.force_lf)
plug (self.robot.device.forceRLEG,self.interface.force_rf)
plug (self.robot.device.forceLARM,self.interface.force_lh)
plug (self.robot.device.forceRARM,self.interface.force_rh)
# Selecting robotState
self.robot.device.robotState.value=46*(0.,)
self.robotState = Selec_of_vector('robotState')
plug(self.robot.device.robotState,self.robotState.sin)
self.robotState.selec(0,36)
# Reconstruction of the position of the free flyer from encoders
# Create dynamic with the free flyer at the origin of the control frame
self.robot.dynamicOdo=self.createDynamic(self.robotState.sout,'_dynamicOdo')
self.robot.dynamicOdo.inertia.recompute(1)
self.robot.dynamicOdo.waist.recompute(1)
# Reconstruction of the position of the contacts in dynamicOdo
self.leftFootPosOdo=Multiply_of_matrixHomo(name+"leftFootPosOdo")
plug(self.robot.dynamicOdo.signal('left-ankle'),self.leftFootPosOdo.sin1)
self.leftFootPosOdo.sin2.value=self.robot.forceSensorInLeftAnkle
self.rightFootPosOdo=Multiply_of_matrixHomo(name+"rightFootPosOdo")
plug(self.robot.dynamicOdo.signal('right-ankle'),self.rightFootPosOdo.sin1)
self.rightFootPosOdo.sin2.value=self.robot.forceSensorInRightAnkle
# Odometry
self.odometry=Odometry (name+'odometry')
plug (self.robot.frames['leftFootForceSensor'].position,self.odometry.leftFootPositionRef)
plug (self.robot.frames['rightFootForceSensor'].position,self.odometry.rightFootPositionRef)
plug (self.rightFootPosOdo.sout,self.odometry.rightFootPositionIn)
plug (self.leftFootPosOdo.sout,self.odometry.leftFootPositionIn)
plug (self.robot.device.forceLLEG,self.odometry.force_lf)
plug (self.robot.device.forceRLEG,self.odometry.force_rf)
self.odometry.setLeftFootPosition(self.robot.frames['leftFootForceSensor'].position.value)
self.odometry.setRightFootPosition(self.robot.frames['rightFootForceSensor'].position.value)
plug(self.interface.stackOfSupportContacts,self.odometry.stackOfSupportContacts)
# Create dynamicEncoders
self.robotStateWoFF = Selec_of_vector('robotStateWoFF')
plug(self.robot.device.robotState,self.robotStateWoFF.sin)
self.robotStateWoFF.selec(6,36)
self.stateEncoders = Stack_of_vector (name+'stateEncoders')
plug(self.odometry.freeFlyer,self.stateEncoders.sin1)
plug(self.robotStateWoFF.sout,self.stateEncoders.sin2)
self.stateEncoders.selec1 (0, 6)
self.stateEncoders.selec2 (0, 30)
self.robot.dynamicEncoders=self.createDynamic(self.stateEncoders.sout,'_dynamicEncoders')
# self.robot.dynamicEncoders=self.createDynamic(self.robotState.sout,'_dynamicEncoders')
# plug(self.odometry.freeFlyer,self.robot.dynamicEncoders.ffposition)
# self.robot.dynamicEncoders=self.createDynamic(self.robot.device.state,'_dynamicEncoders')
# Reconstruction of the position of the contacts in dynamicEncoders
self.leftFootPos=Multiply_of_matrixHomo("leftFootPos")
plug(self.robot.dynamicEncoders.signal('left-ankle'),self.leftFootPos.sin1)
self.leftFootPos.sin2.value=self.robot.forceSensorInLeftAnkle
self.rightFootPos=Multiply_of_matrixHomo("rightFootPos")
plug(self.robot.dynamicEncoders.signal('right-ankle'),self.rightFootPos.sin1)
self.rightFootPos.sin2.value=self.robot.forceSensorInRightAnkle
# Contacts velocities
self.leftFootVelocity = Multiply_matrix_vector ('leftFootVelocity')
plug(self.robot.frames['leftFootForceSensor'].jacobian,self.leftFootVelocity.sin1)
plug(self.robot.dynamicEncoders.velocity,self.leftFootVelocity.sin2)
self.rightFootVelocity = Multiply_matrix_vector ('rightFootVelocity')
plug(self.robot.frames['rightFootForceSensor'].jacobian,self.rightFootVelocity.sin1)
plug(self.robot.dynamicEncoders.velocity,self.rightFootVelocity.sin2)
# Contacts positions and velocities
plug (self.leftFootPos.sout,self.interface.position_lf)
plug (self.rightFootPos.sout,self.interface.position_rf)
plug (self.leftFootVelocity.sout,self.interface.velocity_lf)
plug (self.rightFootVelocity.sout,self.interface.velocity_rf)
plug (self.robot.dynamicEncoders.signal('right-wrist'),self.interface.position_lh)
plug (self.robot.dynamicEncoders.signal('left-wrist'),self.interface.position_rh)
# Compute contacts number
plug (self.interface.supportContactsNbr,self.contactNbr)
# Contacts model and config
plug(self.interface.contactsModel,self.contactsModel)
self.setWithConfigSignal(True)
plug(self.interface.config,self.config)
# Drift
self.drift = DriftFromMocap(name+'Drift')
# Compute measurement vector
plug(self.robot.device.accelerometer,self.interface.accelerometer)
plug(self.robot.device.gyrometer,self.interface.gyrometer)
plug(self.drift.driftVector,self.interface.drift)
plug(self.interface.measurement,self.measurement)
# Input reconstruction
# IMU Vector
# Creating an operational point for the IMU
self.robot.dynamicEncoders.createJacobian(name+'ChestJ_OpPoint','chest')
self.imuOpPoint = OpPointModifier(name+'IMU_oppoint')
self.imuOpPoint.setTransformation(matrixToTuple(np.linalg.inv(np.matrix(self.robot.dynamicEncoders.chest.value))*np.matrix(self.robot.frames['accelerometer'].position.value)))
self.imuOpPoint.setEndEffector(False)
plug (self.robot.dynamicEncoders.chest,self.imuOpPoint.positionIN)
plug (self.robot.dynamicEncoders.signal(name+'ChestJ_OpPoint'),self.imuOpPoint.jacobianIN)
# IMU position
self.PosAccelerometer=Multiply_of_matrixHomo(name+"PosAccelerometer")
plug(self.robot.dynamicEncoders.chest,self.PosAccelerometer.sin1)
self.PosAccelerometer.sin2.value=matrixToTuple(self.robot.accelerometerPosition)
self.inputPos = MatrixHomoToPoseUTheta(name+'InputPosition')
plug(self.PosAccelerometer.sout,self.inputPos.sin)
# IMU velocity
self.inputVel = Multiply_matrix_vector(name+'InputVelocity')
plug(self.imuOpPoint.jacobian,self.inputVel.sin1)
plug(self.robot.dynamicEncoders.velocity,self.inputVel.sin2)
# Concatenate
self.inputPosVel = Stack_of_vector (name+'InputPosVel')
plug(self.inputPos.sout,self.inputPosVel.sin1)
plug(self.inputVel.sout,self.inputPosVel.sin2)
self.inputPosVel.selec1 (0, 6)
self.inputPosVel.selec2 (0, 6)
# IMU Vector
self.IMUVector = PositionStateReconstructor (name+'EstimatorInput')
plug(self.inputPosVel.sout,self.IMUVector.sin)
self.IMUVector.inputFormat.value = '001111'
self.IMUVector.outputFormat.value = '011111'
self.IMUVector.setFiniteDifferencesInterval(2)
# CoM and derivatives
self.comIn=self.robot.dynamicEncoders.com
self.comVector = PositionStateReconstructor (name+'ComVector')
plug(self.comIn,self.comVector.sin)
self.comVector.inputFormat.value = '000001'
self.comVector.outputFormat.value = '010101'
self.comVector.setFiniteDifferencesInterval(20)
# Compute derivative of Angular Momentum
self.angMomDerivator = Derivator_of_Vector(name+'angMomDerivator')
plug(self.robot.dynamicEncoders.angularmomentum,self.angMomDerivator.sin)
self.angMomDerivator.dt.value = self.robot.timeStep
# Concatenate with interace estimator
plug(self.comVector.sout,self.interface.comVector)
plug(self.robot.dynamicEncoders.inertia,self.interface.inertia)
plug(self.robot.dynamicEncoders.angularmomentum,self.interface.angMomentum)
plug(self.angMomDerivator.sout,self.interface.dangMomentum)
self.interface.dinertia.value=(0,0,0,0,0,0)
plug(self.robot.dynamicEncoders.waist,self.interface.positionWaist)
plug(self.IMUVector.sout,self.interface.imuVector)
plug(self.interface.input,self.input)
self.robot.flextimator = self
# Create a dynamic ######################################
def createCenterOfMassFeatureAndTask(self,
dynamicTmp,
featureName, featureDesName,
taskName,
selec = '111',
ingain = 1.):
dynamicTmp.com.recompute(0)
dynamicTmp.Jcom.recompute(0)
featureCom = FeatureGeneric(featureName)
plug(dynamicTmp.com, featureCom.errorIN)
plug(dynamicTmp.Jcom, featureCom.jacobianIN)
featureCom.selec.value = selec
featureComDes = FeatureGeneric(featureDesName)
featureComDes.errorIN.value = dynamicTmp.com.value
featureCom.setReference(featureComDes.name)
taskCom = Task(taskName)
taskCom.add(featureName)
gainCom = GainAdaptive('gain'+taskName)
gainCom.setConstant(ingain)
plug(gainCom.gain, taskCom.controlGain)
plug(taskCom.error, gainCom.error)
return (featureCom, featureComDes, taskCom, gainCom)
def createOperationalPointFeatureAndTask(self,
dynamicTmp,
operationalPointName,
featureName,
taskName,
ingain = .2):
jacobianName = 'J{0}'.format(operationalPointName)
dynamicTmp.signal(operationalPointName).recompute(0)
dynamicTmp.signal(jacobianName).recompute(0)
feature = \
FeaturePosition(featureName,
dynamicTmp.signal(operationalPointName),
dynamicTmp.signal(jacobianName),
dynamicTmp.signal(operationalPointName).value)
task = Task(taskName)
task.add(featureName)
gain = GainAdaptive('gain'+taskName)
gain.setConstant(ingain)
plug(gain.gain, task.controlGain)
plug(task.error, gain.error)
return (feature, task, gain)
def createDynamic(self,state,name) :
# Create dynamic
self.dynamicTmp = self.robot.loadModelFromJrlDynamics(
self.robot.name + name,
self.robot.modelDir,
self.robot.modelName,
self.robot.specificitiesPath,
self.robot.jointRankPath,
DynamicHrp2_14)
self.dynamicTmp.dimension = self.dynamicTmp.getDimension()
if self.dynamicTmp.dimension != len(self.robot.halfSitting):
raise RuntimeError("Dimension of half-sitting: {0} differs from dimension of robot: {1}".format (len(self.halfSitting), self.dynamicTmp.dimension))
# Pluging position
plug(state, self.dynamicTmp.position)
self.derivative=True
# Pluging velocity
self.robot.enableVelocityDerivator = self.derivative
if self.robot.enableVelocityDerivator:
self.dynamicTmp.velocityDerivator = Derivator_of_Vector('velocityDerivator')
self.dynamicTmp.velocityDerivator.dt.value = self.robot.timeStep
plug(state, self.dynamicTmp.velocityDerivator.sin)
plug(self.dynamicTmp.velocityDerivator.sout, self.dynamicTmp.velocity)
else:
self.dynamicTmp.velocity.value = self.dynamicTmp.dimension*(0.,)
# Pluging acceleration
self.robot.enableAccelerationDerivator = self.derivative
if self.robot.enableAccelerationDerivator:
self.dynamicTmp.accelerationDerivator = Derivator_of_Vector('accelerationDerivator')
self.dynamicTmp.accelerationDerivator.dt.value = self.robot.timeStep
plug(self.dynamicTmp.velocityDerivator.sout, self.dynamicTmp.accelerationDerivator.sin)
plug(self.dynamicTmp.accelerationDerivator.sout, self.dynamicTmp.acceleration)
else:
self.dynamicTmp.acceleration.value = self.dynamicTmp.dimension*(0.,)
# # --- center of mass ------------
# (self.featureCom, self.featureComDes, self.taskCom, self.gainCom) = \
# self.createCenterOfMassFeatureAndTask\
# (self.dynamicTmp, '{0}_feature_com'.format(self.robot.name),
# '{0}_feature_ref_com'.format(self.robot.name),
# '{0}_task_com'.format(self.robot.name))
# --- operational points tasks -----
self.robot.features = dict()
self.robot.tasks = dict()
self.robot.gains = dict()
for op in self.robot.OperationalPoints:
opName= op + name
self.dynamicTmp.createOpPoint(op, op)
(self.robot.features[opName], self.robot.tasks[opName], self.robot.gains[opName]) = \
self.createOperationalPointFeatureAndTask(self.dynamicTmp, op,
'{0}_feature_{1}'.format(self.robot.name, opName),
'{0}_task_{1}'.format(self.robot.name, opName))
# define a member for each operational point
w = op.split('-')
memberName = w[0]
for i in w[1:]:
memberName += i.capitalize()
setattr(self, memberName, self.robot.features[opName])
# self.robot.tasks ['com'] = self.taskCom
# self.robot.features ['com'] = self.featureCom
# self.robot.gains['com'] = self.gainCom
self.robot.features['waist'+name].selec.value = '011100'
return self.dynamicTmp
|
amifsud/sot-state-observation
|
src/dynamic_graph/sot/application/state_observation/initializations/hrp2_model_base_flex_estimator_imu_force_encoders.py
|
Python
|
lgpl-3.0
| 15,688
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the :class:`iris._data_manager.DataManager`.
"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import copy
from unittest import mock
import numpy as np
import numpy.ma as ma
from iris._data_manager import DataManager
from iris._lazy_data import as_lazy_data
class Test___copy__(tests.IrisTest):
def test(self):
dm = DataManager(np.array(0))
emsg = "Shallow-copy of {!r} is not permitted."
name = type(dm).__name__
with self.assertRaisesRegex(copy.Error, emsg.format(name)):
copy.copy(dm)
class Test___deepcopy__(tests.IrisTest):
def test(self):
dm = DataManager(np.array(0))
method = "iris._data_manager.DataManager._deepcopy"
return_value = mock.sentinel.return_value
with mock.patch(method) as mocker:
mocker.return_value = return_value
result = copy.deepcopy(dm)
self.assertEqual(mocker.call_count, 1)
[args], kwargs = mocker.call_args
self.assertEqual(kwargs, dict())
self.assertEqual(len(args), 2)
expected = [return_value, [dm]]
for item in args.values():
self.assertIn(item, expected)
self.assertIs(result, return_value)
class Test___eq__(tests.IrisTest):
def setUp(self):
self.shape = (2, 3, 4)
self.size = np.prod(self.shape)
self.real_array = np.arange(self.size, dtype=float).reshape(self.shape)
def test_real_with_real(self):
dm1 = DataManager(self.real_array)
dm2 = DataManager(self.real_array.copy())
self.assertEqual(dm1, dm2)
def test_real_with_real_failure(self):
dm1 = DataManager(self.real_array)
dm2 = DataManager(np.ones(self.shape))
self.assertFalse(dm1 == dm2)
def test_real_with_real__dtype_failure(self):
dm1 = DataManager(self.real_array)
dm2 = DataManager(self.real_array.astype(int))
self.assertFalse(dm1 == dm2)
def test_real_with_lazy_failure(self):
dm1 = DataManager(self.real_array)
dm2 = DataManager(as_lazy_data(self.real_array))
self.assertFalse(dm1 == dm2)
self.assertFalse(dm2 == dm1)
def test_lazy_with_lazy(self):
dm1 = DataManager(as_lazy_data(self.real_array))
dm2 = DataManager(as_lazy_data(self.real_array))
self.assertEqual(dm1, dm2)
def test_lazy_with_lazy_failure(self):
dm1 = DataManager(as_lazy_data(self.real_array))
dm2 = DataManager(as_lazy_data(self.real_array) * 10)
self.assertFalse(dm1 == dm2)
def test_lazy_with_lazy__dtype_failure(self):
dm1 = DataManager(as_lazy_data(self.real_array))
dm2 = DataManager(as_lazy_data(self.real_array).astype(int))
self.assertFalse(dm1 == dm2)
def test_non_DataManager_failure(self):
dm = DataManager(np.array(0))
self.assertFalse(dm == 0)
class Test___ne__(tests.IrisTest):
def setUp(self):
self.shape = (2, 3, 4)
self.size = np.prod(self.shape)
self.real_array = np.arange(self.size, dtype=float).reshape(self.shape)
def test_real_with_real(self):
dm1 = DataManager(self.real_array)
dm2 = DataManager(np.ones(self.shape))
self.assertNotEqual(dm1, dm2)
def test_real_with_real_failure(self):
dm1 = DataManager(self.real_array)
dm2 = DataManager(self.real_array.copy())
self.assertFalse(dm1 != dm2)
def test_real_with_real__dtype(self):
dm1 = DataManager(self.real_array)
dm2 = DataManager(self.real_array.astype(int))
self.assertNotEqual(dm1, dm2)
def test_real_with_lazy(self):
dm1 = DataManager(self.real_array)
dm2 = DataManager(as_lazy_data(self.real_array))
self.assertNotEqual(dm1, dm2)
self.assertNotEqual(dm2, dm1)
def test_lazy_with_lazy(self):
dm1 = DataManager(as_lazy_data(self.real_array))
dm2 = DataManager(as_lazy_data(self.real_array) * 10)
self.assertNotEqual(dm1, dm2)
def test_lazy_with_lazy_failure(self):
dm1 = DataManager(as_lazy_data(self.real_array))
dm2 = DataManager(as_lazy_data(self.real_array))
self.assertFalse(dm1 != dm2)
def test_lazy_with_lazy__dtype(self):
dm1 = DataManager(as_lazy_data(self.real_array))
dm2 = DataManager(as_lazy_data(self.real_array).astype(int))
self.assertNotEqual(dm1, dm2)
def test_non_DataManager(self):
dm = DataManager(np.array(0))
self.assertNotEqual(dm, 0)
class Test___repr__(tests.IrisTest):
def setUp(self):
self.real_array = np.array(123)
masked_array = ma.array([0, 1], mask=[0, 1])
self.lazy_array = as_lazy_data(masked_array)
self.name = DataManager.__name__
def test_real(self):
dm = DataManager(self.real_array)
result = repr(dm)
expected = "{}({!r})".format(self.name, self.real_array)
self.assertEqual(result, expected)
def test_lazy(self):
dm = DataManager(self.lazy_array)
result = repr(dm)
expected = "{}({!r})".format(self.name, self.lazy_array)
self.assertEqual(result, expected)
class Test__assert_axioms(tests.IrisTest):
def setUp(self):
self.real_array = np.array(0)
self.lazy_array = as_lazy_data(self.real_array)
self.dm = DataManager(self.real_array)
def test_array_none(self):
self.dm._real_array = None
emsg = "Unexpected data state, got no lazy and no real data"
with self.assertRaisesRegex(AssertionError, emsg):
self.dm._assert_axioms()
def test_array_all(self):
self.dm._lazy_array = self.lazy_array
emsg = "Unexpected data state, got lazy and real data"
with self.assertRaisesRegex(AssertionError, emsg):
self.dm._assert_axioms()
class Test__deepcopy(tests.IrisTest):
def setUp(self):
self.shape = (2, 3, 4)
self.size = np.prod(self.shape)
self.real_array = np.arange(self.size, dtype=float).reshape(self.shape)
self.memo = dict()
def test_real(self):
dm = DataManager(self.real_array)
result = dm._deepcopy(self.memo)
self.assertEqual(dm, result)
def test_lazy(self):
dm = DataManager(as_lazy_data(self.real_array))
result = dm._deepcopy(self.memo)
self.assertEqual(dm, result)
def test_real_with_real(self):
dm = DataManager(self.real_array)
data = self.real_array.copy() * 10
result = dm._deepcopy(self.memo, data=data)
expected = DataManager(data)
self.assertEqual(result, expected)
self.assertIs(result._real_array, data)
def test_real_with_lazy(self):
dm = DataManager(self.real_array)
data = as_lazy_data(self.real_array) * 10
result = dm._deepcopy(self.memo, data=data)
expected = DataManager(data)
self.assertEqual(result, expected)
self.assertIs(result._lazy_array, data)
def test_lazy_with_real(self):
dm = DataManager(as_lazy_data(self.real_array))
data = self.real_array.copy() * 10
result = dm._deepcopy(self.memo, data=data)
expected = DataManager(data)
self.assertEqual(result, expected)
self.assertIs(result._real_array, data)
def test_lazy_with_lazy(self):
dm = DataManager(as_lazy_data(self.real_array))
data = as_lazy_data(self.real_array) * 10
result = dm._deepcopy(self.memo, data=data)
expected = DataManager(data)
self.assertEqual(result, expected)
self.assertIs(result._lazy_array, data)
def test_real_with_real_failure(self):
dm = DataManager(self.real_array)
emsg = "Cannot copy"
with self.assertRaisesRegex(ValueError, emsg):
dm._deepcopy(self.memo, data=np.array(0))
def test_real_with_lazy_failure(self):
dm = DataManager(self.real_array)
emsg = "Cannot copy"
with self.assertRaisesRegex(ValueError, emsg):
dm._deepcopy(self.memo, data=as_lazy_data(np.array(0)))
def test_lazy_with_real_failure(self):
dm = DataManager(as_lazy_data(self.real_array))
emsg = "Cannot copy"
with self.assertRaisesRegex(ValueError, emsg):
dm._deepcopy(self.memo, data=np.array(0))
def test_lazy_with_lazy_failure(self):
dm = DataManager(as_lazy_data(self.real_array))
emsg = "Cannot copy"
with self.assertRaisesRegex(ValueError, emsg):
dm._deepcopy(self.memo, data=as_lazy_data(np.array(0)))
class Test_data__getter(tests.IrisTest):
def setUp(self):
shape = (2, 3, 4)
size = np.prod(shape)
self.real_array = np.arange(size).reshape(shape)
self.lazy_array = as_lazy_data(self.real_array)
self.mask_array = ma.masked_array(self.real_array)
self.mask_array_masked = self.mask_array.copy()
self.mask_array_masked[0, 0, 0] = ma.masked
self.dtype = self.mask_array.dtype
self.fill_value = self.mask_array.fill_value
self.lazy_mask_array = as_lazy_data(self.mask_array)
self.lazy_mask_array_masked = as_lazy_data(self.mask_array_masked)
def test_with_real_array(self):
dm = DataManager(self.real_array)
self.assertFalse(dm.has_lazy_data())
result = dm.data
self.assertFalse(dm.has_lazy_data())
self.assertIs(result, self.real_array)
def test_with_lazy_array(self):
dm = DataManager(self.lazy_array)
self.assertTrue(dm.has_lazy_data())
result = dm.data
self.assertFalse(dm.has_lazy_data())
self.assertArrayEqual(result, self.real_array)
def test_with_lazy_mask_array__not_masked(self):
dm = DataManager(self.lazy_mask_array)
self.assertTrue(dm.has_lazy_data())
result = dm.data
self.assertFalse(dm.has_lazy_data())
self.assertIsInstance(result, np.core.ndarray)
self.assertEqual(dm.dtype, self.dtype)
self.assertEqual(result.fill_value, self.fill_value)
self.assertArrayEqual(result, self.real_array)
def test_with_lazy_mask_array__masked(self):
dm = DataManager(self.lazy_mask_array_masked)
self.assertTrue(dm.has_lazy_data())
result = dm.data
self.assertFalse(dm.has_lazy_data())
self.assertIsInstance(result, ma.MaskedArray)
self.assertEqual(dm.dtype, self.dtype)
self.assertEqual(result.fill_value, self.fill_value)
self.assertArrayEqual(result, self.mask_array_masked)
def test_with_real_masked_constant(self):
masked_data = ma.masked_array([666], mask=True, dtype=np.dtype("f8"))
masked_constant = masked_data[0]
dm = DataManager(masked_constant)
result = dm.data
self.assertFalse(dm.has_lazy_data())
self.assertIsInstance(result, ma.MaskedArray)
self.assertNotIsInstance(result, ma.core.MaskedConstant)
self.assertMaskedArrayEqual(result, masked_data)
def test_with_lazy_masked_constant(self):
masked_data = ma.masked_array([666], mask=True)
masked_constant = masked_data[0]
lazy_masked_constant = as_lazy_data(masked_constant)
dm = DataManager(lazy_masked_constant)
result = dm.data
self.assertFalse(dm.has_lazy_data())
self.assertIsInstance(result, ma.MaskedArray)
self.assertNotIsInstance(result, ma.core.MaskedConstant)
self.assertMaskedArrayEqual(result, masked_data)
class Test_data__setter(tests.IrisTest):
def test_zero_ndim_real_with_scalar_int(self):
value = 456
dm = DataManager(np.array(123))
self.assertFalse(dm.has_lazy_data())
dm.data = value
self.assertFalse(dm.has_lazy_data())
self.assertArrayEqual(dm.data, np.array(value))
def test_zero_ndim_real_with_scalar_float(self):
value = 456.0
dm = DataManager(np.array(123))
self.assertFalse(dm.has_lazy_data())
dm.data = value
self.assertFalse(dm.has_lazy_data())
self.assertArrayEqual(dm.data, np.array(value))
def test_zero_ndim_real_with_zero_ndim_real(self):
real_array = np.array(456)
dm = DataManager(np.array(123))
self.assertFalse(dm.has_lazy_data())
dm.data = real_array
self.assertFalse(dm.has_lazy_data())
self.assertArrayEqual(dm.data, real_array)
def test_zero_ndim_real_with_zero_ndim_lazy(self):
lazy_array = as_lazy_data(np.array(456))
dm = DataManager(np.array(123))
self.assertFalse(dm.has_lazy_data())
dm.data = lazy_array
self.assertTrue(dm.has_lazy_data())
self.assertArrayEqual(dm.data, lazy_array.compute())
def test_zero_ndim_lazy_with_zero_ndim_real(self):
real_array = np.array(456)
dm = DataManager(as_lazy_data(np.array(123)))
self.assertTrue(dm.has_lazy_data())
dm.data = real_array
self.assertFalse(dm.has_lazy_data())
self.assertArrayEqual(dm.data, real_array)
def test_zero_ndim_lazy_with_zero_ndim_lazy(self):
lazy_array = as_lazy_data(np.array(456))
dm = DataManager(as_lazy_data(np.array(123)))
self.assertTrue(dm.has_lazy_data())
dm.data = lazy_array
self.assertTrue(dm.has_lazy_data())
self.assertArrayEqual(dm.data, lazy_array.compute())
def test_zero_ndim_real_to_scalar_1d_real_promote(self):
real_array = np.array([456])
dm = DataManager(np.array(123))
self.assertFalse(dm.has_lazy_data())
dm.data = real_array
self.assertFalse(dm.has_lazy_data())
self.assertArrayEqual(dm.data, real_array)
def test_zero_ndim_real_to_scalar_1d_lazy_promote(self):
lazy_array = as_lazy_data(np.array([456]))
dm = DataManager(np.array(123))
self.assertFalse(dm.has_lazy_data())
dm.data = lazy_array
self.assertTrue(dm.has_lazy_data())
self.assertArrayEqual(dm.data, lazy_array.compute())
def test_zero_ndim_lazy_to_scalar_1d_real_promote(self):
real_array = np.array([456])
dm = DataManager(as_lazy_data(np.array(123)))
self.assertTrue(dm.has_lazy_data())
dm.data = real_array
self.assertFalse(dm.has_lazy_data())
self.assertArrayEqual(dm.data, real_array)
def test_zero_ndim_lazy_to_scalar_1d_lazy_promote(self):
lazy_array = as_lazy_data(np.array([456]))
dm = DataManager(as_lazy_data(np.array(123)))
self.assertTrue(dm.has_lazy_data())
dm.data = lazy_array
self.assertTrue(dm.has_lazy_data())
self.assertArrayEqual(dm.data, lazy_array.compute())
def test_scalar_1d_to_zero_ndim_fail(self):
dm = DataManager(np.array([123]))
emsg = r"Require data with shape \(1,\), got \(\)."
with self.assertRaisesRegex(ValueError, emsg):
dm.data = 456
def test_nd_real_to_nd_real(self):
shape = (2, 3, 4)
size = np.prod(shape)
real_array = np.arange(size).reshape(shape)
dm = DataManager(real_array * 10)
self.assertFalse(dm.has_lazy_data())
dm.data = real_array
self.assertFalse(dm.has_lazy_data())
self.assertArrayEqual(dm.data, real_array)
def test_nd_real_to_nd_lazy(self):
shape = (2, 3, 4)
size = np.prod(shape)
real_array = np.arange(size).reshape(shape)
lazy_array = as_lazy_data(real_array) * 10
dm = DataManager(real_array)
self.assertFalse(dm.has_lazy_data())
dm.data = lazy_array
self.assertTrue(dm.has_lazy_data())
self.assertArrayEqual(dm.data, lazy_array.compute())
def test_nd_lazy_to_nd_real(self):
shape = (2, 3, 4)
size = np.prod(shape)
real_array = np.arange(size).reshape(shape)
lazy_array = as_lazy_data(real_array)
dm = DataManager(lazy_array * 10)
self.assertTrue(dm.has_lazy_data())
dm.data = real_array
self.assertFalse(dm.has_lazy_data())
self.assertArrayEqual(dm.data, real_array)
def test_nd_lazy_to_nd_lazy(self):
shape = (2, 3, 4)
size = np.prod(shape)
real_array = np.arange(size).reshape(shape)
lazy_array = as_lazy_data(real_array)
dm = DataManager(lazy_array * 10)
self.assertTrue(dm.has_lazy_data())
dm.data = lazy_array
self.assertTrue(dm.has_lazy_data())
self.assertArrayEqual(dm.data, lazy_array.compute())
def test_coerce_to_ndarray(self):
shape = (2, 3)
size = np.prod(shape)
real_array = np.arange(size).reshape(shape)
matrix = np.matrix(real_array)
dm = DataManager(real_array)
dm.data = matrix
self.assertIsInstance(dm._real_array, np.core.ndarray)
self.assertIsInstance(dm.data, np.core.ndarray)
self.assertArrayEqual(dm.data, real_array)
def test_real_masked_constant_to_array(self):
masked_data = ma.masked_array([666], mask=True, dtype=np.dtype("f8"))
masked_constant = masked_data[0]
dm = DataManager(masked_constant)
self.assertIsInstance(dm._real_array, ma.MaskedArray)
self.assertNotIsInstance(dm._real_array, ma.core.MaskedConstant)
self.assertIsInstance(dm.data, ma.MaskedArray)
self.assertNotIsInstance(dm.data, ma.core.MaskedConstant)
self.assertMaskedArrayEqual(dm.data, masked_data)
class Test_dtype(tests.IrisTest):
def setUp(self):
self.real_array = np.array(0, dtype=np.dtype("int64"))
self.lazy_array = as_lazy_data(np.array(0, dtype=np.dtype("float64")))
def test_real_array(self):
dm = DataManager(self.real_array)
self.assertEqual(dm.dtype, np.dtype("int64"))
def test_lazy_array(self):
dm = DataManager(self.lazy_array)
self.assertEqual(dm.dtype, np.dtype("float64"))
class Test_ndim(tests.IrisTest):
def test_ndim_0(self):
real_array = np.array(0)
dm = DataManager(real_array)
self.assertEqual(dm.ndim, 0)
lazy_array = as_lazy_data(real_array)
dm = DataManager(lazy_array)
self.assertEqual(dm.ndim, 0)
def test_ndim_nd(self):
shape = (2, 3, 4)
real_array = np.arange(24).reshape(shape)
dm = DataManager(real_array)
self.assertEqual(dm.ndim, len(shape))
lazy_array = as_lazy_data(real_array)
dm = DataManager(lazy_array)
self.assertEqual(dm.ndim, len(shape))
class Test_shape(tests.IrisTest):
def test_shape_scalar(self):
real_array = np.array(0)
dm = DataManager(real_array)
self.assertEqual(dm.shape, ())
lazy_array = as_lazy_data(real_array)
dm = DataManager(lazy_array)
self.assertEqual(dm.shape, ())
def test_shape_nd(self):
shape = (2, 3, 4)
real_array = np.arange(24).reshape(shape)
dm = DataManager(real_array)
self.assertEqual(dm.shape, shape)
lazy_array = as_lazy_data(real_array)
dm = DataManager(lazy_array)
self.assertEqual(dm.shape, shape)
class Test_copy(tests.IrisTest):
def setUp(self):
self.method = "iris._data_manager.DataManager._deepcopy"
self.data = mock.sentinel.data
self.return_value = mock.sentinel.return_value
self.memo = {}
def test(self):
dm = DataManager(np.array(0))
kwargs = dict(data=self.data)
with mock.patch(self.method) as mocker:
mocker.return_value = self.return_value
result = dm.copy(data=self.data)
mocker.assert_called_once_with(self.memo, **kwargs)
self.assertIs(result, self.return_value)
class Test_core_data(tests.IrisTest):
def test_real_array(self):
real_array = np.array(0)
dm = DataManager(real_array)
self.assertIs(dm.core_data(), real_array)
def test_lazy_array(self):
lazy_array = as_lazy_data(np.array(0))
dm = DataManager(lazy_array)
self.assertIs(dm.core_data(), lazy_array)
class Test_has_lazy_data(tests.IrisTest):
def setUp(self):
self.real_array = np.array(0)
self.lazy_array = as_lazy_data(self.real_array)
def test_with_lazy_array(self):
dm = DataManager(self.lazy_array)
self.assertTrue(dm.has_lazy_data())
def test_with_real_array(self):
dm = DataManager(self.real_array)
self.assertFalse(dm.has_lazy_data())
class Test_lazy_data(tests.IrisTest):
def setUp(self):
self.real_array = np.array(0)
self.lazy_array = as_lazy_data(self.real_array)
def test_with_real_array(self):
dm = DataManager(self.real_array)
self.assertFalse(dm.has_lazy_data())
result = dm.lazy_data()
self.assertFalse(dm.has_lazy_data())
self.assertEqual(result, self.lazy_array)
self.assertFalse(dm.has_lazy_data())
def test_with_lazy_array(self):
dm = DataManager(self.lazy_array)
self.assertTrue(dm.has_lazy_data())
result = dm.lazy_data()
self.assertTrue(dm.has_lazy_data())
self.assertIs(result, dm._lazy_array)
if __name__ == "__main__":
tests.main()
|
pp-mo/iris
|
lib/iris/tests/unit/data_manager/test_DataManager.py
|
Python
|
lgpl-3.0
| 21,589
|
"""
WSGI config for simpleform project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simpleform.settings")
application = get_wsgi_application()
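# Example (illustrative): this callable can be served by any WSGI server,
# e.g. `gunicorn simpleform.wsgi:application` run from the project directory.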
|
dynamsoftsamples/dwt-django-file-upload
|
form-upload/simpleform/simpleform/wsgi.py
|
Python
|
apache-2.0
| 397
|
import gsmlib.GSMC0 as C0
import matplotlib.pyplot as plt
import numpy as np
import gsmlib.splibs as sp
import gsmlib.Burst as Burst
import gsmlib.burstfile as burstfile
from ctypes import *
import constant
import gsmlib.clib as clib
import gsmlib.SB as SB
import gsmlib.NB as NB
import gsmlib.convCode as convCode
import gsmlib.interleave as interleave
import time
class GSMClibTest:
def __init__(self):
self.c0 = C0.GSMC0()
self.c0.initSCH()
#self.c0.initBCCH()
#self.c0.initCCCH(range(0,9))
self.c0.initFACCH([5,6,7])
#c0.initSDCCH(range(1),[1])
self.lib = CDLL(constant.c_temp_dir+'libcgsm.so')
self.acc = clib.clib(self.lib)
for bcch in self.c0.bcch:
bcch.setLib(self.lib)
for ccch in self.c0.ccch:
ccch.setLib(self.lib)
file = "../../../temp/log"
self.bf = burstfile.burstfile(file)
self.c0.state.timingSyncState.to("fine")
for i in range(3):
self.c0.state.timingSyncState.once()
self.frameC = 0
self.color = ['r','b','y','g','c','m','k','g-']
def testSchFun(self,b):
        chn = self.acc.channelEst(b.recv,SB.SBTraining.modulated,Burst.Burst.fosr)
inx = np.floor(np.arange(64)*Burst.Burst.fosr)
print inx[:]
print "clib chn",len(chn),chn[:3]
print "python chn",len(b.chn),b.chn[:3]
        rhh = self.acc.matchFilter(
b.chn[b.cut_pos:b.cut_pos+Burst.Burst.chnMatchLength]
, b.cut_chn
, Burst.Burst.fosr
, 0. )
print rhh/64.
print b.rhh
        p = self.acc.maxwin(b.chn[SB.SB._chn_s:SB.SB._chn_e],Burst.Burst.chn_len)
print "clib p",p+SB.SB._chn_s,"python p",b.cut_pos
print b.a
print b.b
        o = self.acc.viterbi_detector(b.mafi,np.conj(b.rhh),148)
print o
yy = b.viterbi.t2b(b.mafi,0)
        yr = self.acc.viterbi_restore(yy,np.conj(b.rhh),148) #maybe right
        yg = self.acc.viterbi_restore(yy,b.rhh,148)
print yy
print np.array(yy[:147])-np.array(o[1:148])
plt.figure(1)
plt.plot(yr.real[1:],'r')
plt.plot(yg.real[1:],'g')
plt.plot(b.mafi.real,'b')
plt.figure(2)
plt.plot(yr.imag[1:],'r')
plt.plot(yg.imag[1:],'g')
plt.plot(b.mafi.imag,'b')
plt.show()
def powerGraph(self,mf1,mf2):
power = [0.]*(mf1*8)
for i in range(mf1*8*mf2):
f = self.bf.readBurst().recv
power[i%(mf1*8)] +=(np.dot(f,np.conj(f)).real)
p = np.array(power)
p.shape = (mf1,8)
plt.imshow(p,aspect='auto')
def oneSDCCH(self):
for i in range(4):
b = self.c0.C0.frame[i][3]
ub = self.acc.newBurst(b.srecv)
self.acc.demodu(ub,2)
frame = clib.cf2c(ub.frame)
print np.dot(frame,np.conj(frame))
def oneSDCCH_nb(self):
        b = self.c0.C0.frame[0][1]
        ub = self.acc.newBurst(b.srecv)
        for t in range(1,10):
            self.acc.demodu(ub,t)
            chn = clib.cf2c(ub.chn)
            plt.plot(np.abs(chn),self.color[t%8])
        mafi = clib.cf2c(ub.mafi)
        rhh = clib.cf2c(ub.rh)
        plt.plot(mafi.real,'r')
        plt.plot(b.mafi.real/b.rhh[2].real,'b')
def parity_check(self,decoded_data):
buf = []
buf = np.array(decoded_data[:35])
for i in range(25):
if buf[i]==1:
buf[i:i+10+1]^=convCode.convCode.sch_config['parity_polynomial']
print i,hex(clib.buf2uint64(clib.compress_bits(buf[i+1:])))
def oneSch_nb(self):
self.bf.skip(8)
b,_F = self.bf.toC0(self.c0)
ok,data = b.ch.callback(b,_F,self.c0.state)
self.sch = b.ch
ub = self.acc.newBurst(b.srecv)
e,aSch = self.acc.doSch(ub)
self.parity_check(b.ch.decoded_data)
print "python ",b.sbm0[3:]+b.sbm1[:-3]
print "python ",b.ch.decoded_data
print b.ch.info
print hex(aSch.out[0])
b.ch.decodeBin(aSch.out[0])
print b.ch.info
def oneBcch(self):
bcch_nb = []
bs = []
for i in range(4):
self.bf.skip(7)
b,_F = self.bf.toC0(self.c0)
ub = self.acc.newBurst(b.srecv)
bcch_nb.append(ub)
bs.append(b)
ok,data = b.ch.callback(b,_F,self.c0.state)
print ok
e,aCch = self.acc.doCch(bcch_nb,self.sch.info['bcc'])
print "pc",e
# chn = clib.cf2c(bcch_nb[0].chn)
# plt.plot(np.abs(chn))
# plt.show()
print bcch_nb[0].msg[:]
print bs[0].msg
return bcch_nb
def testInterleave(self,bcch_nb):
c = 0
for b in bcch_nb:
for i in range(57*2):
b.msg[i]=c
c+=1
self.acc.cch_deinterleave(bcch_nb)
print clib.clib.cch_dec.ilT[:]
il = interleave.interleave(114*4,114)
in_b = np.array(range(57*8))
outb = il.decode(in_b)
print outb[:]
def whatinslot(self,slot):
p = np.zeros((9,51*26))
ts = range(8,9)
for i in range(51*26):
b = self.c0.C0.frame[i][slot]
for j in range(len(ts)):
ub = self.acc.newBurst(b.srecv)
self.acc.demodu(ub,ts[j])
power = float(ub.chpower)
p[j,i] = power
for j in range(len(ts)):
plt.plot(p[j,:],self.color[j%8])
def testDemodu(self):
frameC = 0
startT = time.time()
for i in range(51*26*8*10):
b,_F = self.bf.toC0(self.c0)
if b.ch!=None:
if b.ch.name!='FCCH':
#print b.ch.name,b.__name__,_F
ok,data = b.ch.callback(b,_F,self.c0.state)
# ub = acc.newBurst(b.srecv)
# if b.__class__==NB.NB:
# acc.demodu(ub,b.training+1)
# elif b.__class__==SB.SB:
# acc.demodu(ub,0)
frameC+=1
endT = time.time()
print "pre burst time:",(endT-startT)/frameC
def main():
"""
slot 0 FCCH,SCH,BCCH,CCCH
slot 2,4 BCCH,CCCH
slot 6 26 frames struct
"""
uut = GSMClibTest()
# uut.oneSch_nb()
# plt.figure(1)
# uut.bf.reset()
# uut.powerGraph(52,51)
# plt.figure(2)
# uut.bf.reset()
# uut.powerGraph(51,52)
uut.testDemodu()
# uut.whatinslot(5)
# plt.show()
if __name__ == '__main__':
main()
|
ruishihan/R7-with-notes
|
src/host/python/GSMClibTest.py
|
Python
|
apache-2.0
| 5,341
|
#!/usr/local/bin/python
###################################################
#ArXivFetcher.py
#Inputs:
#1)seeds for calls (can be multiples, contained in one string)
#2) a uuid
#3) number of results
#Outputs:
#1) prints the title and PDF URL of each result to stdout
#Attributes to add:
#1)accept multiple seed terms
#
#######
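#Example invocation (illustrative values; the uuid directory must exist):
#  python ArXivFetcher.py electron /tmp/some-uuid 5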
import os, sys, codecs, urllib.request, urllib.parse, urllib.error
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
#=================================================
def main():
seed = str(sys.argv[1])
uuid_path = str(sys.argv[2])
results_num = sys.argv[3]
os.chdir(uuid_path)
title = ''
pdf = ''
url = 'http://export.arxiv.org/api/query?search_query=all:'+seed+'&start=0&max_results='+results_num+'&sortBy=relevance&sortOrder=descending'
data = urllib.request.urlopen(url).read()
tree = ET.fromstring(data)
title_list = []
pdf_list = []
    iternum = 0
    titlenum = 0
for elem in tree.iterfind('.//{http://www.w3.org/2005/Atom}entry'):
for subelem in elem.iterfind('.//{http://www.w3.org/2005/Atom}title'):
title = subelem.text
title_list.append(title)
for elem in tree.iterfind('.//{http://www.w3.org/2005/Atom}link[@type="application/pdf"]'):
pdf = elem.attrib.get('href')
pdf_list.append(pdf)
#pdf_url = tree[7][8].attrib.get('href')
#title = tree[7][3].text
if title != '':
for i in range(len(title_list)):
print((title_list[i]))
print((pdf_list[i]))
else:
print('No results found.')
#print data
#=================================================
if __name__ == '__main__':
main()
|
fredzannarbor/pagekicker-community
|
scripts_python_3/bin/ArXivFetcher.py
|
Python
|
apache-2.0
| 1,579
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.schemas import scheduler_hints
from cinder.api import validation
def create(req, body):
attr = 'OS-SCH-HNT:scheduler_hints'
if body.get(attr) is not None:
scheduler_hints_body = dict.fromkeys((attr,), body.get(attr))
@validation.schema(scheduler_hints.create)
def _validate_scheduler_hints(req=None, body=None):
# TODO(pooja_jadhav): The scheduler hints schema validation
# should be moved to v3 volume schema directly and this module
# should be deleted at the time of deletion of v2 version code.
pass
_validate_scheduler_hints(req=req, body=scheduler_hints_body)
body['volume']['scheduler_hints'] = scheduler_hints_body.get(attr)
return body
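# Illustrative example (hypothetical values): a request body such as
#   {"volume": {"size": 1},
#    "OS-SCH-HNT:scheduler_hints": {"same_host": "<volume-uuid>"}}
# is validated against the scheduler_hints schema and rewritten so that
# body["volume"]["scheduler_hints"] holds {"same_host": "<volume-uuid>"}.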
# NOTE: This class is added to include "OS-SCH-HNT" in the list extensions
# response and "OS-SCH-HNT" is still not loaded as a standard extension.
class Scheduler_hints(extensions.ExtensionDescriptor):
"""Pass arbitrary key/value pairs to the scheduler."""
name = "SchedulerHints"
alias = "OS-SCH-HNT"
updated = "2013-04-18T00:00:00+00:00"
|
mahak/cinder
|
cinder/api/contrib/scheduler_hints.py
|
Python
|
apache-2.0
| 1,772
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Transforms a float-trained graph into an equivalent quantized version.
An example of command-line usage is:
bazel build tensorflow/contrib/quantization/tools/:quantize_graph \
&& bazel-bin/tensorflow/contrib/quantization/tools/quantize_graph \
--input=tensorflow_inception_graph.pb
--output_node_names="softmax2" --print_nodes --output=/tmp/quantized_graph.pb \
--mode=eightbit --logtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
import tensorflow as tf
from tensorflow.python.client import graph_util
from tensorflow.python.framework import tensor_util
# TODO(petewarden) - Remove this ugly hack to get around Python linking problems
# with Bazel.
# pylint: disable=g-bad-import-order
from tensorflow.contrib.quantization import load_quantized_ops_so
from tensorflow.contrib.quantization.kernels import load_quantized_kernels_so
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean("print_nodes", False, """Lists all nodes in the model.""")
flags.DEFINE_string("input", "", """TensorFlow 'GraphDef' file to load.""")
flags.DEFINE_string("output_node_names", "",
"""Output node names, comma separated.""")
flags.DEFINE_string("output", "", """File to save the output graph to.""")
flags.DEFINE_integer("bitdepth", 8,
"""How many bits to quantize the graph to.""")
flags.DEFINE_string("mode", "round",
"""What transformation to apply (round, quantize,"""
""" eightbit, weights, or weights_rounded).""")
flags.DEFINE_string("test_input_dims", "1,224,224,3",
"""The size of the input tensor to use when testing a"""
""" graph loaded from a file.""")
flags.DEFINE_boolean("strip_redundant_quantization", True,
"""Removes redundant dequantize/quantize pairs.""")
def print_input_nodes(current_node, nodes_map, indent, already_visited):
print(" " * indent + current_node.op + ":" + current_node.name)
for input_node_name in current_node.input:
if input_node_name in already_visited:
continue
input_node = nodes_map[input_node_name]
print_input_nodes(input_node, nodes_map, indent + 1, already_visited)
already_visited[current_node.name] = True
def create_node(op, name, inputs):
new_node = tf.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node(name, value, dtype, shape=None):
node = create_node("Const", name, [])
set_attr_dtype(node, "dtype", dtype)
set_attr_tensor(node, "value", value, dtype, shape)
return node
def copy_attr(node, key, attr_value):
try:
node.attr[key].CopyFrom(attr_value)
except KeyError:
pass
def set_attr_dtype(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(type=value.as_datatype_enum))
except KeyError:
pass
def set_attr_tensor(node, key, value, dtype, shape=None):
try:
node.attr[key].CopyFrom(tf.AttrValue(
tensor=tensor_util.make_tensor_proto(value,
dtype=dtype,
shape=shape)))
except KeyError:
pass
def set_attr_string(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(s=value))
except KeyError:
pass
def set_attr_int_list(node, key, value):
list_value = tf.AttrValue.ListValue(i=value)
try:
node.attr[key].CopyFrom(tf.AttrValue(list=list_value))
except KeyError:
pass
def set_attr_bool(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(b=value))
except KeyError:
pass
def set_attr_int(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(i=value))
except KeyError:
pass
def set_attr_float(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(f=value))
except KeyError:
pass
def node_name_from_input(node_name):
"""Strips off ports and other decorations to get the underlying node name."""
if node_name.startswith("^"):
node_name = node_name[1:]
m = re.search(r"(.*):\d+$", node_name)
if m:
node_name = m.group(1)
return node_name
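# Illustrative behaviour (hypothetical node names): "^conv1:0" and "conv1:0"
# both map to "conv1"; a plain "conv1" is returned unchanged.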
def ensure_tensor_name_has_port(node_name):
"""Makes sure that a tensor name has :0 if no explicit port exists."""
m = re.search(r"(.*):\d+$", node_name)
if m:
name_with_port = node_name
else:
name_with_port = node_name + ":0"
return name_with_port
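# Illustrative behaviour: "conv1" becomes "conv1:0", while "conv1:2" already
# names a port and is returned unchanged.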
def unique_node_name_from_input(node_name):
"""Replaces invalid characters in input names to get a unique node name."""
return node_name.replace(":", "__port__").replace("^", "__hat__")
def quantize_array(arr, num_buckets):
"""Quantizes a numpy array.
This function maps each scalar in arr to the center of one of num_buckets
buckets. For instance,
quantize_array([0, 0.3, 0.6, 1], 2) => [0.25, 0.25, 0.75, 0.75]
Args:
arr: The numpy array to quantize.
num_buckets: The number of buckets to map "var" to.
Returns:
The quantized numpy array.
Raises:
ValueError: when num_buckets < 1.
"""
if num_buckets < 1:
raise ValueError("num_buckets must be >= 1")
arr_max = arr.max()
arr_min = arr.min()
if arr_max == arr_min:
return arr
bucket_width = (arr_max - arr_min) / num_buckets
# Map scalars to bucket indices. Take special care of max(arr).
bucket_indices = np.floor((arr - arr_min) / bucket_width)
bucket_indices[bucket_indices == num_buckets] = num_buckets - 1
# Map each scalar to the center of a bucket.
arr = arr_min + bucket_width * (bucket_indices + 0.5)
return arr
def quantize_weight_rounded(input_node):
"""Returns a replacement node for input_node containing bucketed floats."""
input_tensor = input_node.attr["value"].tensor
tensor_value = tensor_util.MakeNdarray(input_tensor)
tensor_shape = input_tensor.tensor_shape
# Currently, the parameter FLAGS.bitdepth is used to compute the
# number of buckets as 1 << FLAGS.bitdepth, meaning the number of
# buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in choosing the right model
# size/accuracy tradeoff. But I didn't want to add more parameters
# to this script than absolutely necessary.
num_buckets = 1 << FLAGS.bitdepth
tensor_value_rounded = quantize_array(tensor_value, num_buckets)
tensor_shape_list = tensor_util.TensorShapeProtoToList(tensor_shape)
return [create_constant_node(input_node.name, tensor_value_rounded,
tf.float32, shape=tensor_shape_list)]
def quantize_weight_eightbit(input_node, quantization_mode):
"""Returns replacement nodes for input_node using the Dequantize op."""
base_name = input_node.name + "_"
quint8_const_name = base_name + "quint8_const"
min_name = base_name + "min"
max_name = base_name + "max"
float_tensor = tensor_util.MakeNdarray(
input_node.attr["value"].tensor)
min_value = np.min(float_tensor.flatten())
max_value = np.max(float_tensor.flatten())
# min_value == max_value is a tricky case. It can occur for general
# tensors, and of course for scalars. The quantized ops cannot deal
# with this case, so we set max_value to something else.
# It's a tricky question what is the numerically best solution to
# deal with this degeneracy.
# TODO(petewarden): Better use a tolerance than a hard comparison?
if min_value == max_value:
if abs(min_value) < 0.000001:
max_value = min_value + 1.0
else:
max_value = 2 * min_value
sess = tf.Session()
with sess.as_default():
quantize_op = tf.contrib.quantization.python.quantize_v2(
float_tensor,
min_value,
max_value,
tf.quint8,
mode=quantization_mode)
quint8_tensor = quantize_op[0].eval()
shape = tensor_util.TensorShapeProtoToList(input_node.attr[
"value"].tensor.tensor_shape)
quint8_const_node = create_constant_node(quint8_const_name,
quint8_tensor,
tf.quint8,
shape=shape)
min_node = create_constant_node(min_name, min_value, tf.float32)
max_node = create_constant_node(max_name, max_value, tf.float32)
dequantize_node = create_node("Dequantize", input_node.name,
[quint8_const_name, min_name, max_name])
set_attr_dtype(dequantize_node, "T", tf.quint8)
set_attr_string(dequantize_node, "mode", quantization_mode)
return [quint8_const_node, min_node, max_node, dequantize_node]
class GraphRewriter(object):
"""Takes a float graph, and rewrites it in quantized form."""
def __init__(self, input_graph, mode):
"""Sets up the class to rewrite a float graph.
Args:
input_graph: A float graph to transform.
mode: A string controlling how quantization is performed -
round, quantize, eightbit, or weights.
Raises:
ValueError: Two nodes with the same name were found in the graph.
"""
self.input_graph = input_graph
self.nodes_map = self.create_nodes_map(input_graph)
self.output_graph = None
self.mode = mode
load_quantized_ops_so.Load()
load_quantized_kernels_so.Load()
def create_nodes_map(self, graph):
"""Builds a mapping of node names to their defs from the graph."""
nodes_map = {}
for node in graph.node:
if node.name not in nodes_map.keys():
nodes_map[node.name] = node
else:
raise ValueError("Duplicate node names detected.")
return nodes_map
def rewrite(self, output_node_names):
"""Triggers rewriting of the float graph.
Args:
output_node_names: A list of names of the nodes that produce the final
results.
Returns:
A quantized version of the float graph.
"""
self.output_graph = tf.GraphDef()
output_nodes = [self.nodes_map[output_node_name]
for output_node_name in output_node_names]
if self.mode == "round":
self.already_visited = {}
for output_node in output_nodes:
self.round_nodes_recursively(output_node)
elif self.mode == "quantize":
self.already_visited = {}
self.already_quantized = {}
for output_node in output_nodes:
self.quantize_nodes_recursively(output_node)
elif self.mode == "eightbit":
self.set_input_graph(self.remove_unneeded_nodes(self.input_graph))
self.already_visited = {}
self.layers_eightbitized = []
for output_node in output_nodes:
self.eightbitize_nodes_recursively(output_node)
self.output_graph = self.quantize_weights(self.output_graph, b"MIN_FIRST")
if FLAGS.strip_redundant_quantization:
self.output_graph = self.remove_redundant_quantization(
self.output_graph)
self.remove_dead_nodes(output_node_names)
elif self.mode == "weights":
self.output_graph = self.quantize_weights(self.input_graph,
b"MIN_COMBINED")
self.remove_dead_nodes(output_node_names)
elif self.mode == "weights_rounded":
self.output_graph = self.quantize_weights(self.input_graph, self.mode)
self.remove_dead_nodes(output_node_names)
else:
print("Bad mode - " + self.mode + ".")
return self.output_graph
def round_nodes_recursively(self, current_node):
"""The entry point for simple rounding quantization."""
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
if input_node_name in self.already_visited:
continue
input_node = self.nodes_map[input_node_name]
self.round_nodes_recursively(input_node)
self.already_visited[current_node.name] = True
nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
if any(current_node.op in s for s in nodes_to_quantize):
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
new_node.name = current_node.name + "_original"
self.add_output_graph_node(new_node)
levels = 1 << FLAGS.bitdepth
constant_name = current_node.name + "_round_depth"
constant_tensor = tf.constant(levels, dtype=tf.int32, name=constant_name)
constant_node = constant_tensor.op.node_def
self.add_output_graph_node(constant_node)
quantize_node = tf.NodeDef()
quantize_node.op = "RoundToSteps"
quantize_node.name = current_node.name
quantize_node.input.extend([current_node.name + "_original"])
quantize_node.input.extend([constant_node.name])
self.add_output_graph_node(quantize_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def quantize_nodes_recursively(self, current_node):
"""The entry point for quantizing nodes to eight bit and back."""
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
if input_node_name in self.already_visited:
continue
input_node = self.nodes_map[input_node_name]
self.quantize_nodes_recursively(input_node)
self.already_visited[current_node.name] = True
nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
if any(current_node.op in s for s in nodes_to_quantize):
for input_name in current_node.input:
input_name = node_name_from_input(input_name)
input_node = self.nodes_map[input_name]
self.quantize_node(input_node)
self.quantize_node(current_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def quantize_node(self, input_node):
"""Handles quantizing a single node."""
input_name = input_node.name
if input_name in self.already_quantized:
return
self.already_quantized[input_name] = True
original_input_name = input_name + "_original"
reshape_name = input_name + "_reshape"
reshape_dims_name = input_name + "_reshape_dims"
max_name = input_name + "_max"
min_name = input_name + "_min"
dims_name = input_name + "_dims"
quantize_name = input_name + "_quantize"
dequantize_name = input_name
original_input_node = tf.NodeDef()
original_input_node.CopyFrom(input_node)
original_input_node.name = original_input_name
self.add_output_graph_node(original_input_node)
reshape_dims_node = create_constant_node(reshape_dims_name, -1, tf.int32,
[1])
self.add_output_graph_node(reshape_dims_node)
reshape_node = create_node("Reshape", reshape_name, [original_input_name,
reshape_dims_name])
set_attr_dtype(reshape_node, "T", tf.float32)
self.add_output_graph_node(reshape_node)
dims_node = create_constant_node(dims_name, 0, tf.int32, [1])
self.add_output_graph_node(dims_node)
max_node = create_node("Max", max_name, [reshape_name, dims_name])
set_attr_dtype(max_node, "T", tf.float32)
set_attr_bool(max_node, "keep_dims", False)
self.add_output_graph_node(max_node)
min_node = create_node("Min", min_name, [reshape_name, dims_name])
set_attr_dtype(min_node, "T", tf.float32)
set_attr_bool(min_node, "keep_dims", False)
self.add_output_graph_node(min_node)
quantize_node = create_node("Quantize", quantize_name, [original_input_name,
min_name, max_name])
set_attr_dtype(quantize_node, "T", tf.quint8)
set_attr_string(quantize_node, "mode", b"MIN_FIRST")
self.add_output_graph_node(quantize_node)
dequantize_node = create_node("Dequantize", dequantize_name,
[quantize_name, min_name, max_name])
set_attr_dtype(dequantize_node, "T", tf.quint8)
set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
self.add_output_graph_node(dequantize_node)
def eightbitize_nodes_recursively(self, current_node):
"""The entry point for transforming a graph into full eight bit."""
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
if input_node_name in self.already_visited:
continue
input_node = self.nodes_map[input_node_name]
self.eightbitize_nodes_recursively(input_node)
self.already_visited[current_node.name] = True
if current_node.op == "MatMul":
self.eightbitize_mat_mul_node(current_node)
elif current_node.op == "Conv2D":
self.eightbitize_conv_node(current_node)
self.layers_eightbitized.append(current_node.name)
elif current_node.op == "BiasAdd":
self.eightbitize_bias_add_node(current_node)
elif current_node.op == "MaxPool" or current_node.op == "AvgPool":
self.eightbitize_single_input_tensor_node(current_node,
self.add_pool_function)
elif current_node.op == "Relu" or current_node.op == "Relu6":
self.eightbitize_single_input_tensor_node(current_node,
self.add_relu_function)
elif current_node.op == "Concat":
self.eightbitize_concat_node(current_node)
elif current_node.op == "BatchNormWithGlobalNormalization":
self.eightbitize_batch_norm_node(current_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def add_eightbit_prologue_nodes(self, original_node):
"""Adds input conversion nodes to handle quantizing the underlying node."""
namespace_prefix = original_node.name + "_eightbit"
reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
namespace_prefix)
input_names = []
min_max_names = []
for original_input_name in original_node.input:
quantize_input_name, min_input_name, max_input_name = (
self.eightbitize_input_to_node(namespace_prefix, original_input_name,
reshape_dims_name,
reduction_dims_name))
input_names.append(quantize_input_name)
min_max_names.append(min_input_name)
min_max_names.append(max_input_name)
all_input_names = []
all_input_names.extend(input_names)
all_input_names.extend(min_max_names)
return all_input_names
def add_common_quantization_nodes(self, namespace_prefix):
"""Builds constant nodes needed for quantization of inputs."""
reshape_dims_name = namespace_prefix + "_reshape_dims"
reduction_dims_name = namespace_prefix + "_reduction_dims"
reshape_dims_node = create_constant_node(reshape_dims_name, -1, tf.int32,
[1])
self.add_output_graph_node(reshape_dims_node)
reduction_dims_node = create_constant_node(reduction_dims_name, 0, tf.int32,
[1])
self.add_output_graph_node(reduction_dims_node)
return reshape_dims_name, reduction_dims_name
def eightbitize_input_to_node(self, namespace_prefix, original_input_name,
reshape_dims_name, reduction_dims_name):
"""Takes one float input to an op, and converts it to quantized form."""
unique_input_name = unique_node_name_from_input(original_input_name)
reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name
min_input_name = namespace_prefix + "_min_" + unique_input_name
max_input_name = namespace_prefix + "_max_" + unique_input_name
quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name
reshape_input_node = create_node("Reshape", reshape_input_name,
[original_input_name, reshape_dims_name])
set_attr_dtype(reshape_input_node, "T", tf.float32)
self.add_output_graph_node(reshape_input_node)
min_input_node = create_node("Min", min_input_name, [reshape_input_name,
reduction_dims_name])
set_attr_dtype(min_input_node, "T", tf.float32)
set_attr_bool(min_input_node, "keep_dims", False)
self.add_output_graph_node(min_input_node)
max_input_node = create_node("Max", max_input_name, [reshape_input_name,
reduction_dims_name])
set_attr_dtype(max_input_node, "T", tf.float32)
set_attr_bool(max_input_node, "keep_dims", False)
self.add_output_graph_node(max_input_node)
quantize_input_node = create_node("QuantizeV2", quantize_input_name,
[original_input_name, min_input_name,
max_input_name])
set_attr_dtype(quantize_input_node, "T", tf.quint8)
set_attr_string(quantize_input_node, "mode", b"MIN_FIRST")
self.add_output_graph_node(quantize_input_node)
min_output_name = quantize_input_name + ":1"
max_output_name = quantize_input_name + ":2"
return quantize_input_name, min_output_name, max_output_name
def add_quantize_down_node(self, original_node, quantized_output_name):
quantize_down_name = original_node.name + "_eightbit_quantize_down"
quantize_down_node = create_node(
"QuantizeDownAndShrinkRange", quantize_down_name,
[quantized_output_name, quantized_output_name + ":1",
quantized_output_name + ":2"])
set_attr_dtype(quantize_down_node, "Tinput", tf.qint32)
set_attr_dtype(quantize_down_node, "out_type", tf.quint8)
self.add_output_graph_node(quantize_down_node)
return quantize_down_name
def add_dequantize_result_node(self, quantized_output_name,
original_node_name):
dequantize_name = original_node_name
dequantize_node = create_node("Dequantize", dequantize_name,
[quantized_output_name,
quantized_output_name + ":1",
quantized_output_name + ":2"])
set_attr_dtype(dequantize_node, "T", tf.quint8)
set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
self.add_output_graph_node(dequantize_node)
def eightbitize_mat_mul_node(self, original_node):
"""Replaces a MatMul node with the eight bit equivalent sub-graph."""
    quantized_mat_mul_name = original_node.name + "_eightbit_quantized_mat_mul"
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_mat_mul_node = create_node(
"QuantizedMatMul", quantized_mat_mul_name,
all_input_names)
set_attr_dtype(quantized_mat_mul_node, "T1", tf.quint8)
set_attr_dtype(quantized_mat_mul_node, "T2", tf.quint8)
set_attr_dtype(quantized_mat_mul_node, "Toutput", tf.qint32)
copy_attr(quantized_mat_mul_node, "transpose_a",
original_node.attr["transpose_a"])
copy_attr(quantized_mat_mul_node, "transpose_b",
original_node.attr["transpose_b"])
self.add_output_graph_node(quantized_mat_mul_node)
quantize_down_name = self.add_quantize_down_node(original_node,
quantized_mat_mul_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_conv_node(self, original_node):
"""Replaces a Conv2D node with the eight bit equivalent sub-graph."""
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_conv_name = original_node.name + "_eightbit_quantized_conv"
quantized_conv_node = create_node("QuantizedConv2D", quantized_conv_name,
all_input_names)
copy_attr(quantized_conv_node, "strides", original_node.attr["strides"])
copy_attr(quantized_conv_node, "padding", original_node.attr["padding"])
set_attr_dtype(quantized_conv_node, "Tinput", tf.quint8)
set_attr_dtype(quantized_conv_node, "Tfilter", tf.quint8)
set_attr_dtype(quantized_conv_node, "out_type", tf.qint32)
self.add_output_graph_node(quantized_conv_node)
quantize_down_name = self.add_quantize_down_node(original_node,
quantized_conv_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_bias_add_node(self, original_node):
"""Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
quantized_bias_add_name = (original_node.name +
"_eightbit_quantized_bias_add")
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_bias_add_node = create_node(
"QuantizedBiasAdd", quantized_bias_add_name,
all_input_names)
set_attr_dtype(quantized_bias_add_node, "T1", tf.quint8)
set_attr_dtype(quantized_bias_add_node, "T2", tf.quint8)
set_attr_dtype(quantized_bias_add_node, "out_type", tf.qint32)
self.add_output_graph_node(quantized_bias_add_node)
quantize_down_name = self.add_quantize_down_node(original_node,
quantized_bias_add_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_single_input_tensor_node(self, original_node,
add_op_function):
"""Replaces a single-tensor node with the eight bit equivalent sub-graph.
Converts a node like this:
Shape(f) Input(f)
| |
+--------v v
Operation
|
v
(f)
Into a quantized equivalent:
Input(f) ReshapeDims
+------v v-------------+
| Reshape
| |
| | ReductionDims
| +-----+ |
| | +---c---------+
| v v v v-------+
| Min Max
| +----+ |
v v v--------+
Quantize
|
v
QuantizedOperation
| | |
v v v
Dequantize
|
v
(f)
Args:
original_node: Float node to be converted.
add_op_function: Function to create the actual node.
Returns:
Subgraph representing the quantized version of the original node.
"""
quantized_op_name = original_node.name + "_eightbit_quantized"
quantized_op_type = "Quantized" + original_node.op
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_op_node = create_node(
quantized_op_type, quantized_op_name, all_input_names)
add_op_function(original_node, quantized_op_node)
self.add_output_graph_node(quantized_op_node)
self.add_dequantize_result_node(quantized_op_name, original_node.name)
def add_pool_function(self, original_node, quantized_op_node):
set_attr_dtype(quantized_op_node, "T", tf.quint8)
copy_attr(quantized_op_node, "ksize", original_node.attr["ksize"])
copy_attr(quantized_op_node, "strides", original_node.attr["strides"])
copy_attr(quantized_op_node, "padding", original_node.attr["padding"])
def add_relu_function(self, unused_arg_node, quantized_op_node):
set_attr_dtype(quantized_op_node, "Tinput", tf.quint8)
def eightbitize_concat_node(self, original_node):
"""Replaces a Concat node with the eight bit equivalent sub-graph.
Converts a node like this:
Shape(f) Input0(f) Input1(f)
| | |
+--------v v v----------+
Concat
|
v
(f)
Into a quantized equivalent:
Shape(f) Input0(f) ReshapeDims Input1(f)
| +------v v--------------+------------------v v------+
| | Reshape Reshape |
| | | | |
| | | ReductionDims | |
| | +------+ | +--------+ |
| | | +---c---------+-----------c-----+ | |
| | +v v v v-------+---------v v v v+ |
| | Min Max Min Max |
| | +----+ | | +-----+ |
| v v v--------+ +----------v v v
| Quantize Quantize
| +------------------+ +----------------------+
+-------------------------------+ | |
v v v
QuantizedConcat
| | |
v v v
Dequantize
|
v
(f)
Args:
original_node: Float node to be converted.
Returns:
Subgraph representing the quantized version of the original node.
"""
namespace_prefix = original_node.name + "_eightbit"
quantized_concat_name = namespace_prefix + "_quantized_concat"
reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
namespace_prefix)
shape_input_name = original_node.input[0]
original_inputs = original_node.input[1:]
input_names = []
min_names = []
max_names = []
for original_input_name in original_inputs:
quantize_input_name, min_input_name, max_input_name = (
self.eightbitize_input_to_node(namespace_prefix, original_input_name,
reshape_dims_name,
reduction_dims_name))
input_names.append(quantize_input_name)
min_names.append(min_input_name)
max_names.append(max_input_name)
all_input_names = [shape_input_name]
all_input_names.extend(input_names)
all_input_names.extend(min_names)
all_input_names.extend(max_names)
quantized_concat_node = create_node(
"QuantizedConcat", quantized_concat_name, all_input_names)
set_attr_int(quantized_concat_node, "N", len(original_inputs))
set_attr_dtype(quantized_concat_node, "T", tf.quint8)
self.add_output_graph_node(quantized_concat_node)
self.add_dequantize_result_node(quantized_concat_name, original_node.name)
def eightbitize_batch_norm_node(self, original_node):
"""Replaces a MatMul node with the eight bit equivalent sub-graph."""
namespace_prefix = original_node.name + "_eightbit"
original_input_name = original_node.input[0]
original_mean_name = original_node.input[1]
original_variance_name = original_node.input[2]
original_beta_name = original_node.input[3]
original_gamma_name = original_node.input[4]
quantized_batch_norm_name = namespace_prefix + "_quantized_batch_norm"
reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
namespace_prefix)
quantize_input_name, min_input_name, max_input_name = (
self.eightbitize_input_to_node(namespace_prefix, original_input_name,
reshape_dims_name, reduction_dims_name))
quantize_mean_name, min_mean_name, max_mean_name = (
self.eightbitize_input_to_node(namespace_prefix, original_mean_name,
reshape_dims_name, reduction_dims_name))
quantize_variance_name, min_variance_name, max_variance_name = (
self.eightbitize_input_to_node(namespace_prefix, original_variance_name,
reshape_dims_name, reduction_dims_name))
quantize_beta_name, min_beta_name, max_beta_name = (
self.eightbitize_input_to_node(namespace_prefix, original_beta_name,
reshape_dims_name, reduction_dims_name))
quantize_gamma_name, min_gamma_name, max_gamma_name = (
self.eightbitize_input_to_node(namespace_prefix, original_gamma_name,
reshape_dims_name, reduction_dims_name))
quantized_batch_norm_node = create_node(
"QuantizedBatchNormWithGlobalNormalization", quantized_batch_norm_name,
[quantize_input_name, min_input_name, max_input_name,
quantize_mean_name, min_mean_name, max_mean_name,
quantize_variance_name, min_variance_name, max_variance_name,
quantize_beta_name, min_beta_name, max_beta_name, quantize_gamma_name,
min_gamma_name, max_gamma_name])
set_attr_dtype(quantized_batch_norm_node, "Tinput", tf.quint8)
set_attr_dtype(quantized_batch_norm_node, "out_type", tf.qint32)
copy_attr(quantized_batch_norm_node, "scale_after_normalization",
original_node.attr["scale_after_normalization"])
copy_attr(quantized_batch_norm_node, "variance_epsilon",
original_node.attr["variance_epsilon"])
self.add_output_graph_node(quantized_batch_norm_node)
quantize_down_name = self.add_quantize_down_node(original_node,
quantized_batch_norm_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def add_output_graph_node(self, output_node):
"""Inserts one node into the new graph."""
self.output_graph.node.extend([output_node])
def remove_redundant_quantization(self, old_graph):
"""Removes unneeded pairs of quantize/dequantize ops from the graph.
This is a bit of a tricky function, because it's attempting to spot the
pattern of dequantizing from eight-bit up to float, and then immediately
quantizing back down to eight bits again, that's introduced by previous
passes that do 'key-hole' conversions of individual nodes but have to
convert back to float to match the previous output interface, since they
don't know that the next op can handle quantized tensors.
It works by:
- Looking for Quantize nodes.
- Checking to see if their first input is a Dequantize node.
- Seeing if their min/max inputs come from Min/Max nodes.
- Making sure those Min/Max nodes are being fed from the same Dequantize.
- Or that the Min is indirectly being fed from the same Dequantize as Max.
- Making sure the Dequantize is going through a Reshape (which we add
during the previous pass when we create the quantize sub-graph).
- Looking for the dims Const op for the Min/Max dims.
If all of these conditions are met, then it's a sub-graph pattern that
we know how to optimize out (and is likely the common one we've introduced).
We then rewire the graph to skip it entirely, and then rely on the dead node
removal pass to get rid of any nodes that are no longer needed.
Args:
old_graph: The model we'll be stripping redundant nodes from.
Returns:
A graph with the unnecessary nodes removed.
Raises:
ValueError: Two nodes with the same name were found in the graph.
"""
old_nodes_map = self.create_nodes_map(old_graph)
self.output_graph = tf.GraphDef()
inputs_to_rename = {}
# We go through all the nodes, looking for any that match the patterns we
# know how to optimize away.
for node in old_graph.node:
# We always start with a Quantize node, and examine its inputs to see if
# they are in a form that can be removed.
if node.op not in ["Quantize", "QuantizeV2"]:
continue
dequantize_node_name = node_name_from_input(node.input[0])
if dequantize_node_name not in old_nodes_map:
raise ValueError("Input node name '" + dequantize_node_name +
"' not found in node '" + node.name + "'")
dequantize_node = old_nodes_map[dequantize_node_name]
# Do we have a Dequantize feeding in, with the same type as the Quantize?
if dequantize_node.op != "Dequantize":
continue
if node.attr["T"] != dequantize_node.attr["T"]:
continue
# Now look at the other inputs, and ensure they're Min/Max nodes.
min_node_name = node_name_from_input(node.input[1])
max_node_name = node_name_from_input(node.input[2])
min_node = old_nodes_map[min_node_name]
max_node = old_nodes_map[max_node_name]
is_min_right_type = (min_node.op in ["Min", "Dequantize"])
is_max_right_type = (max_node.op in ["Max", "Dequantize"])
if not is_min_right_type or not is_max_right_type:
print("Didn't find expected types on inputs : %s, %s." % (
min_node.op, max_node.op))
continue
min_node_input_name = node_name_from_input(min_node.input[0])
max_node_input_name = node_name_from_input(max_node.input[0])
# There are two different patterns for Min nodes we can recognize, one
# where the input comes directly from the same one as the Max, and
# another where we run it through another Min first, so check for both.
is_same_input = False
if min_node_input_name == max_node_input_name:
is_same_input = True
else:
first_min_node_input = old_nodes_map[min_node_input_name]
if first_min_node_input.op == "Concat":
second_min_node_name = node_name_from_input(
first_min_node_input.input[1])
second_min_node = old_nodes_map[second_min_node_name]
if second_min_node.op == "Min":
second_min_node_input_name = node_name_from_input(
second_min_node.input[0])
is_same_input = (second_min_node_input_name == max_node_input_name)
if not is_same_input:
print("Different min/max inputs: " + min_node_input_name)
continue
# We recognize this pattern, so mark the graph edges to be rewired to
# route around it entirely, since we know it's a no-op.
dequantize_source_name = node_name_from_input(dequantize_node.input[0])
node_tensor_name = ensure_tensor_name_has_port(node.name)
min_tensor_name = node.name + ":1"
max_tensor_name = node.name + ":2"
inputs_to_rename[node_tensor_name] = dequantize_source_name
inputs_to_rename[min_tensor_name] = dequantize_node.input[1]
inputs_to_rename[max_tensor_name] = dequantize_node.input[2]
# Finally we apply all the rewiring we've marked to the graph.
for node in old_graph.node:
for index, input_full_name in enumerate(node.input):
input_name = ensure_tensor_name_has_port(input_full_name)
if input_name in inputs_to_rename:
node.input[index] = inputs_to_rename[input_name]
self.add_output_graph_node(node)
return self.output_graph
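  # A rough sketch (following the docstring above) of the redundant pattern
  # this pass rewires around:
  #
  #   ... -> Dequantize -> Reshape -> Min/Max -> Quantize(V2) -> ...
  #
  # Consumers of the Quantize outputs are redirected to the Dequantize's
  # quantized source and its min/max inputs, so the whole float round trip
  # becomes dead code for the later dead-node removal pass to strip out.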
def remove_dead_nodes(self, output_names):
"""Removes nodes that are no longer needed for inference from the graph."""
old_output_graph = self.output_graph
self.output_graph = graph_util.extract_sub_graph(old_output_graph,
output_names)
def quantize_weights(self, input_graph, quantization_mode):
"""Quantize float Const ops.
There are two modes of operations, both replace float Const ops with
quantized values.
1. If quantization_mode is "weights_rounded", this function replaces float
Const ops with quantized float Const ops - same as the original op, but
float values being mapped to the center of one of 1<<FLAGS.bitdepth buckets.
This does not change the raw model size, but compression algorithms such as
zip (as used for compressing apks) or bzip2 will achieve a very good
compression ratio.
2. For other quantization modes ("MIN_COMBINED" or "MIN_FIRST"), float
Const ops are quantized and replaced by a tuple of four ops to perform
the dequantization at runtime:
    * eight-bit Const (bucket indices, same shape as the original float Const op)
* two float Const ops (min and max value of original float Const op)
* Dequantize op to convert the eight-bit consts to float tensors.
The quantization mode is important because we see accuracy problems when
quantizing weights for different situations depending on the algorithm
used. We haven't figured out exactly what the underlying cause is yet,
unfortunately.
Args:
input_graph: A GraphDef of the model containing float Const ops.
quantization_mode: How to quantize and dequantize the values.
Returns:
A GraphDef of the converted graph.
Raises:
ValueError: If quantization_mode is unsupported.
"""
output_graph = tf.GraphDef()
for input_node in input_graph.node:
should_quantize = False
if input_node.op == "Const":
dtype = tf.as_dtype(input_node.attr["dtype"].type)
if dtype == tf.float32:
should_quantize = True
if should_quantize:
if quantization_mode == "weights_rounded":
output_graph.node.extend(quantize_weight_rounded(input_node))
elif quantization_mode in (b"MIN_COMBINED", b"MIN_FIRST"):
output_graph.node.extend(quantize_weight_eightbit(input_node,
quantization_mode))
else:
raise ValueError("Unsupported quantization mode %s." %
quantization_mode)
else:
output_node = tf.NodeDef()
output_node.CopyFrom(input_node)
output_graph.node.extend([output_node])
return output_graph
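  # A hedged illustration of the "weights_rounded" mode described above: with
  # FLAGS.bitdepth = 8 there are 1 << 8 = 256 buckets spanning each weight
  # tensor's value range, so every float weight is snapped to the center of
  # one of at most 256 distinct values. The tensor stays float32 (no change in
  # raw size), but the heavy repetition is what lets zip or bzip2 compress the
  # model well.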
def remove_unneeded_nodes(self, input_graph):
"""Prunes out nodes that aren't needed for inference.
There are nodes like Identity and CheckNumerics that are only useful
during training, and can be removed in graphs that will be used for
nothing but inference. Here we identify and remove them, returning an
equivalent graph.
Args:
input_graph: Model to analyze and prune.
Returns:
A list of nodes with the unnecessary ones removed.
"""
types_to_remove = {"CheckNumerics": True}
input_nodes = input_graph.node
names_to_remove = {}
for node in input_nodes:
if node.op in types_to_remove:
names_to_remove[node.name] = True
nodes_after_removal = []
for node in input_nodes:
if node.name in names_to_remove:
continue
new_node = tf.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
if input_name in names_to_remove:
continue
new_node.input.append(full_input_name)
nodes_after_removal.append(new_node)
types_to_splice = {"Identity": True}
names_to_splice = {}
for node in nodes_after_removal:
if node.op in types_to_splice:
# We don't want to remove nodes that have control edge inputs, because
# they might be involved in subtle dependency issues that removing them
# will jeopardize.
has_control_edge = False
for input_name in node.input:
if re.match(r"^\^", input_name):
has_control_edge = True
if not has_control_edge:
names_to_splice[node.name] = node.input[0]
nodes_after_splicing = []
for node in nodes_after_removal:
if node.name in names_to_splice:
continue
new_node = tf.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
if input_name in names_to_splice:
new_node.input.append(names_to_splice[input_name])
else:
new_node.input.append(full_input_name)
nodes_after_splicing.append(new_node)
output_graph = tf.GraphDef()
output_graph.node.extend(nodes_after_splicing)
return output_graph
def set_input_graph(self, new_input_graph):
self.input_graph = new_input_graph
self.nodes_map = self.create_nodes_map(self.input_graph)
def main(unused_args):
if not tf.gfile.Exists(FLAGS.input):
print("Input graph file '" + FLAGS.input + "' does not exist!")
return -1
known_modes = ["round", "quantize", "eightbit", "weights", "test",
"weights_rounded"]
if not any(FLAGS.mode in s for s in known_modes):
print("mode is '" + FLAGS.mode + "', not in " + ", ".join(known_modes) +
".")
return -1
tf_graph = tf.GraphDef()
with tf.gfile.Open(FLAGS.input, "r") as f:
data = f.read()
tf_graph.ParseFromString(data)
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(tf_graph, input_map={}, name="")
rewriter = GraphRewriter(tf_graph, FLAGS.mode)
output_graph = rewriter.rewrite(FLAGS.output_node_names.split(","))
f = tf.gfile.FastGFile(FLAGS.output, "w")
f.write(output_graph.SerializeToString())
return 0
if __name__ == "__main__":
tf.app.run()
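# Hedged usage sketch (not part of the original tool): the same rewrite that
# main() drives through FLAGS can also be invoked programmatically. The file
# paths and output node name below are placeholders.
#
#   graph_def = tf.GraphDef()
#   with tf.gfile.Open("/tmp/frozen_graph.pb", "r") as f:
#     graph_def.ParseFromString(f.read())
#   rewriter = GraphRewriter(graph_def, "eightbit")
#   output_graph = rewriter.rewrite(["softmax"])
#   with tf.gfile.FastGFile("/tmp/quantized_graph.pb", "w") as f:
#     f.write(output_graph.SerializeToString())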
|
TakayukiSakai/tensorflow
|
tensorflow/contrib/quantization/tools/quantize_graph.py
|
Python
|
apache-2.0
| 46,171
|
import re
from datetime import datetime, date
from time import mktime, gmtime
def jp_author_uri_old(forename, surname):
"""Construct the URI of an author so that it will reduce correctly most of the time."""
first_word = forename.replace('.','')
second_word = surname.replace('.','')
if len(second_word) < 3:
base = second_word[0].lower() + '.' + first_word.lower()
else:
base = first_word[0].lower() + '.' + second_word.lower()
return "author/" + asciiChars(base,'')
def jp_author_uri(forename, surname):
"""Construct the URI of an author so that it will reduce correctly most of the time."""
return jp_author_uri_2(forename + " " + surname)
def jp_author_uri_2(name):
normalized = jp_author_name_normalized(name)
return "author/" + normalized.replace(' ','').lower()
def jp_author_name_normalized(name):
"""Construct the author name as P. Szekely."""
clean = name.replace('.',' ').replace(',',' ').replace(';', ' ')
clean = asciiChars(clean, '')
names = re.sub(r'\s+', ' ', clean.strip()).split(' ');
last_word = names[-1]
if len(last_word) == 1:
# The last word is an initial, so we accumulate all words before it that are not initials
# that will be our last name
i = 0;
index = -1 # index of last word that is not an initial
for n in names:
if len(n)>1:
index = i
else:
names[i] = n + '.'
i = i + 1;
if index == -1 or index == len(names) - 1:
return ' '.join(names).title();
last = names[index]
first = ' '.join(names[0:index]) + ' '.join(names[index + 1:])
return (first + ' ' + last).title()
else:
i = 0
for n in names:
if len(n) == 1:
names[i] = n + '.'
elif i < len(names) - 1:
names[i] = n[0] + '.'
i = i + 1
return ' '.join(names).title();
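# Hedged examples of the two helpers above, assuming asciiChars() (a helper
# provided by the Karma environment, not defined in this file) leaves plain
# ASCII input unchanged:
#
#   jp_author_name_normalized("Pedro Szekely")  ->  "P. Szekely"
#   jp_author_uri_2("Pedro Szekely")            ->  "author/p.szekely"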
def jp_author_name_normalized_old(name):
"""Construct the author name as P. Szekely."""
names = name.replace('.','').split(' ');
first_word = names[0];
last_word = names[-1]
if len(last_word) == 1:
first = last_word;
last = first_word;
else:
first = first_word;
last = last_word;
# make the first name be the initial only.
if len(first) > 1:
first = first[0];
if len(first) == 1:
first = first + '.';
names[0] = first;
names[-1] = last;
return ' '.join(names).title();
def jp_author_name(forename, surname):
"""Construct the name of a person as a single string."""
if len(forename) == 1:
forename = forename+'.'
return forename+" "+surname
def jp_article_uri(filename):
"""Construct the URI for an article using the file name"""
i = filename.rfind('.')
just_name = alphaNumeric(filename[:i],'_')
return 'article/jpl/'+just_name
def jp_clean_date(dateString):
return getYearFromISODate(iso8601date(dateString,"%B %Y"))
def jp_clean_year(string, format):
"""Parse the date and return the year."""
return getYearFromISODate(iso8601date(string,format))
def jp_clean_year_best_effort(string):
"""Try to parse the string as a date and return the year"""
d = jp_clean_year(string, "%Y")
if d:
return d;
d = jp_clean_year(string, "%Y-%m")
if d:
return d;
return ''
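# Hedged example: assuming iso8601date() and getYearFromISODate() (also
# Karma-provided helpers) behave as their names suggest, both
# jp_clean_year_best_effort("2014") and jp_clean_year_best_effort("2014-06")
# would return the year "2014", while an unparseable string falls through to ''.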
|
usc-isi-i2/dig-alignment
|
versions/2.0/karma/python/jpl-pdf.py
|
Python
|
apache-2.0
| 3,063
|
import pytest
from share.change import ChangeGraph
from share.models import ChangeSet
from share.disambiguation import GraphDisambiguator
from tests.share.normalize.factories import *
class TestPruneChangeGraph:
@pytest.mark.parametrize('input', [
[Preprint(0, identifiers=[WorkIdentifier(1)])]
])
def test_no_change(self, Graph, input):
graph = ChangeGraph(Graph(*input))
GraphDisambiguator().prune(graph)
result = [n.serialize() for n in graph.nodes]
assert result == Graph(*input)
@pytest.mark.parametrize('input, output', [
([
Preprint(0, identifiers=[WorkIdentifier(id=1, uri='http://osf.io/guidguid')]),
CreativeWork(id=1, sparse=True, identifiers=[WorkIdentifier(uri='http://osf.io/guidguid')])
], [
Preprint(0, identifiers=[WorkIdentifier(uri='http://osf.io/guidguid')]),
]),
([
Preprint(0, identifiers=[
WorkIdentifier(uri='http://osf.io/guidguid'),
WorkIdentifier(4)
]),
CreativeWork(id=1, sparse=True, identifiers=[WorkIdentifier(uri='http://osf.io/guidguid')])
], [
Preprint(0, identifiers=[
WorkIdentifier(uri='http://osf.io/guidguid'),
WorkIdentifier(4)
]),
])
])
def test_prune(self, Graph, input, output):
graph = ChangeGraph(Graph(*input))
GraphDisambiguator().prune(graph)
result = [n.serialize() for n in graph.nodes]
assert result == Graph(*output)
@pytest.mark.django_db
@pytest.mark.parametrize('input', [
[
Preprint(identifiers=[WorkIdentifier()])
],
[
Preprint(identifiers=[
WorkIdentifier(),
WorkIdentifier()
])
],
[
Article(
identifiers=[WorkIdentifier()],
agent_relations=[
Creator(agent=Person()),
Creator(agent=Person()),
Publisher(agent=Organization())
],
tags=[Tag(), Tag()]
)
],
])
def test_all_disambiguate(self, input, Graph, normalized_data_id):
graph = ChangeGraph(Graph(*input))
ChangeSet.objects.from_graph(graph, normalized_data_id).accept()
assert all(n.instance is None for n in graph.nodes)
GraphDisambiguator().find_instances(graph)
assert all(n.instance for n in graph.nodes)
assert all(n.instance._meta.model_name == n.type for n in graph.nodes)
|
zamattiac/SHARE
|
tests/share/disambiguation/test_prune.py
|
Python
|
apache-2.0
| 2,629
|
import unittest
import sys
import os
sys.path.append('roomfinder_spark/roomfinder_spark')
import spark_bot
class FlaskTestCase(unittest.TestCase):
def setUp(self):
sys.stderr.write('Setup testing.\n')
#web_server.data_server = os.getenv("roomfinder_data_server")
#web_server.book_url = os.getenv("roomfinder_book_server")
spark_bot.app.config['TESTING'] = True
self.app = spark_bot.app.test_client()
def test_correct_http_response(self):
sys.stderr.write('Test HTTP GET /demoroom/members == 200.\n')
resp = self.app.get('/demoroom/members')
self.assertEquals(resp.status_code, 200)
#def test_about_correct_http_response(self):
# sys.stderr.write('Test HTTP GET /about == 200.\n')
# resp = self.app.get('/about')
# self.assertEquals(resp.status_code, 200)
#def test_form_correct_http_response(self):
# sys.stderr.write('Test HTTP GET /form == 200.\n')
# resp = self.app.get('/form')
# self.assertEquals(resp.status_code, 200)
# def test_correct_content(self):
# resp = self.app.get('/hello/world')
# self.assertEquals(resp.data, '"Hello World!"\n')
# def test_universe_correct_content(self):
# resp = self.app.get('/hello/universe')
# self.assertEquals(resp.data, '"Hello Universe!"\n')
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
GuillaumeMorini/roomfinder
|
testing.py
|
Python
|
apache-2.0
| 1,440
|
#!/usr/bin/python
import sys
import os
if len(sys.argv) >= 4 :
ref_gpd_filename = sys.argv[1]
tag_gpd_filename = sys.argv[2]
output_filename_suffix = sys.argv[3]
else:
print("usage:ref_gpd_filename tag_gpd_filename output_filename_suffix")
print("or ./")
sys.exit(1)
def GetPathAndName(pathfilename):
ls=pathfilename.split('/')
filename=ls[-1]
path='/'.join(ls[0:-1])
if path == "":
path = "."
path = path +'/'
return path, filename
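# Worked examples of GetPathAndName(), following the logic above:
#   GetPathAndName("out/sample.gpd")  ->  ("out/", "sample.gpd")
#   GetPathAndName("sample.gpd")      ->  ("./", "sample.gpd")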
output_filename_suffix_path, output_filename_suffix_filename = GetPathAndName(output_filename_suffix)
novel_output_filename = output_filename_suffix_path + "negative_" + output_filename_suffix_filename
known_output_filename = output_filename_suffix_path + "maypositive_" + output_filename_suffix_filename
################################################################################
print "loading reference annotation", ref_gpd_filename
ref=open(ref_gpd_filename,'r')
ref_junlk_dt={}
for refline in ref:
refline_list=refline.split()
exon_start_list=refline_list[9].strip(',').split(',')
exon_start_list=exon_start_list[1:]
exon_end_list=refline_list[10].strip(',').split(',')
exon_end_list=exon_end_list[:-1]
if not ref_junlk_dt.has_key(refline_list[2]):
ref_junlk_dt[refline_list[2]]={}
i=1
while i < len(exon_end_list):
start = exon_end_list[i]
end=exon_start_list[i]
next_start = exon_end_list[i]
next_end = exon_start_list[i]
junlk_str = str(start) + "_" + str(end) + "_" + str(next_start) + "_" + str(next_end)
if not ref_junlk_dt[refline_list[2]].has_key(junlk_str):
ref_junlk_dt[refline_list[2]][junlk_str] = 0
ref_junlk_dt[refline_list[2]][junlk_str] += 1
i+=1
ref.close()
################################################################################
def check_novelity(ref_junlk_dt,chr_name,exon_start_list,exon_end_list):
    # returns 0 (novel) if at least one junction link is missing from the reference
    # returns 1 (known) if every junction link is present in the reference
i=1
while i < len(exon_end_list):
start = exon_end_list[i]
end=exon_start_list[i]
next_start = exon_end_list[i]
next_end = exon_start_list[i]
junlk_str = str(start) + "_" + str(end) + "_" + str(next_start) + "_" + str(next_end)
if not ref_junlk_dt[chr_name].has_key(junlk_str):
return 0
i+=1
return 1
################################################################################
novel_output = open(novel_output_filename,'w')
known_output = open(known_output_filename,'w')
print "loading target annotation", tag_gpd_filename
tag = open(tag_gpd_filename,'r')
for line in tag:
line_list=line.split()
exon_start_list=line_list[9].strip(',').split(',')
exon_start_list=exon_start_list[1:]
exon_end_list=line_list[10].strip(',').split(',')
exon_end_list=exon_end_list[:-1]
if len(exon_end_list) == 1:
print "single-junction transcript:", line.strip()
continue
elif len(exon_end_list) == 0:
print "single-exon transcript:", line.strip()
continue
if not ref_junlk_dt.has_key(line_list[2]):
print "no reference chromosome", line_list[2]
continue #novel
I = check_novelity(ref_junlk_dt,line_list[2],exon_start_list,exon_end_list)
if I == 0:
novel_output.write(line)
elif I == 1:
known_output.write(line)
tag.close()
novel_output.close()
known_output.close()
|
jason-weirather/IDP
|
bin/consective_junlk_filter.py
|
Python
|
apache-2.0
| 3,515
|
#!/usr/bin/env vpython
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from __future__ import print_function
import json
import sys
from google.protobuf import json_format as jsonpb
import test_env
def add_repo_with_basic_upstream_dependency(deps):
"""Does:
Create `upstream` repo with `up_mod` module, containing a single method
  `cool_method`.
Make the main repo depend on this module, and use the module for a recipe
`my_recipe`.
Run simulation training for the main repo, and commit the result.
"""
upstream = deps.add_repo('upstream')
# Set up a recipe in main_repo depending on a module in upstream
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def cool_method(self):
self.m.step('upstream step', ['echo', 'whats up'])
''')
up_commit = upstream.commit('add "up_mod"')
# Now use the upstream module in main_repo
with deps.main_repo.edit_recipes_cfg_pb2() as pkg_pb:
pkg_pb.deps['upstream'].revision = up_commit.revision
with deps.main_repo.write_recipe('my_recipe') as recipe:
recipe.DEPS = ['upstream/up_mod']
recipe.RunSteps.write('''
api.up_mod.cool_method()
''')
deps.main_repo.recipes_py('test', 'train')
deps.main_repo.commit('depend on upstream/up_mod')
class AutorollSmokeTest(test_env.RecipeEngineUnitTest):
def run_roll(self, deps, *args):
"""Runs the autoroll command and returns JSON.
Does not commit the resulting roll.
"""
outfile = self.tempfile()
output, retcode = deps.main_repo.recipes_py(
'-v', '-v', 'autoroll', '--verbose-json', '--output-json',
outfile, *args
)
if retcode != 0:
print(output, file=sys.stdout)
raise Exception('Roll failed')
with open(outfile) as fil:
return json.load(fil)
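  # For reference, a hedged sketch of the manual invocation run_roll() wraps
  # (the output path is a placeholder, not something the test suite uses):
  #
  #   recipes.py -v -v autoroll --verbose-json --output-json /tmp/roll.json
  #
  # The JSON written to --output-json is what run_roll() returns.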
def test_empty(self):
"""Tests the scenario where there are no roll candidates."""
deps = self.FakeRecipeDeps()
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertEqual([], roll_result['roll_details'])
self.assertEqual([], roll_result['rejected_candidate_specs'])
def test_trivial(self):
"""Tests the simplest trivial (i.e. no expectation changes) roll scenario.
"""
# prep
deps = self.FakeRecipeDeps()
upstream = deps.add_repo('upstream')
with upstream.write_file('some_file') as buf:
buf.write('hi!')
upstream_commit = upstream.commit('c1')
# test
spec = deps.main_repo.recipes_cfg_pb2
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertTrue(roll_result['trivial'])
spec.deps['upstream'].revision = upstream_commit.revision
expected_picked_roll = {
'commit_infos': {
'upstream': [
upstream_commit.as_roll_info(),
],
},
'spec': jsonpb.MessageToDict(spec, preserving_proto_field_name=True),
}
self.assertEqual(expected_picked_roll['commit_infos'],
roll_result['picked_roll_details']['commit_infos'])
self.assertEqual(expected_picked_roll['spec'],
roll_result['picked_roll_details']['spec'])
self.assertEqual(
0, roll_result['picked_roll_details']['recipes_simulation_test']['rc'])
def test_nontrivial(self):
"""Tests the simplest nontrivial (i.e. expectation changes) roll scenario.
"""
deps = self.FakeRecipeDeps()
add_repo_with_basic_upstream_dependency(deps)
upstream = deps.repos['upstream']
spec = deps.main_repo.recipes_cfg_pb2
# Change implementation of up_mod in a way that's compatible, but changes
# expectations.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def cool_method(self):
self.m.step('upstream step', ['echo', 'whats down'])
''')
up_commit = upstream.commit('change "up_mod"')
# Roll again, and we can see the non-trivial roll now.
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertFalse(roll_result['trivial'])
spec.deps['upstream'].revision = up_commit.revision
expected_picked_roll = {
'commit_infos': {
'upstream': [
up_commit.as_roll_info()
],
},
'spec': jsonpb.MessageToDict(spec, preserving_proto_field_name=True),
}
picked_roll = roll_result['picked_roll_details']
self.assertEqual(expected_picked_roll['commit_infos'],
picked_roll['commit_infos'])
self.assertEqual(expected_picked_roll['spec'],
picked_roll['spec'])
self.assertEqual(
1, picked_roll['recipes_simulation_test']['rc'])
self.assertEqual(
0, picked_roll['recipes_simulation_test_train']['rc'])
def test_failure(self):
"""Tests the simplest scenario where an automated roll is not possible
because of incompatible API changes.
"""
deps = self.FakeRecipeDeps()
add_repo_with_basic_upstream_dependency(deps)
upstream = deps.repos['upstream']
# Change API of the recipe module in a totally incompatible way.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def uncool_method(self):
self.m.step('upstream step', ['echo', 'whats up'])
''')
upstream.commit('add incompatibility')
# watch our roll fail
roll_result = self.run_roll(deps)
self.assertFalse(roll_result['success'])
def test_jump_over_failure(self):
"""Tests whether the roller considers pulling more commits to make
the roll succeed, when earlier ones have incompatible API changes
fixed later.
"""
deps = self.FakeRecipeDeps()
add_repo_with_basic_upstream_dependency(deps)
upstream = deps.repos['upstream']
spec = deps.main_repo.recipes_cfg_pb2
# Change API of the recipe module in an incompatible way.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def uncool_method(self):
self.m.step('upstream step', ['echo', 'whats up'])
''')
middle_commit = upstream.commit('add incompatibility')
# Restore compatibility, but change expectations.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def cool_method(self):
self.m.step('upstream step', ['echo', 'whats down'])
''')
final_commit = upstream.commit('restore similar method')
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertFalse(roll_result['trivial'])
spec.deps['upstream'].revision = final_commit.revision
expected_picked_roll = {
'commit_infos': {
'upstream': [
middle_commit.as_roll_info(),
final_commit.as_roll_info(),
],
},
'spec': jsonpb.MessageToDict(spec, preserving_proto_field_name=True),
}
picked_roll = roll_result['picked_roll_details']
self.assertEqual(expected_picked_roll['commit_infos'],
picked_roll['commit_infos'])
self.assertEqual(expected_picked_roll['spec'],
picked_roll['spec'])
self.assertEqual(
1, picked_roll['recipes_simulation_test']['rc'])
self.assertEqual(
0, picked_roll['recipes_simulation_test_train']['rc'])
def test_pick_smallest_nontrivial_roll(self):
"""Test that with several nontrivial rolls possible, the minimal one
is picked.
"""
deps = self.FakeRecipeDeps()
add_repo_with_basic_upstream_dependency(deps)
upstream = deps.repos['upstream']
spec = deps.main_repo.recipes_cfg_pb2
# Change API of the recipe module in an incompatible way.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def uncool_method(self):
self.m.step('upstream step', ['echo', 'whats up'])
''')
middle_commit = upstream.commit('add incompatibility')
# Restore compatibility, but change expectations.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def cool_method(self):
self.m.step('upstream step', ['echo', 'whats down'])
''')
final_commit = upstream.commit('restore similar method')
# Create another change that would result in a nontrivial roll,
# which should not be picked - nontrivial rolls should be minimal.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def cool_method(self):
self.m.step('upstream step', ['echo', 'whats superdown'])
''')
upstream.commit('second nontrivial change')
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertFalse(roll_result['trivial'])
spec.deps['upstream'].revision = final_commit.revision
expected_picked_roll = {
'commit_infos': {
'upstream': [
middle_commit.as_roll_info(),
final_commit.as_roll_info(),
],
},
'spec': jsonpb.MessageToDict(spec, preserving_proto_field_name=True),
}
picked_roll = roll_result['picked_roll_details']
self.assertEqual(expected_picked_roll['commit_infos'],
picked_roll['commit_infos'])
self.assertEqual(expected_picked_roll['spec'],
picked_roll['spec'])
self.assertEqual(
1, picked_roll['recipes_simulation_test']['rc'])
self.assertEqual(
0, picked_roll['recipes_simulation_test_train']['rc'])
def test_pick_largest_trivial_roll(self):
"""Test that with several trivial rolls possible, the largest one is picked.
This helps avoid noise with several rolls where one is sufficient,
with no expectation changes.
"""
deps = self.FakeRecipeDeps()
add_repo_with_basic_upstream_dependency(deps)
upstream = deps.repos['upstream']
spec = deps.main_repo.recipes_cfg_pb2
# Change API of the recipe module in an incompatible way.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def uncool_method(self):
self.m.step('upstream step', ['echo', 'whats up'])
''')
first_commit = upstream.commit('add incompatibility')
# Restore compatibility, but change expectations.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def cool_method(self):
self.m.step('upstream step', ['echo', 'whats down'])
''')
second_commit = upstream.commit('restore similar method')
    # Create another change that, on its own, would be another nontrivial roll;
    # here it just becomes part of the larger trivial roll picked below.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def cool_method(self):
self.m.step('upstream step', ['echo', 'whats superdown'])
''')
third_commit = upstream.commit('second nontrivial change')
# Introduce another commit which makes the roll trivial again.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def cool_method(self):
self.m.step('upstream step', ['echo', 'whats up'])
''')
final_commit = upstream.commit('restore original behavior')
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertTrue(roll_result['trivial'])
spec.deps['upstream'].revision = final_commit.revision
expected_picked_roll = {
'commit_infos': {
'upstream': [
first_commit.as_roll_info(),
second_commit.as_roll_info(),
third_commit.as_roll_info(),
final_commit.as_roll_info(),
],
},
'spec': jsonpb.MessageToDict(spec, preserving_proto_field_name=True),
}
picked_roll = roll_result['picked_roll_details']
self.assertEqual(expected_picked_roll['commit_infos'],
picked_roll['commit_infos'])
self.assertEqual(expected_picked_roll['spec'],
picked_roll['spec'])
self.assertEqual(
0, picked_roll['recipes_simulation_test']['rc'])
def test_find_minimal_candidate(self):
"""Tests that the roller can automatically find a viable minimal
    roll candidate, in a scenario where the previous roll algorithm
    was getting stuck.
"""
deps = self.FakeRecipeDeps()
upstream = deps.add_repo('upstream')
super_upstream = deps.add_repo('super_upstream')
spec = deps.main_repo.recipes_cfg_pb2
# Now make upstream depend on super_upstream, then roll that into the main
# repo.
upstream.add_dep('super_upstream')
super_commit = upstream.commit('add dep on super_upstream')
with deps.main_repo.edit_recipes_cfg_pb2() as pkg_pb:
pkg_pb.deps['upstream'].revision = super_commit.revision
deps.main_repo.commit('roll upstream')
# Set up a recipe in the main repo depending on a module in upstream.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def cool_method(self):
self.m.step('upstream step', ['echo', 'whats up'])
''')
up_commit = upstream.commit('add up_mod')
with deps.main_repo.edit_recipes_cfg_pb2() as pkg_pb:
pkg_pb.deps['upstream'].revision = up_commit.revision
with deps.main_repo.write_recipe('my_recipe') as recipe:
recipe.DEPS = ['upstream/up_mod']
recipe.RunSteps.write('''
api.up_mod.cool_method()
''')
deps.main_repo.recipes_py('test', 'train')
deps.main_repo.commit('depend on upstream/up_mod')
    # Create a new commit in the super_upstream repo and roll it into upstream.
super_commit = super_upstream.commit('trivial commit')
with upstream.edit_recipes_cfg_pb2() as pkg_pb:
pkg_pb.deps['super_upstream'].revision = super_commit.revision
super_roll = upstream.commit('roll super_upstream')
# Change API of the upstream module in an incompatible way.
with upstream.write_module('up_mod') as mod:
mod.api.write('''
def uncool_method(self):
self.m.step('upstream step', ['echo', 'whats up'])
''')
up_commit = upstream.commit('incompatible up_mod')
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertTrue(roll_result['trivial'])
spec.deps['super_upstream'].revision = super_commit.revision
spec.deps['upstream'].revision = super_roll.revision
expected_picked_roll = {
'commit_infos': {
'upstream': [super_roll.as_roll_info()],
'super_upstream': [super_commit.as_roll_info()],
},
'spec': jsonpb.MessageToDict(spec, preserving_proto_field_name=True),
}
picked_roll = roll_result['picked_roll_details']
self.assertEqual(expected_picked_roll['commit_infos'],
picked_roll['commit_infos'])
self.assertEqual(expected_picked_roll['spec'],
picked_roll['spec'])
self.assertEqual(
0, picked_roll['recipes_simulation_test']['rc'])
def test_no_backwards_roll(self):
"""Tests that we never roll backwards."""
deps = self.FakeRecipeDeps()
upstream = deps.add_repo('upstream')
super_upstream = deps.add_repo('super_upstream')
original_super_commit = super_upstream.backend.commit_metadata('HEAD')
upstream.add_dep('super_upstream')
upstream.commit('add dep on super_upstream')
# Create a new commit in super_upstream repo and roll it to upstream.
super_commit = super_upstream.commit('trivial commit')
with upstream.edit_recipes_cfg_pb2() as pkg_pb:
pkg_pb.deps['super_upstream'].revision = super_commit.revision
up_commit = upstream.commit('roll')
# Roll above commits to main_repo.
with deps.main_repo.edit_recipes_cfg_pb2() as pkg_pb:
pkg_pb.deps['upstream'].revision = up_commit.revision
pkg_pb.deps['super_upstream'].revision = super_commit.revision
deps.main_repo.commit('roll upstream+super_upstream')
spec = deps.main_repo.recipes_cfg_pb2
    # Create a new commit in upstream that would result in a backwards roll.
with upstream.edit_recipes_cfg_pb2() as pkg_pb:
pkg_pb.deps['super_upstream'].revision = original_super_commit.revision
up_commit = upstream.commit('backwards commit')
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertEqual([], roll_result['roll_details'])
spec.deps['upstream'].revision = up_commit.revision
self.assertEqual(
roll_result['rejected_candidate_specs'],
[jsonpb.MessageToDict(spec, preserving_proto_field_name=True)],
)
def test_inconsistent_errors(self):
deps = self.FakeRecipeDeps()
upstream = deps.add_repo('upstream')
upstream_deeper = deps.add_repo('upstream_deeper')
upstream_deepest = deps.add_repo('upstream_deepest')
# Add:
# upstream_deeper -> upstream_deepest
# upstream -> upstream_deeper
# upstream -> upstream_deepest
upstream_deeper.add_dep('upstream_deepest')
upstream_deeper.commit('add dep on upstream_deepest')
upstream.add_dep('upstream_deeper', 'upstream_deepest')
upstream.commit('add dep on upstream_deepest + upstream_deeper')
# Roll all of that into main.
self.run_roll(deps)
# Create a new commit in deepest repo and roll it to deeper.
deepest_commit = upstream_deepest.commit('deep commit')
with upstream_deeper.edit_recipes_cfg_pb2() as pkg_pb:
pkg_pb.deps['upstream_deepest'].revision = deepest_commit.revision
upstream_deeper.commit('roll deepest')
# We shouldn't be able to roll upstream_deeper/upstream_deepest until
# upstream includes them. i.e. there should be no roll, because there are no
# valid roll candidates.
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertEqual([], roll_result['roll_details'])
self.assertGreater(len(roll_result['rejected_candidate_specs']), 0)
def test_inconsistent_candidates_do_not_advance(self):
deps = self.FakeRecipeDeps()
upstream = deps.add_repo('upstream')
upstream_deeper = deps.add_repo('upstream_deeper')
# Add:
# upstream -> upstream_deeper
upstream.add_dep('upstream_deeper')
upstream.commit('add dep on upstream_deeper')
# Roll all of that into main.
self.run_roll(deps)
# Create 2 commits in deepest repo that are not rolled into anything
with upstream_deeper.write_module('deeper1_mod') as mod:
mod.api.write('''
def method(self):
self.m.step('deeper1 step', ['echo', 'whats up'])
''')
upstream_deeper.commit('add deeper1_mod')
with upstream_deeper.write_module('deeper2_mod') as mod:
mod.api.write('''
def method(self):
self.m.step('deeper2 step', ['echo', 'whats up'])
''')
upstream_deeper.commit('add deeper2_mod')
    # Create a commit in the upstream repo
with upstream.write_module('upstream_mod') as mod:
mod.api.write('''
def method(self):
self.m.step('upstream step', ['echo', 'whats up'])
''')
upstream_commit = upstream.commit('add upstream_mod')
# We can't roll either commit in upstream_deeper because they are
# inconsistent with upstream's pin for upstream_deeper, but upstream's
# commit should still be able to roll
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
spec = deps.main_repo.recipes_cfg_pb2
expected_picked_roll = {
'commit_infos': {
'upstream': [upstream_commit.as_roll_info(),],
},
'spec': jsonpb.MessageToDict(spec, preserving_proto_field_name=True),
}
picked_roll = roll_result['picked_roll_details']
self.assertEqual(expected_picked_roll['commit_infos'],
picked_roll['commit_infos'])
self.assertEqual(expected_picked_roll['spec'], picked_roll['spec'])
  def test_non_candidate_commits_are_not_considered(self):
deps = self.FakeRecipeDeps()
upstream = deps.add_repo('upstream')
# Have the upstream's recipes directory not be the root of the repo
with upstream.edit_recipes_cfg_pb2() as recipes_cfg:
recipes_cfg.recipes_path = 'recipes'
upstream.commit('set recipes dir')
# Roll that into main.
self.run_roll(deps)
# Create a non-candidate CL by adding a file in the root of the repo
with upstream.write_file('some_file') as buf:
buf.write('hi!')
non_candidate_commit = upstream.commit('non-candidate commit')
# Create a candidate CL by adding a file in the recipes dir
with upstream.write_file('recipes/some_file') as buf:
buf.write('hello again!')
candidate_commit = upstream.commit('candidate commit')
# Rolling should not create a candidate config with the non-candidate commit
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
spec = deps.main_repo.recipes_cfg_pb2
expected_picked_roll = {
'commit_infos': {
'upstream': [
non_candidate_commit.as_roll_info(),
candidate_commit.as_roll_info(),
],
},
'spec': jsonpb.MessageToDict(spec, preserving_proto_field_name=True),
}
picked_roll = roll_result['picked_roll_details']
self.assertEqual(expected_picked_roll['commit_infos'],
picked_roll['commit_infos'])
self.assertEqual(expected_picked_roll['spec'], picked_roll['spec'])
self.assertEqual(len(roll_result['roll_details']), 1)
def test_roll_adds_dependency(self):
deps = self.FakeRecipeDeps()
upstream = deps.add_repo('upstream')
other = deps.add_repo('other')
with deps.main_repo.edit_recipes_cfg_pb2() as spec:
del spec.deps['other']
deps.main_repo.commit('remove other dep')
spec = deps.main_repo.recipes_cfg_pb2
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
self.assertEqual(spec, deps.main_repo.recipes_cfg_pb2) # noop
# Now we add a commit to 'upstream' which pulls in 'other'.
upstream.add_dep('other')
upstream.commit('add other dep')
with upstream.write_file('trivial') as fil:
fil.write('trivial file')
up_commit = upstream.commit('add trivial file')
roll_result = self.run_roll(deps)
self.assertTrue(roll_result['success'])
spec.deps['upstream'].revision = up_commit.revision
spec.deps['other'].CopyFrom(upstream.recipes_cfg_pb2.deps['other'])
self.assertEqual(spec, deps.main_repo.recipes_cfg_pb2)
if __name__ == '__main__':
test_env.main()
|
luci/recipes-py
|
unittests/autoroll_test.py
|
Python
|
apache-2.0
| 22,416
|
from io import BytesIO
from threading import Lock
import contextlib
import itertools
import os.path
import pickle
import shutil
import tempfile
import unittest
import sys
import numpy as np
import pandas as pd
import xray
from xray import Dataset, open_dataset, open_mfdataset, backends, save_mfdataset
from xray.backends.common import robust_getitem
from xray.core.pycompat import iteritems, PY3
from . import (TestCase, requires_scipy, requires_netCDF4, requires_pydap,
requires_scipy_or_netCDF4, requires_dask, requires_h5netcdf,
has_netCDF4, has_scipy)
from .test_dataset import create_test_data
try:
import netCDF4 as nc4
except ImportError:
pass
try:
import dask
import dask.array as da
except ImportError:
pass
def open_example_dataset(name, *args, **kwargs):
return open_dataset(os.path.join(os.path.dirname(__file__), 'data', name),
*args, **kwargs)
def create_masked_and_scaled_data():
x = np.array([np.nan, np.nan, 10, 10.1, 10.2])
encoding = {'_FillValue': -1, 'add_offset': 10,
'scale_factor': np.float32(0.1), 'dtype': 'i2'}
return Dataset({'x': ('t', x, {}, encoding)})
def create_encoded_masked_and_scaled_data():
attributes = {'_FillValue': -1, 'add_offset': 10,
'scale_factor': np.float32(0.1)}
return Dataset({'x': ('t', [-1, -1, 0, 1, 2], attributes)})
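# A worked note on the two helpers above: the encoded int16 values
# [-1, -1, 0, 1, 2] decode as scale_factor * value + add_offset, with the
# _FillValue (-1) mapped to NaN, i.e. [NaN, NaN, 10.0, 10.1, 10.2], exactly
# the x array built in create_masked_and_scaled_data().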
class TestCommon(TestCase):
def test_robust_getitem(self):
class UnreliableArrayFailure(Exception):
pass
class UnreliableArray(object):
def __init__(self, array, failures=1):
self.array = array
self.failures = failures
def __getitem__(self, key):
if self.failures > 0:
self.failures -= 1
raise UnreliableArrayFailure
return self.array[key]
array = UnreliableArray([0])
with self.assertRaises(UnreliableArrayFailure):
array[0]
self.assertEqual(array[0], 0)
actual = robust_getitem(array, 0, catch=UnreliableArrayFailure,
initial_delay=0)
self.assertEqual(actual, 0)
class Only32BitTypes(object):
pass
class DatasetIOTestCases(object):
def create_store(self):
raise NotImplementedError
def roundtrip(self, data, **kwargs):
raise NotImplementedError
def test_zero_dimensional_variable(self):
expected = create_test_data()
expected['float_var'] = ([], 1.0e9, {'units': 'units of awesome'})
expected['string_var'] = ([], np.array('foobar', dtype='S'))
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_write_store(self):
expected = create_test_data()
with self.create_store() as store:
expected.dump_to_store(store)
# we need to cf decode the store because it has time and
# non-dimension coordinates
actual = xray.decode_cf(store)
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_test_data(self):
expected = create_test_data()
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_load(self):
expected = create_test_data()
@contextlib.contextmanager
def assert_loads(vars=None):
if vars is None:
vars = expected
with self.roundtrip(expected) as actual:
for v in actual.values():
self.assertFalse(v._in_memory)
yield actual
for k, v in actual.items():
if k in vars:
self.assertTrue(v._in_memory)
self.assertDatasetAllClose(expected, actual)
with self.assertRaises(AssertionError):
# make sure the contextmanager works!
with assert_loads() as ds:
pass
with assert_loads() as ds:
ds.load()
with assert_loads(['var1', 'dim1', 'dim2']) as ds:
ds['var1'].load()
# verify we can read data even after closing the file
with self.roundtrip(expected) as ds:
actual = ds.load()
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_None_variable(self):
expected = Dataset({None: (('x', 'y'), [[0, 1], [2, 3]])})
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_object_dtype(self):
floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object)
floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object)
letters = np.array(['ab', 'cdef', 'g'], dtype=object)
letters_nans = np.array(['ab', 'cdef', np.nan], dtype=object)
all_nans = np.array([np.nan, np.nan], dtype=object)
original = Dataset({'floats': ('a', floats),
'floats_nans': ('a', floats_nans),
'letters': ('b', letters),
'letters_nans': ('b', letters_nans),
'all_nans': ('c', all_nans),
'nan': ([], np.nan)})
expected = original.copy(deep=True)
if isinstance(self, Only32BitTypes):
# for netCDF3 tests, expect the results to come back as characters
expected['letters_nans'] = expected['letters_nans'].astype('S')
expected['letters'] = expected['letters'].astype('S')
with self.roundtrip(original) as actual:
try:
self.assertDatasetIdentical(expected, actual)
except AssertionError:
                # Most stores use '' for nans in strings, but some don't.
                # First try the ideal case (where the store returns exactly
                # the original Dataset), then try a more realistic case.
# ScipyDataTest, NetCDF3ViaNetCDF4DataTest and NetCDF4DataTest
# all end up using this case.
expected['letters_nans'][-1] = ''
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_string_data(self):
expected = Dataset({'x': ('t', ['ab', 'cdef'])})
with self.roundtrip(expected) as actual:
if isinstance(self, Only32BitTypes):
expected['x'] = expected['x'].astype('S')
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_datetime_data(self):
times = pd.to_datetime(['2000-01-01', '2000-01-02', 'NaT'])
expected = Dataset({'t': ('t', times), 't0': times[0]})
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_timedelta_data(self):
time_deltas = pd.to_timedelta(['1h', '2h', 'NaT'])
expected = Dataset({'td': ('td', time_deltas), 'td0': time_deltas[0]})
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_example_1_netcdf(self):
expected = open_example_dataset('example_1.nc')
with self.roundtrip(expected) as actual:
# we allow the attributes to differ since that
# will depend on the encoding used. For example,
# without CF encoding 'actual' will end up with
# a dtype attribute.
self.assertDatasetEqual(expected, actual)
def test_roundtrip_coordinates(self):
original = Dataset({'foo': ('x', [0, 1])},
{'x': [2, 3], 'y': ('a', [42]), 'z': ('x', [4, 5])})
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(original, actual)
expected = original.drop('foo')
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
expected = original.copy()
expected.attrs['coordinates'] = 'something random'
with self.assertRaisesRegexp(ValueError, 'cannot serialize'):
with self.roundtrip(expected):
pass
expected = original.copy(deep=True)
expected['foo'].attrs['coordinates'] = 'something random'
with self.assertRaisesRegexp(ValueError, 'cannot serialize'):
with self.roundtrip(expected):
pass
def test_orthogonal_indexing(self):
in_memory = create_test_data()
with self.roundtrip(in_memory) as on_disk:
indexers = {'dim1': np.arange(3), 'dim2': np.arange(4),
'dim3': np.arange(5)}
expected = in_memory.isel(**indexers)
actual = on_disk.isel(**indexers)
self.assertDatasetAllClose(expected, actual)
            # do it twice, to make sure we've switched from orthogonal -> numpy
            # indexing once the values are cached
actual = on_disk.isel(**indexers)
self.assertDatasetAllClose(expected, actual)
def test_pickle(self):
on_disk = open_example_dataset('bears.nc')
unpickled = pickle.loads(pickle.dumps(on_disk))
self.assertDatasetIdentical(on_disk, unpickled)
class CFEncodedDataTest(DatasetIOTestCases):
def test_roundtrip_strings_with_fill_value(self):
values = np.array(['ab', 'cdef', np.nan], dtype=object)
encoding = {'_FillValue': np.string_('X'), 'dtype': np.dtype('S1')}
original = Dataset({'x': ('t', values, {}, encoding)})
expected = original.copy(deep=True)
expected['x'][:2] = values[:2].astype('S')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(expected, actual)
original = Dataset({'x': ('t', values, {}, {'_FillValue': '\x00'})})
if not isinstance(self, Only32BitTypes):
# these stores can save unicode strings
expected = original.copy(deep=True)
if isinstance(self, BaseNetCDF4Test):
# netCDF4 can't keep track of an empty _FillValue for VLEN
# variables
expected['x'][-1] = ''
elif (type(self) is NetCDF3ViaNetCDF4DataTest
or (has_netCDF4 and type(self) is GenericNetCDFDataTest)):
# netCDF4 can't keep track of an empty _FillValue for nc3, either:
# https://github.com/Unidata/netcdf4-python/issues/273
expected['x'][-1] = np.string_('')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_mask_and_scale(self):
decoded = create_masked_and_scaled_data()
encoded = create_encoded_masked_and_scaled_data()
with self.roundtrip(decoded) as actual:
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(decoded, decode_cf=False) as actual:
# TODO: this assumes that all roundtrips will first
# encode. Is that something we want to test for?
self.assertDatasetAllClose(encoded, actual)
with self.roundtrip(encoded, decode_cf=False) as actual:
self.assertDatasetAllClose(encoded, actual)
# make sure roundtrip encoding didn't change the
# original dataset.
self.assertDatasetIdentical(encoded,
create_encoded_masked_and_scaled_data())
with self.roundtrip(encoded) as actual:
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(encoded, decode_cf=False) as actual:
self.assertDatasetAllClose(encoded, actual)
def test_coordinates_encoding(self):
def equals_latlon(obj):
return obj == 'lat lon' or obj == 'lon lat'
original = Dataset({'temp': ('x', [0, 1]), 'precip': ('x', [0, -1])},
{'lat': ('x', [2, 3]), 'lon': ('x', [4, 5])})
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(actual, original)
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates']))
self.assertTrue(equals_latlon(ds['precip'].attrs['coordinates']))
self.assertNotIn('coordinates', ds.attrs)
self.assertNotIn('coordinates', ds['lat'].attrs)
self.assertNotIn('coordinates', ds['lon'].attrs)
modified = original.drop(['temp', 'precip'])
with self.roundtrip(modified) as actual:
self.assertDatasetIdentical(actual, modified)
with create_tmp_file() as tmp_file:
modified.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
self.assertTrue(equals_latlon(ds.attrs['coordinates']))
self.assertNotIn('coordinates', ds['lat'].attrs)
self.assertNotIn('coordinates', ds['lon'].attrs)
def test_roundtrip_endian(self):
ds = Dataset({'x': np.arange(3, 10, dtype='>i2'),
'y': np.arange(3, 20, dtype='<i4'),
'z': np.arange(3, 30, dtype='=i8'),
'w': ('x', np.arange(3, 10, dtype=np.float))})
with self.roundtrip(ds) as actual:
            # technically these datasets are slightly different:
            # one holds mixed endian data (ds), the other should be
            # all big endian (actual). assertDatasetIdentical
            # should still pass though.
self.assertDatasetIdentical(ds, actual)
if type(self) is NetCDF4DataTest:
ds['z'].encoding['endian'] = 'big'
with self.assertRaises(NotImplementedError):
with self.roundtrip(ds) as actual:
pass
_counter = itertools.count()
@contextlib.contextmanager
def create_tmp_file(suffix='.nc'):
temp_dir = tempfile.mkdtemp()
path = os.path.join(temp_dir, 'temp-%s.%s' % (next(_counter), suffix))
try:
yield path
finally:
shutil.rmtree(temp_dir)
class BaseNetCDF4Test(CFEncodedDataTest):
def test_open_group(self):
# Create a netCDF file with a dataset stored within a group
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as rootgrp:
foogrp = rootgrp.createGroup('foo')
ds = foogrp
ds.createDimension('time', size=10)
x = np.arange(10)
ds.createVariable('x', np.int32, dimensions=('time',))
ds.variables['x'][:] = x
expected = Dataset()
expected['x'] = ('time', x)
# check equivalent ways to specify group
for group in 'foo', '/foo', 'foo/', '/foo/':
with open_dataset(tmp_file, group=group) as actual:
self.assertVariableEqual(actual['x'], expected['x'])
# check that missing group raises appropriate exception
with self.assertRaises(IOError):
open_dataset(tmp_file, group='bar')
with self.assertRaisesRegexp(ValueError, 'must be a string'):
open_dataset(tmp_file, group=(1, 2, 3))
def test_open_subgroup(self):
# Create a netCDF file with a dataset stored within a group within a group
with create_tmp_file() as tmp_file:
rootgrp = nc4.Dataset(tmp_file, 'w')
foogrp = rootgrp.createGroup('foo')
bargrp = foogrp.createGroup('bar')
ds = bargrp
ds.createDimension('time', size=10)
x = np.arange(10)
ds.createVariable('x', np.int32, dimensions=('time',))
ds.variables['x'][:] = x
rootgrp.close()
expected = Dataset()
expected['x'] = ('time', x)
# check equivalent ways to specify group
for group in 'foo/bar', '/foo/bar', 'foo/bar/', '/foo/bar/':
with open_dataset(tmp_file, group=group) as actual:
self.assertVariableEqual(actual['x'], expected['x'])
def test_write_groups(self):
data1 = create_test_data()
data2 = data1 * 2
with create_tmp_file() as tmp_file:
data1.to_netcdf(tmp_file, group='data/1')
data2.to_netcdf(tmp_file, group='data/2', mode='a')
with open_dataset(tmp_file, group='data/1') as actual1:
self.assertDatasetIdentical(data1, actual1)
with open_dataset(tmp_file, group='data/2') as actual2:
self.assertDatasetIdentical(data2, actual2)
def test_roundtrip_character_array(self):
with create_tmp_file() as tmp_file:
values = np.array([['a', 'b', 'c'], ['d', 'e', 'f']], dtype='S')
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('x', 2)
nc.createDimension('string3', 3)
v = nc.createVariable('x', np.dtype('S1'), ('x', 'string3'))
v[:] = values
values = np.array(['abc', 'def'], dtype='S')
expected = Dataset({'x': ('x', values)})
with open_dataset(tmp_file) as actual:
self.assertDatasetIdentical(expected, actual)
# regression test for #157
with self.roundtrip(actual) as roundtripped:
self.assertDatasetIdentical(expected, roundtripped)
def test_default_to_char_arrays(self):
data = Dataset({'x': np.array(['foo', 'zzzz'], dtype='S')})
with self.roundtrip(data) as actual:
self.assertDatasetIdentical(data, actual)
self.assertEqual(actual['x'].dtype, np.dtype('S4'))
def test_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as ds:
ds.createDimension('time', size=10)
ds.createVariable('time', np.int32, dimensions=('time',))
units = 'days since 1999-01-01'
ds.variables['time'].setncattr('units', units)
ds.variables['time'][:] = np.arange(10) + 4
expected = Dataset()
time = pd.date_range('1999-01-05', periods=10)
encoding = {'units': units, 'dtype': np.dtype('int32')}
expected['time'] = ('time', time, {}, encoding)
with open_dataset(tmp_file) as actual:
self.assertVariableEqual(actual['time'], expected['time'])
actual_encoding = dict((k, v) for k, v in iteritems(actual['time'].encoding)
if k in expected['time'].encoding)
self.assertDictEqual(actual_encoding, expected['time'].encoding)
def test_dump_and_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as ds:
ds.createDimension('time', size=10)
ds.createVariable('time', np.int32, dimensions=('time',))
units = 'days since 1999-01-01'
ds.variables['time'].setncattr('units', units)
ds.variables['time'][:] = np.arange(10) + 4
with open_dataset(tmp_file) as xray_dataset:
with create_tmp_file() as tmp_file2:
xray_dataset.to_netcdf(tmp_file2)
with nc4.Dataset(tmp_file2, 'r') as ds:
self.assertEqual(ds.variables['time'].getncattr('units'), units)
self.assertArrayEqual(ds.variables['time'], np.arange(10) + 4)
def test_compression_encoding(self):
data = create_test_data()
data['var2'].encoding.update({'zlib': True,
'chunksizes': (5, 5),
'fletcher32': True})
with self.roundtrip(data) as actual:
for k, v in iteritems(data['var2'].encoding):
self.assertEqual(v, actual['var2'].encoding[k])
# regression test for #156
expected = data.isel(dim1=0)
with self.roundtrip(expected) as actual:
self.assertDatasetEqual(expected, actual)
def test_mask_and_scale(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('t', 5)
nc.createVariable('x', 'int16', ('t',), fill_value=-1)
v = nc.variables['x']
v.set_auto_maskandscale(False)
v.add_offset = 10
v.scale_factor = 0.1
v[:] = np.array([-1, -1, 0, 1, 2])
# first make sure netCDF4 reads the masked and scaled data correctly
with nc4.Dataset(tmp_file, mode='r') as nc:
expected = np.ma.array([-1, -1, 10, 10.1, 10.2],
mask=[True, True, False, False, False])
actual = nc.variables['x'][:]
self.assertArrayEqual(expected, actual)
# now check xray
with open_dataset(tmp_file) as ds:
expected = create_masked_and_scaled_data()
self.assertDatasetIdentical(expected, ds)
def test_0dimensional_variable(self):
        # This test verifies our work-around for this netCDF4-python bug:
# https://github.com/Unidata/netcdf4-python/pull/220
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode='w') as nc:
v = nc.createVariable('x', 'int16')
v[...] = 123
with open_dataset(tmp_file) as ds:
expected = Dataset({'x': ((), 123)})
self.assertDatasetIdentical(expected, ds)
def test_variable_len_strings(self):
with create_tmp_file() as tmp_file:
values = np.array(['foo', 'bar', 'baz'], dtype=object)
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('x', 3)
v = nc.createVariable('x', str, ('x',))
v[:] = values
expected = Dataset({'x': ('x', values)})
for kwargs in [{}, {'decode_cf': True}]:
with open_dataset(tmp_file, **kwargs) as actual:
self.assertDatasetIdentical(expected, actual)
@requires_netCDF4
class NetCDF4DataTest(BaseNetCDF4Test, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore(tmp_file, mode='w') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file)
with open_dataset(tmp_file, **kwargs) as ds:
yield ds
def test_variable_order(self):
# doesn't work with scipy or h5py :(
ds = Dataset()
ds['a'] = 1
ds['z'] = 2
ds['b'] = 3
ds.coords['c'] = 4
with self.roundtrip(ds) as actual:
self.assertEqual(list(ds), list(actual))
@requires_netCDF4
@requires_dask
class NetCDF4ViaDaskDataTest(NetCDF4DataTest):
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file)
with open_dataset(tmp_file, **kwargs) as ds:
yield ds.chunk()
@requires_scipy
class ScipyInMemoryDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
fobj = BytesIO()
yield backends.ScipyDataStore(fobj, 'w')
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
serialized = data.to_netcdf()
with open_dataset(BytesIO(serialized), **kwargs) as ds:
yield ds
@requires_scipy
class ScipyOnDiskDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.ScipyDataStore(tmp_file, mode='w') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine='scipy')
with open_dataset(tmp_file, engine='scipy', **kwargs) as ds:
yield ds
def test_array_attrs(self):
ds = Dataset(attrs={'foo': [[1, 2], [3, 4]]})
with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
with self.roundtrip(ds) as roundtripped:
pass
def test_roundtrip_example_1_netcdf_gz(self):
if sys.version_info[:2] < (2, 7):
with self.assertRaisesRegexp(ValueError,
'gzipped netCDF not supported'):
open_example_dataset('example_1.nc.gz')
else:
with open_example_dataset('example_1.nc.gz') as expected:
with open_example_dataset('example_1.nc') as actual:
self.assertDatasetIdentical(expected, actual)
def test_netcdf3_endianness(self):
# regression test for GH416
expected = open_example_dataset('bears.nc', engine='scipy')
for var in expected.values():
self.assertTrue(var.dtype.isnative)
@requires_netCDF4
class NetCDF3ViaNetCDF4DataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore(tmp_file, mode='w',
format='NETCDF3_CLASSIC') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format='NETCDF3_CLASSIC',
engine='netcdf4')
with open_dataset(tmp_file, engine='netcdf4', **kwargs) as ds:
yield ds
@requires_scipy_or_netCDF4
class GenericNetCDFDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
# verify that we can read and write netCDF3 files as long as we have scipy
# or netCDF4-python installed
def test_write_store(self):
# there's no specific store to test here
pass
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format='netcdf3_64bit')
with open_dataset(tmp_file, **kwargs) as ds:
yield ds
def test_engine(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
data.to_netcdf('foo.nc', engine='foobar')
with self.assertRaisesRegexp(ValueError, 'invalid engine'):
data.to_netcdf(engine='netcdf4')
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file)
with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
open_dataset(tmp_file, engine='foobar')
netcdf_bytes = data.to_netcdf()
with self.assertRaisesRegexp(ValueError, 'can only read'):
open_dataset(BytesIO(netcdf_bytes), engine='foobar')
def test_cross_engine_read_write_netcdf3(self):
data = create_test_data()
valid_engines = set()
if has_netCDF4:
valid_engines.add('netcdf4')
if has_scipy:
valid_engines.add('scipy')
for write_engine in valid_engines:
for format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format=format,
engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file,
engine=read_engine) as actual:
self.assertDatasetAllClose(data, actual)
@requires_h5netcdf
@requires_netCDF4
class H5NetCDFDataTest(BaseNetCDF4Test, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
yield backends.H5NetCDFStore(tmp_file, 'w')
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine='h5netcdf')
with open_dataset(tmp_file, engine='h5netcdf', **kwargs) as ds:
yield ds
def test_orthogonal_indexing(self):
# doesn't work for h5py (without using dask as an intermediate layer)
pass
def test_complex(self):
expected = Dataset({'x': ('y', np.ones(5) + 1j * np.ones(5))})
with self.roundtrip(expected) as actual:
self.assertDatasetEqual(expected, actual)
def test_cross_engine_read_write_netcdf4(self):
        # Drop dim3, because its labels include strings. These do not appear
        # to be read properly with python-netCDF4, which converts them into
        # unicode instead of leaving them as bytes.
data = create_test_data().drop('dim3')
data.attrs['foo'] = 'bar'
valid_engines = ['netcdf4', 'h5netcdf']
for write_engine in valid_engines:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file, engine=read_engine) as actual:
self.assertDatasetIdentical(data, actual)
def test_read_byte_attrs_as_unicode(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as nc:
nc.foo = b'bar'
actual = open_dataset(tmp_file)
expected = Dataset(attrs={'foo': 'bar'})
self.assertDatasetIdentical(expected, actual)
@requires_dask
@requires_scipy
@requires_netCDF4
class DaskTest(TestCase):
def test_open_mfdataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset([tmp1, tmp2]) as actual:
self.assertIsInstance(actual.foo.variable.data, da.Array)
self.assertEqual(actual.foo.variable.data.chunks,
((5, 5),))
self.assertDatasetAllClose(original, actual)
with open_mfdataset([tmp1, tmp2], chunks={'x': 3}) as actual:
self.assertEqual(actual.foo.variable.data.chunks,
((3, 2, 3, 2),))
with self.assertRaisesRegexp(IOError, 'no files to open'):
open_mfdataset('foo-bar-baz-*.nc')
def test_preprocess_mfdataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
preprocess = lambda ds: ds.assign_coords(z=0)
expected = preprocess(original)
with open_mfdataset(tmp, preprocess=preprocess) as actual:
self.assertDatasetIdentical(expected, actual)
def test_lock(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp, format='NETCDF3_CLASSIC')
with open_dataset(tmp, chunks=10) as ds:
task = ds.foo.data.dask[ds.foo.data.name, 0]
self.assertIsInstance(task[-1], type(Lock()))
with open_mfdataset(tmp) as ds:
task = ds.foo.data.dask[ds.foo.data.name, 0]
self.assertIsInstance(task[-1], type(Lock()))
with open_mfdataset(tmp, engine='scipy') as ds:
task = ds.foo.data.dask[ds.foo.data.name, 0]
self.assertNotIsInstance(task[-1], type(Lock()))
def test_save_mfdataset_roundtrip(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
datasets = [original.isel(x=slice(5)),
original.isel(x=slice(5, 10))]
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
save_mfdataset(datasets, [tmp1, tmp2])
with open_mfdataset([tmp1, tmp2]) as actual:
self.assertDatasetIdentical(actual, original)
def test_save_mfdataset_invalid(self):
ds = Dataset()
with self.assertRaisesRegexp(ValueError, 'cannot use mode'):
save_mfdataset([ds, ds], ['same', 'same'])
with self.assertRaisesRegexp(ValueError, 'same length'):
save_mfdataset([ds, ds], ['only one path'])
def test_open_and_do_math(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_mfdataset(tmp) as ds:
actual = 1.0 * ds
self.assertDatasetAllClose(original, actual)
def test_open_dataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(tmp, chunks={'x': 5}) as actual:
self.assertIsInstance(actual.foo.variable.data, da.Array)
self.assertEqual(actual.foo.variable.data.chunks, ((5, 5),))
self.assertDatasetIdentical(original, actual)
with open_dataset(tmp, chunks=5) as actual:
self.assertDatasetIdentical(original, actual)
with open_dataset(tmp) as actual:
self.assertIsInstance(actual.foo.variable.data, np.ndarray)
self.assertDatasetIdentical(original, actual)
def test_dask_roundtrip(self):
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
chunks = {'dim1': 4, 'dim2': 4, 'dim3': 4, 'time': 10}
with open_dataset(tmp, chunks=chunks) as dask_ds:
self.assertDatasetIdentical(data, dask_ds)
with create_tmp_file() as tmp2:
dask_ds.to_netcdf(tmp2)
with open_dataset(tmp2) as on_disk:
self.assertDatasetIdentical(data, on_disk)
@requires_scipy_or_netCDF4
@requires_pydap
class PydapTest(TestCase):
def test_cmp_local_file(self):
url = 'http://test.opendap.org/opendap/hyrax/data/nc/bears.nc'
@contextlib.contextmanager
def create_datasets():
actual = open_dataset(url, engine='pydap')
with open_example_dataset('bears.nc') as expected:
                # don't check attributes since pydap doesn't serialize them
                # correctly; also skip the "bears" variable since the test DAP
                # server incorrectly concatenates it.
actual = actual.drop('bears')
expected = expected.drop('bears')
yield actual, expected
with create_datasets() as (actual, expected):
self.assertDatasetEqual(actual, expected)
with create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(l=2), expected.isel(l=2))
with create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(i=0, j=-1),
expected.isel(i=0, j=-1))
with create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(j=slice(1, 2)),
expected.isel(j=slice(1, 2)))
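# --- Illustrative sketch, not part of the original test suite ---
# A minimal, self-contained version of the write/read round trip these tests
# exercise. It assumes xray with netCDF4-python installed (so the default
# engine can handle the data types involved); the helper name is made up.
def _example_roundtrip():
    import tempfile
    import numpy as np
    import xray
    original = xray.Dataset({'foo': ('x', np.arange(5.0))})
    with tempfile.NamedTemporaryFile(suffix='.nc') as tmp:
        original.to_netcdf(tmp.name)          # write with the default engine
        with xray.open_dataset(tmp.name) as on_disk:
            assert original.identical(on_disk)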
|
markelg/xray
|
xray/test/test_backends.py
|
Python
|
apache-2.0
| 36,295
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import AssetSetAssetServiceClient
__all__ = ("AssetSetAssetServiceClient",)
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/asset_set_asset_service/__init__.py
|
Python
|
apache-2.0
| 690
|
__ssl = __import__('ssl')
from eventlet.patcher import slurp_properties
slurp_properties(__ssl, globals(), srckeys=dir(__ssl))
import sys
import errno
time = __import__('time')
from eventlet.support import get_errno
from eventlet.hubs import trampoline
from eventlet.greenio import set_nonblocking, GreenSocket, SOCKET_CLOSED, CONNECT_ERR, CONNECT_SUCCESS
orig_socket = __import__('socket')
socket = orig_socket.socket
if sys.version_info >= (2,7):
has_ciphers = True
timeout_exc = SSLError
else:
has_ciphers = False
timeout_exc = orig_socket.timeout
__patched__ = ['SSLSocket', 'wrap_socket', 'sslwrap_simple']
class GreenSSLSocket(__ssl.SSLSocket):
""" This is a green version of the SSLSocket class from the ssl module added
in 2.6. For documentation on it, please see the Python standard
documentation.
Python nonblocking ssl objects don't give errors when the other end
of the socket is closed (they do notice when the other end is shutdown,
though). Any write/read operations will simply hang if the socket is
closed from the other end. There is no obvious fix for this problem;
it appears to be a limitation of Python's ssl object implementation.
A workaround is to set a reasonable timeout on the socket using
settimeout(), and to close/reopen the connection when a timeout
occurs at an unexpected juncture in the code.
"""
# we are inheriting from SSLSocket because its constructor calls
# do_handshake whose behavior we wish to override
def __init__(self, sock, *args, **kw):
if not isinstance(sock, GreenSocket):
sock = GreenSocket(sock)
self.act_non_blocking = sock.act_non_blocking
self._timeout = sock.gettimeout()
super(GreenSSLSocket, self).__init__(sock.fd, *args, **kw)
# the superclass initializer trashes the methods so we remove
# the local-object versions of them and let the actual class
# methods shine through
try:
for fn in orig_socket._delegate_methods:
delattr(self, fn)
except AttributeError:
pass
def settimeout(self, timeout):
self._timeout = timeout
def gettimeout(self):
return self._timeout
def setblocking(self, flag):
if flag:
self.act_non_blocking = False
self._timeout = None
else:
self.act_non_blocking = True
self._timeout = 0.0
def _call_trampolining(self, func, *a, **kw):
if self.act_non_blocking:
return func(*a, **kw)
else:
while True:
try:
return func(*a, **kw)
except SSLError, exc:
if get_errno(exc) == SSL_ERROR_WANT_READ:
trampoline(self,
read=True,
timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
elif get_errno(exc) == SSL_ERROR_WANT_WRITE:
trampoline(self,
write=True,
timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
return self._call_trampolining(
super(GreenSSLSocket, self).write, data)
def read(self, len=1024):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
return self._call_trampolining(
super(GreenSSLSocket, self).read, len)
def send (self, data, flags=0):
if self._sslobj:
return self._call_trampolining(
super(GreenSSLSocket, self).send, data, flags)
else:
trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
return socket.send(self, data, flags)
def sendto (self, data, addr, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
else:
trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
return socket.sendto(self, data, addr, flags)
def sendall (self, data, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
if v == 0:
trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
return amount
else:
while True:
try:
                    return socket.sendall(self, data, flags)
except orig_socket.error, e:
if self.act_non_blocking:
raise
if get_errno(e) == errno.EWOULDBLOCK:
trampoline(self, write=True,
timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
if get_errno(e) in SOCKET_CLOSED:
return ''
raise
def recv(self, buflen=1024, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
read = self.read(buflen)
return read
else:
while True:
try:
return socket.recv(self, buflen, flags)
except orig_socket.error, e:
if self.act_non_blocking:
raise
if get_errno(e) == errno.EWOULDBLOCK:
trampoline(self, read=True,
timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
if get_errno(e) in SOCKET_CLOSED:
return ''
raise
def recv_into (self, buffer, nbytes=None, flags=0):
if not self.act_non_blocking:
trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
return super(GreenSSLSocket, self).recv_into(buffer, nbytes, flags)
def recvfrom (self, addr, buflen=1024, flags=0):
if not self.act_non_blocking:
trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
return super(GreenSSLSocket, self).recvfrom(addr, buflen, flags)
def recvfrom_into (self, buffer, nbytes=None, flags=0):
if not self.act_non_blocking:
trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
return super(GreenSSLSocket, self).recvfrom_into(buffer, nbytes, flags)
def unwrap(self):
return GreenSocket(self._call_trampolining(
super(GreenSSLSocket, self).unwrap))
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
return self._call_trampolining(
super(GreenSSLSocket, self).do_handshake)
def _socket_connect(self, addr):
real_connect = socket.connect
if self.act_non_blocking:
return real_connect(self, addr)
else:
# *NOTE: gross, copied code from greenio because it's not factored
# well enough to reuse
if self.gettimeout() is None:
while True:
try:
return real_connect(self, addr)
except orig_socket.error, exc:
if get_errno(exc) in CONNECT_ERR:
trampoline(self, write=True)
elif get_errno(exc) in CONNECT_SUCCESS:
return
else:
raise
else:
end = time.time() + self.gettimeout()
while True:
try:
real_connect(self, addr)
except orig_socket.error, exc:
if get_errno(exc) in CONNECT_ERR:
trampoline(self, write=True,
timeout=end-time.time(), timeout_exc=timeout_exc('timed out'))
elif get_errno(exc) in CONNECT_SUCCESS:
return
else:
raise
if time.time() >= end:
raise timeout_exc('timed out')
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
# *NOTE: grrrrr copied this code from ssl.py because of the reference
# to socket.connect which we don't want to call directly
if self._sslobj:
raise ValueError("attempt to connect already-connected SSLSocket!")
self._socket_connect(addr)
if has_ciphers:
self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.ca_certs, self.ciphers)
else:
self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.ca_certs)
if self.do_handshake_on_connect:
self.do_handshake()
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
# RDW grr duplication of code from greenio
if self.act_non_blocking:
newsock, addr = socket.accept(self)
else:
while True:
try:
newsock, addr = socket.accept(self)
set_nonblocking(newsock)
break
except orig_socket.error, e:
if get_errno(e) != errno.EWOULDBLOCK:
raise
trampoline(self, read=True, timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
new_ssl = type(self)(newsock,
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True,
cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version,
ca_certs=self.ca_certs,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs)
return (new_ssl, addr)
def dup(self):
raise NotImplementedError("Can't dup an ssl object")
SSLSocket = GreenSSLSocket
def wrap_socket(sock, *a, **kw):
return GreenSSLSocket(sock, *a, **kw)
if hasattr(__ssl, 'sslwrap_simple'):
def sslwrap_simple(sock, keyfile=None, certfile=None):
"""A replacement for the old socket.ssl function. Designed
        for compatibility with Python 2.5 and earlier. Will disappear in
Python 3.0."""
ssl_sock = GreenSSLSocket(sock, keyfile=keyfile, certfile=certfile,
server_side=False,
cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23,
ca_certs=None)
return ssl_sock
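# --- Illustrative sketch, not part of the original module ---
# The GreenSSLSocket docstring above suggests guarding against peers that
# disappear without a proper shutdown by setting a timeout. A minimal client
# sketch under that assumption; host and port are placeholders, and no
# certificate verification is configured (cert_reqs defaults to CERT_NONE).
def _example_green_ssl_client(host, port=443):
    raw = orig_socket.socket(orig_socket.AF_INET, orig_socket.SOCK_STREAM)
    conn = wrap_socket(raw)
    conn.settimeout(10.0)  # a "reasonable timeout", per the docstring
    try:
        conn.connect((host, port))  # connect, then the TLS handshake runs
        conn.write('HEAD / HTTP/1.0\r\n\r\n')
        return conn.read(1024)
    finally:
        conn.close()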
|
ioram7/keystone-federado-pgid2013
|
build/eventlet/eventlet/green/ssl.py
|
Python
|
apache-2.0
| 12,519
|
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import canopy
fX = theano.config.floatX
def test_output_nanguard():
def output_nanguard_fn(a):
class CustomNode(treeano.NodeImpl):
def compute_output(self, network, in_vw):
network.create_vw(
"default",
variable=in_vw.variable / a,
shape=in_vw.shape
)
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=()),
CustomNode("c")]
).network()
return canopy.handlers.handled_fn(
network,
[canopy.handlers.output_nanguard()],
{"x": "i"},
{"out": "s"})
fn1 = output_nanguard_fn(3)
np.testing.assert_equal(fn1({"x": 3}), {"out": np.array(1)})
np.testing.assert_equal(fn1({"x": -6}), {"out": np.array(-2)})
fn2 = output_nanguard_fn(0)
try:
fn2({"x": 3})
except Exception as e:
assert e.args[0]["error_type"] == "inf"
np.testing.assert_equal(e.args[0]["value"], np.array(np.inf))
else:
assert False
try:
fn2({"x": -6})
except Exception as e:
nt.assert_equal(e.args[0]["error_type"], "inf")
np.testing.assert_equal(e.args[0]["value"], np.array(-np.inf))
else:
assert False
try:
fn2({"x": 0})
except Exception as e:
nt.assert_equal(e.args[0]["error_type"], "nan")
np.testing.assert_equal(e.args[0]["value"], np.array(np.nan))
else:
assert False
try:
fn1({"x": 6e10})
except Exception as e:
nt.assert_equal(e.args[0]["error_type"], "big")
np.testing.assert_allclose(e.args[0]["value"],
np.array(2e10),
rtol=1e-5)
else:
assert False
def test_nanguardmode():
def nanguardmode_fn(a):
class CustomNode(treeano.NodeImpl):
def compute_output(self, network, in_vw):
network.create_vw(
"default",
variable=in_vw.variable / a,
shape=in_vw.shape
)
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=()),
CustomNode("c")]
).network()
return canopy.handlers.handled_fn(
network,
[canopy.handlers.nanguardmode()],
{"x": "i"},
{"out": "s"})
fn1 = nanguardmode_fn(3)
np.testing.assert_equal(fn1({"x": 3}), {"out": np.array(1)})
np.testing.assert_equal(fn1({"x": -6}), {"out": np.array(-2)})
@nt.raises(AssertionError)
def raises_fn1(x):
fn1({"x": x})
raises_fn1(6e10)
fn2 = nanguardmode_fn(0)
@nt.raises(AssertionError)
def raises_fn2(x):
fn2({"x": x})
raises_fn2(3)
raises_fn2(-6)
raises_fn2(0)
raises_fn2(6e10)
def test_save_last_inputs_and_networks():
class StateDiffNode(treeano.NodeImpl):
def compute_output(self, network, in_vw):
foo_vw = network.create_vw(
"foo",
shape=(),
is_shared=True,
tags={"parameter", "weight"},
inits=[]
)
network.create_vw(
"default",
variable=abs(in_vw.variable - foo_vw.variable),
shape=()
)
network = tn.AdamNode(
"adam",
{"subtree": tn.SequentialNode(
"s",
[tn.InputNode("i", shape=()),
StateDiffNode("ss")]),
"cost": tn.ReferenceNode("r", reference="s")}
).network()
# eagerly create shared variables
network.build()
save_handler = canopy.handlers.save_last_inputs_and_networks(5)
fn = canopy.handlers.handled_fn(
network,
[save_handler],
{"x": "i"},
{"out": "s"},
include_updates=True)
inputs = [{"x": treeano.utils.as_fX(np.random.randn())} for _ in range(10)]
outputs = [fn(i) for i in inputs]
nt.assert_equal(save_handler.inputs_, inputs[-5:])
# PY3: calling list on zip to make it eager
# otherwise, save_handler.value_dicts_ looks at the mutating
    # value dicts
for value_dict, i, o in list(zip(save_handler.value_dicts_,
inputs[-5:],
outputs[-5:])):
canopy.network_utils.load_value_dict(network, value_dict)
nt.assert_equal(o, fn(i))
def test_network_nanguard():
class CustomNode(treeano.NodeImpl):
input_keys = ()
def compute_output(self, network):
network.create_vw(
"default",
is_shared=True,
shape=(),
inits=[]
)
network = CustomNode("c").network()
# build eagerly to share weights
network.build()
fn = canopy.handlers.handled_fn(
network,
[canopy.handlers.network_nanguard()],
{},
{})
vw = network["c"].get_vw("default")
for x in [3, 4, 1e9, 9e9, -9e9, 0]:
vw.variable.set_value(treeano.utils.as_fX(x))
fn({})
for x in [np.inf, -np.inf, np.nan, 2e10]:
vw.variable.set_value(treeano.utils.as_fX(x))
nt.raises(Exception)(lambda x: fn(x))({})
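# --- Illustrative sketch, not part of the original tests ---
# The tests above rely on canopy.handlers.output_nanguard() raising an
# exception whose first argument is a dict describing the offending output.
# A hedged helper, assuming that error-dict shape, showing how a failure
# could be inspected programmatically:
def _describe_nanguard_failure(fn, inputs):
    """Return None if fn(inputs) succeeds, else the guard's error summary."""
    try:
        fn(inputs)
    except Exception as e:
        info = e.args[0]
        # keys used in the assertions above: "error_type" and "value"
        return {"error_type": info["error_type"], "value": info["value"]}
    return None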
|
diogo149/treeano
|
canopy/handlers/tests/debug_test.py
|
Python
|
apache-2.0
| 5,452
|
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.osutil.default import DefaultOSUtil
class ArchUtil(DefaultOSUtil):
def __init__(self):
super(ArchUtil, self).__init__()
self.jit_enabled = True
def is_dhcp_enabled(self):
return True
def start_network(self):
return shellutil.run("systemctl start systemd-networkd", chk_err=False)
def restart_if(self, iface):
shellutil.run("systemctl restart systemd-networkd")
def restart_ssh_service(self):
# SSH is socket activated on CoreOS. No need to restart it.
pass
def stop_dhcp_service(self):
return shellutil.run("systemctl stop systemd-networkd", chk_err=False)
def start_dhcp_service(self):
return shellutil.run("systemctl start systemd-networkd", chk_err=False)
def start_agent_service(self):
return shellutil.run("systemctl start waagent", chk_err=False)
def stop_agent_service(self):
return shellutil.run("systemctl stop waagent", chk_err=False)
def get_dhcp_pid(self):
        ret = shellutil.run_get_output("pidof systemd-networkd")
return ret[1] if ret[0] == 0 else None
def conf_sshd(self, disable_password):
# Don't whack the system default sshd conf
pass
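# --- Illustrative sketch, not part of the original module ---
# shellutil.run_get_output returns an (exit_code, output) pair, which is why
# get_dhcp_pid checks ret[0] before using ret[1]. A small, hedged example of
# exercising the class directly; systemd-networkd may not be running, so a
# None result is perfectly legitimate.
if __name__ == '__main__':
    util = ArchUtil()
    pid = util.get_dhcp_pid()
    print("systemd-networkd pid: {0}".format(pid if pid else "not running"))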
|
hglkrijger/WALinuxAgent
|
azurelinuxagent/common/osutil/arch.py
|
Python
|
apache-2.0
| 1,959
|
# -*- coding: utf-8 -*-
import unittest
from copy import deepcopy
from openprocurement.api.tests.base import snitch
from openprocurement.tender.competitivedialogue.tests.base import (
BaseCompetitiveDialogEUStage2ContentWebTest,
test_bids,
test_lots,
author
)
from openprocurement.tender.openeu.tests.qualification_blanks import (
# TenderStage2EUQualificationResourceTest
post_tender_qualifications,
get_tender_qualifications_collection,
patch_tender_qualifications,
get_tender_qualifications,
patch_tender_qualifications_after_status_change,
# TenderStage2EU2LotQualificationResourceTest
lot_patch_tender_qualifications,
lot_get_tender_qualifications_collection,
tender_qualification_cancelled,
# TenderStage2EUQualificationDocumentResourceTest
not_found,
create_qualification_document,
put_qualification_document,
patch_qualification_document,
create_qualification_document_after_status_change,
put_qualification_document_after_status_change,
create_qualification_document_bot,
patch_document_not_author,
# TenderStage2EUQualificationComplaintResourceTest
create_tender_qualification_complaint_invalid,
create_tender_qualification_complaint,
patch_tender_qualification_complaint,
review_tender_qualification_complaint,
review_tender_qualification_stopping_complaint,
get_tender_qualification_complaint,
get_tender_qualification_complaints,
# TenderStage2EULotQualificationComplaintResourceTest
create_tender_lot_qualification_complaint,
patch_tender_lot_qualification_complaint,
get_tender_lot_qualification_complaint,
get_tender_lot_qualification_complaints,
# TenderStage2EU2LotQualificationComplaintResourceTest
create_tender_2lot_qualification_complaint,
patch_tender_2lot_qualification_complaint,
# TenderStage2EUQualificationComplaintDocumentResourceTest
complaint_not_found,
create_tender_qualification_complaint_document,
put_tender_qualification_complaint_document,
patch_tender_qualification_complaint_document,
# TenderStage2EU2LotQualificationComplaintDocumentResourceTest
create_tender_2lot_qualification_complaint_document,
put_tender_2lot_qualification_complaint_document,
patch_tender_2lot_qualification_complaint_document,
)
test_tender_bids = deepcopy(test_bids[:2])
for test_bid in test_tender_bids:
test_bid['tenderers'] = [author]
class TenderStage2EUQualificationResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest):
    initial_status = 'active.tendering'  # the 'active.pre-qualification' status is set in setUp
initial_bids = test_tender_bids
initial_auth = ('Basic', ('broker', ''))
def setUp(self):
super(TenderStage2EUQualificationResourceTest, self).setUp()
        # update periods so that the tender status can be changed by the chronograph
self.set_status('active.pre-qualification', extra={'status': 'active.tendering'})
# simulate chronograph tick
auth = self.app.authorization
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification')
self.app.authorization = auth
test_post_tender_qualifications = snitch(post_tender_qualifications)
test_get_tender_qualifications_collection = snitch(get_tender_qualifications_collection)
test_patch_tender_qualifications = snitch(patch_tender_qualifications)
test_get_tender_qualifications = snitch(get_tender_qualifications)
test_patch_tender_qualifications_after_status_change = snitch(patch_tender_qualifications_after_status_change)
class TenderStage2EU2LotQualificationResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest):
    initial_status = 'active.tendering'  # the 'active.pre-qualification.stand-still' status is set in setUp
initial_lots = deepcopy(2 * test_lots)
initial_bids = test_tender_bids
initial_auth = ('Basic', ('broker', ''))
def setUp(self):
super(TenderStage2EU2LotQualificationResourceTest, self).setUp()
        # update periods so that the tender status can be changed by the chronograph
self.set_status('active.pre-qualification', extra={'status': 'active.tendering'})
# simulate chronograph tick
auth = self.app.authorization
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification')
self.app.authorization = auth
response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
self.assertEqual(response.content_type, 'application/json')
qualifications = response.json['data']
test_patch_tender_qualifications = snitch(lot_patch_tender_qualifications)
test_get_tender_qualifications_collection = snitch(lot_get_tender_qualifications_collection)
test_tender_qualification_cancelled = snitch(tender_qualification_cancelled)
class TenderStage2EUQualificationDocumentResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest):
initial_status = 'active.tendering'
initial_bids = test_tender_bids
initial_auth = ('Basic', ('broker', ''))
def setUp(self):
super(TenderStage2EUQualificationDocumentResourceTest, self).setUp()
        # update periods so that the tender status can be changed by the chronograph
self.time_shift('active.pre-qualification')
self.check_chronograph()
# list qualifications
response = self.app.get('/tenders/{}/qualifications?acc_token={}'.format(self.tender_id, self.tender_token))
self.assertEqual(response.status, '200 OK')
self.qualifications = response.json['data']
self.assertEqual(len(self.qualifications), 2)
test_not_found = snitch(not_found)
test_create_qualification_document = snitch(create_qualification_document)
test_put_qualification_document = snitch(put_qualification_document)
test_patch_qualification_document = snitch(patch_qualification_document)
test_create_qualification_document_after_status_change = snitch(create_qualification_document_after_status_change)
test_put_qualification_document_after_status_change = snitch(put_qualification_document_after_status_change)
test_create_qualification_document_bot = snitch(create_qualification_document_bot)
test_patch_document_not_author = snitch(patch_document_not_author)
class TenderStage2EUQualificationComplaintResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest):
    initial_status = 'active.tendering'  # the 'active.pre-qualification.stand-still' status is set in setUp
initial_bids = test_tender_bids
initial_auth = ('Basic', ('broker', ''))
def setUp(self):
super(TenderStage2EUQualificationComplaintResourceTest, self).setUp()
        # update periods so that the tender status can be changed by the chronograph
self.set_status('active.pre-qualification', extra={'status': 'active.tendering'})
# simulate chronograph tick
auth = self.app.authorization
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification')
self.app.authorization = auth
response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
self.assertEqual(response.content_type, 'application/json')
qualifications = response.json['data']
self.qualification_id = qualifications[0]['id']
for qualification in qualifications:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
self.tender_id, qualification['id'], self.tender_token),
{'data': {'status': 'active', 'qualified': True, 'eligible': True}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'status': 'active.pre-qualification.stand-still'}})
self.assertEqual(response.status, '200 OK')
test_create_tender_qualification_complaint_invalid = snitch(create_tender_qualification_complaint_invalid)
test_create_tender_qualification_complaint = snitch(create_tender_qualification_complaint)
test_patch_tender_qualification_complaint = snitch(patch_tender_qualification_complaint)
test_review_tender_qualification_complaint = snitch(review_tender_qualification_complaint)
test_review_tender_qualification_stopping_complaint = snitch(review_tender_qualification_stopping_complaint)
test_get_tender_qualification_complaint = snitch(get_tender_qualification_complaint)
test_get_tender_qualification_complaints = snitch(get_tender_qualification_complaints)
class TenderStage2EULotQualificationComplaintResourceTest(TenderStage2EUQualificationComplaintResourceTest):
initial_lots = test_lots
initial_auth = ('Basic', ('broker', ''))
test_create_tender_qualification_complaint = snitch(create_tender_lot_qualification_complaint)
test_patch_tender_qualification_complaint = snitch(patch_tender_lot_qualification_complaint)
test_get_tender_qualification_complaint = snitch(get_tender_lot_qualification_complaint)
test_get_tender_qualification_complaints = snitch(get_tender_lot_qualification_complaints)
class TenderStage2EU2LotQualificationComplaintResourceTest(TenderStage2EULotQualificationComplaintResourceTest):
initial_lots = deepcopy(2 * test_lots)
initial_auth = ('Basic', ('broker', ''))
test_create_tender_qualification_complaint = snitch(create_tender_2lot_qualification_complaint)
test_patch_tender_qualification_complaint = snitch(patch_tender_2lot_qualification_complaint)
class TenderStage2EUQualificationComplaintDocumentResourceTest(BaseCompetitiveDialogEUStage2ContentWebTest):
    initial_status = 'active.tendering'  # the 'active.pre-qualification.stand-still' status is set in setUp
initial_bids = test_tender_bids
initial_auth = ('Basic', ('broker', ''))
def setUp(self):
super(TenderStage2EUQualificationComplaintDocumentResourceTest, self).setUp()
        # update periods so that the tender status can be changed by the chronograph
self.set_status('active.pre-qualification', extra={'status': 'active.tendering'})
# simulate chronograph tick
auth = self.app.authorization
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.json['data']['status'], 'active.pre-qualification')
self.app.authorization = auth
response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
self.assertEqual(response.content_type, 'application/json')
qualifications = response.json['data']
self.qualification_id = qualifications[0]['id']
for qualification in qualifications:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
self.tender_id, qualification['id'], self.tender_token),
{'data': {'status': 'active', 'qualified': True, 'eligible': True}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'status': 'active.pre-qualification.stand-still'}})
self.assertEqual(response.status, '200 OK')
# Create complaint for qualification
response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(
self.tender_id, self.qualification_id, self.initial_bids_tokens.values()[0]),
{'data': {'title': 'complaint title',
'description': 'complaint description',
'author': self.bids[0]['tenderers'][0]}})
complaint = response.json['data']
self.complaint_id = complaint['id']
self.complaint_owner_token = response.json['access']['token']
test_not_found = snitch(complaint_not_found)
test_create_tender_qualification_complaint_document = snitch(create_tender_qualification_complaint_document)
test_put_tender_qualification_complaint_document = snitch(put_tender_qualification_complaint_document)
test_patch_tender_qualification_complaint_document = snitch(patch_tender_qualification_complaint_document)
class TenderStage2EU2LotQualificationComplaintDocumentResourceTest(TenderStage2EUQualificationComplaintDocumentResourceTest):
initial_lots = 2 * test_lots
initial_auth = ('Basic', ('broker', ''))
test_create_tender_qualification_complaint_document = snitch(create_tender_2lot_qualification_complaint_document)
test_put_tender_qualification_complaint_document = snitch(put_tender_2lot_qualification_complaint_document)
test_patch_tender_qualification_complaint_document = snitch(patch_tender_2lot_qualification_complaint_document)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderStage2EUQualificationResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EU2LotQualificationResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EUQualificationDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EUQualificationComplaintResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EULotQualificationComplaintResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EU2LotQualificationComplaintResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EUQualificationComplaintDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderStage2EU2LotQualificationComplaintDocumentResourceTest))
return suite
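# --- Illustrative sketch, not part of the original module ---
# Several setUp() methods above repeat the same "simulate chronograph tick"
# steps. A hedged helper showing how that boilerplate could be factored out;
# it assumes the same self.app / self.tender_id attributes the tests use.
def _simulate_chronograph_tick(test_case, expected_status='active.pre-qualification'):
    saved_auth = test_case.app.authorization
    test_case.app.authorization = ('Basic', ('chronograph', ''))
    response = test_case.app.patch_json('/tenders/{}'.format(test_case.tender_id),
                                        {'data': {'id': test_case.tender_id}})
    test_case.assertEqual(response.json['data']['status'], expected_status)
    test_case.app.authorization = saved_auth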
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
openprocurement/openprocurement.tender.competitivedialogue
|
openprocurement/tender/competitivedialogue/tests/stage2/qualification.py
|
Python
|
apache-2.0
| 14,465
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling, Inc.
# Author: Matthew Hooker <matt@cloudscaling.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import re
import six
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _get_path_validator_regex():
# rfc3986 path validator regex from
# http://jmrware.com/articles/2009/uri_regexp/URI_regex.html
pchar = "([A-Za-z0-9\-._~!$&'()*+,;=:@]|%[0-9A-Fa-f]{2})"
path = "((/{pchar}*)*|"
path += "/({pchar}+(/{pchar}*)*)?|"
path += "{pchar}+(/{pchar}*)*|"
path += "{pchar}+(/{pchar}*)*|)"
path = path.format(pchar=pchar)
return re.compile(path)
VALIDATE_PATH_RE = _get_path_validator_regex()
def validate_str(max_length=None):
def _do(val):
if not isinstance(val, six.string_types):
return False
if max_length and len(val) > max_length:
return False
return True
return _do
def validate_int(max_value=None):
def _do(val):
if not isinstance(val, int):
return False
if max_value and val > max_value:
return False
return True
return _do
def validate_url_path(val):
"""True if val is matched by the path component grammar in rfc3986."""
if not validate_str()(val):
return False
return VALIDATE_PATH_RE.match(val).end() == len(val)
def validate_image_path(val):
if not validate_str()(val):
return False
bucket_name = val.split('/')[0]
manifest_path = val[len(bucket_name) + 1:]
if not len(bucket_name) or not len(manifest_path):
return False
if val[0] == '/':
return False
    # make sure the image path is rfc3986 compliant
# prepend '/' to make input validate
if not validate_url_path('/' + val):
return False
return True
def validate_user_data(user_data):
"""Check if the user_data is encoded properly."""
try:
user_data = base64.b64decode(user_data)
except TypeError:
return False
return True
def validate(args, validator):
"""Validate values of args against validators in validator.
:param args: Dict of values to be validated.
:param validator: A dict where the keys map to keys in args
and the values are validators.
Applies each validator to ``args[key]``
:returns: True if validation succeeds. Otherwise False.
A validator should be a callable which accepts 1 argument and which
returns True if the argument passes validation. False otherwise.
A validator should not raise an exception to indicate validity of the
argument.
Only validates keys which show up in both args and validator.
"""
for key in validator:
if key not in args:
continue
f = validator[key]
assert callable(f)
if not f(args[key]):
LOG.debug(_("%(key)s with value %(value)s failed"
" validator %(name)s"),
{'key': key, 'value': args[key], 'name': f.__name__})
return False
return True
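# --- Illustrative sketch, not part of the original module ---
# A minimal example of how the validators above compose with validate();
# the argument names are made up for illustration.
def _example_usage():
    args = {'instance_id': 42, 'image_path': 'my-bucket/manifest.xml'}
    validators = {
        'instance_id': validate_int(max_value=1000),
        'image_path': validate_image_path,
        'missing_key': validate_str(16),  # ignored: key not present in args
    }
    return validate(args, validators)  # True for the values above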
|
sacharya/nova
|
nova/api/validator.py
|
Python
|
apache-2.0
| 3,787
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1Container(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'name': 'str',
'image': 'str',
'command': 'list[str]',
'args': 'list[str]',
'working_dir': 'str',
'ports': 'list[V1ContainerPort]',
'env': 'list[V1EnvVar]',
'resources': 'V1ResourceRequirements',
'volume_mounts': 'list[V1VolumeMount]',
'liveness_probe': 'V1Probe',
'readiness_probe': 'V1Probe',
'lifecycle': 'V1Lifecycle',
'termination_message_path': 'str',
'image_pull_policy': 'str',
'security_context': 'V1SecurityContext',
'stdin': 'bool',
'stdin_once': 'bool',
'tty': 'bool'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'name': 'name',
'image': 'image',
'command': 'command',
'args': 'args',
'working_dir': 'workingDir',
'ports': 'ports',
'env': 'env',
'resources': 'resources',
'volume_mounts': 'volumeMounts',
'liveness_probe': 'livenessProbe',
'readiness_probe': 'readinessProbe',
'lifecycle': 'lifecycle',
'termination_message_path': 'terminationMessagePath',
'image_pull_policy': 'imagePullPolicy',
'security_context': 'securityContext',
'stdin': 'stdin',
'stdin_once': 'stdinOnce',
'tty': 'tty'
}
def __init__(self, name=None, image=None, command=None, args=None, working_dir=None, ports=None, env=None, resources=None, volume_mounts=None, liveness_probe=None, readiness_probe=None, lifecycle=None, termination_message_path=None, image_pull_policy=None, security_context=None, stdin=None, stdin_once=None, tty=None):
"""
V1Container - a model defined in Swagger
"""
self._name = name
self._image = image
self._command = command
self._args = args
self._working_dir = working_dir
self._ports = ports
self._env = env
self._resources = resources
self._volume_mounts = volume_mounts
self._liveness_probe = liveness_probe
self._readiness_probe = readiness_probe
self._lifecycle = lifecycle
self._termination_message_path = termination_message_path
self._image_pull_policy = image_pull_policy
self._security_context = security_context
self._stdin = stdin
self._stdin_once = stdin_once
self._tty = tty
@property
def name(self):
"""
Gets the name of this V1Container.
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
:return: The name of this V1Container.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1Container.
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
:param name: The name of this V1Container.
:type: str
"""
self._name = name
@property
def image(self):
"""
Gets the image of this V1Container.
Docker image name. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md
:return: The image of this V1Container.
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""
Sets the image of this V1Container.
Docker image name. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md
:param image: The image of this V1Container.
:type: str
"""
self._image = image
@property
def command(self):
"""
Gets the command of this V1Container.
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands
:return: The command of this V1Container.
:rtype: list[str]
"""
return self._command
@command.setter
def command(self, command):
"""
Sets the command of this V1Container.
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands
:param command: The command of this V1Container.
:type: list[str]
"""
self._command = command
@property
def args(self):
"""
Gets the args of this V1Container.
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands
:return: The args of this V1Container.
:rtype: list[str]
"""
return self._args
@args.setter
def args(self, args):
"""
Sets the args of this V1Container.
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands
:param args: The args of this V1Container.
:type: list[str]
"""
self._args = args
@property
def working_dir(self):
"""
Gets the working_dir of this V1Container.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
:return: The working_dir of this V1Container.
:rtype: str
"""
return self._working_dir
@working_dir.setter
def working_dir(self, working_dir):
"""
Sets the working_dir of this V1Container.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
:param working_dir: The working_dir of this V1Container.
:type: str
"""
self._working_dir = working_dir
@property
def ports(self):
"""
Gets the ports of this V1Container.
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.
:return: The ports of this V1Container.
:rtype: list[V1ContainerPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""
Sets the ports of this V1Container.
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.
:param ports: The ports of this V1Container.
:type: list[V1ContainerPort]
"""
self._ports = ports
@property
def env(self):
"""
Gets the env of this V1Container.
List of environment variables to set in the container. Cannot be updated.
:return: The env of this V1Container.
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""
Sets the env of this V1Container.
List of environment variables to set in the container. Cannot be updated.
:param env: The env of this V1Container.
:type: list[V1EnvVar]
"""
self._env = env
@property
def resources(self):
"""
Gets the resources of this V1Container.
Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#resources
:return: The resources of this V1Container.
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""
Sets the resources of this V1Container.
Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#resources
:param resources: The resources of this V1Container.
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def volume_mounts(self):
"""
Gets the volume_mounts of this V1Container.
        Pod volumes to mount into the container's filesystem. Cannot be updated.
:return: The volume_mounts of this V1Container.
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""
Sets the volume_mounts of this V1Container.
        Pod volumes to mount into the container's filesystem. Cannot be updated.
:param volume_mounts: The volume_mounts of this V1Container.
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def liveness_probe(self):
"""
Gets the liveness_probe of this V1Container.
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes
:return: The liveness_probe of this V1Container.
:rtype: V1Probe
"""
return self._liveness_probe
@liveness_probe.setter
def liveness_probe(self, liveness_probe):
"""
Sets the liveness_probe of this V1Container.
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes
:param liveness_probe: The liveness_probe of this V1Container.
:type: V1Probe
"""
self._liveness_probe = liveness_probe
@property
def readiness_probe(self):
"""
Gets the readiness_probe of this V1Container.
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes
:return: The readiness_probe of this V1Container.
:rtype: V1Probe
"""
return self._readiness_probe
@readiness_probe.setter
def readiness_probe(self, readiness_probe):
"""
Sets the readiness_probe of this V1Container.
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes
:param readiness_probe: The readiness_probe of this V1Container.
:type: V1Probe
"""
self._readiness_probe = readiness_probe
@property
def lifecycle(self):
"""
Gets the lifecycle of this V1Container.
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:return: The lifecycle of this V1Container.
:rtype: V1Lifecycle
"""
return self._lifecycle
@lifecycle.setter
def lifecycle(self, lifecycle):
"""
Sets the lifecycle of this V1Container.
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param lifecycle: The lifecycle of this V1Container.
:type: V1Lifecycle
"""
self._lifecycle = lifecycle
@property
def termination_message_path(self):
"""
Gets the termination_message_path of this V1Container.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.
:return: The termination_message_path of this V1Container.
:rtype: str
"""
return self._termination_message_path
@termination_message_path.setter
def termination_message_path(self, termination_message_path):
"""
Sets the termination_message_path of this V1Container.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.
:param termination_message_path: The termination_message_path of this V1Container.
:type: str
"""
self._termination_message_path = termination_message_path
@property
def image_pull_policy(self):
"""
Gets the image_pull_policy of this V1Container.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md#updating-images
:return: The image_pull_policy of this V1Container.
:rtype: str
"""
return self._image_pull_policy
@image_pull_policy.setter
def image_pull_policy(self, image_pull_policy):
"""
Sets the image_pull_policy of this V1Container.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md#updating-images
:param image_pull_policy: The image_pull_policy of this V1Container.
:type: str
"""
self._image_pull_policy = image_pull_policy
@property
def security_context(self):
"""
Gets the security_context of this V1Container.
Security options the pod should run with. More info: http://releases.k8s.io/release-1.2/docs/design/security_context.md
:return: The security_context of this V1Container.
:rtype: V1SecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""
Sets the security_context of this V1Container.
Security options the pod should run with. More info: http://releases.k8s.io/release-1.2/docs/design/security_context.md
:param security_context: The security_context of this V1Container.
:type: V1SecurityContext
"""
self._security_context = security_context
@property
def stdin(self):
"""
Gets the stdin of this V1Container.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:return: The stdin of this V1Container.
:rtype: bool
"""
return self._stdin
@stdin.setter
def stdin(self, stdin):
"""
Sets the stdin of this V1Container.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:param stdin: The stdin of this V1Container.
:type: bool
"""
self._stdin = stdin
@property
def stdin_once(self):
"""
Gets the stdin_once of this V1Container.
        Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
:return: The stdin_once of this V1Container.
:rtype: bool
"""
return self._stdin_once
@stdin_once.setter
def stdin_once(self, stdin_once):
"""
Sets the stdin_once of this V1Container.
        Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
:param stdin_once: The stdin_once of this V1Container.
:type: bool
"""
self._stdin_once = stdin_once
@property
def tty(self):
"""
Gets the tty of this V1Container.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:return: The tty of this V1Container.
:rtype: bool
"""
return self._tty
@tty.setter
def tty(self, tty):
"""
Sets the tty of this V1Container.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:param tty: The tty of this V1Container.
:type: bool
"""
self._tty = tty
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1Container.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
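# Illustrative usage sketch (not part of the generated client). It assumes the
# swagger-generated V1Container.__init__ earlier in this module takes no
# required arguments and that attributes are assigned through the properties
# defined above; adjust if the constructor signature differs.
if __name__ == "__main__":
    example = V1Container()
    example.image_pull_policy = "IfNotPresent"
    example.termination_message_path = "/dev/termination-log"
    example.stdin = False
    example.tty = False
    print(example.to_str())  # pretty-printed dict built by to_dict()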
|
detiber/lib_openshift
|
lib_openshift/models/v1_container.py
|
Python
|
apache-2.0
| 22,330
|
# -*- coding: utf-8 -*-
import random, sys, copy, os, pygame
from pygame.locals import *
FPS = 30 # frames per second to update the screen
WINWIDTH = 800 # width of the program's window, in pixels
WINHEIGHT = 600 # height in pixels
HALF_WINWIDTH = int(WINWIDTH / 2)
HALF_WINHEIGHT = int(WINHEIGHT / 2)
# The total width and height of each tile in pixels.
TILEWIDTH = 50
TILEHEIGHT = 85
TILEFLOORHEIGHT = 40
BRIGHTBLUE = ( 0, 170, 255)
WHITE = (255, 255, 255)
BGCOLOR = BRIGHTBLUE
def readLevelsFile(filename):
assert os.path.exists(filename)
    mapFile = open(filename, 'r')
    content = mapFile.readlines() + ['\r\n']  # append one extra '\r\n' line so the final level is terminated
mapFile.close()
# print content
levels = []
levelNum = 0
mapTextLines = []
mapObj = []
for lineNum in range(len(content)):
line = content[lineNum].rstrip('\r\n')
if ';' in line:
line = line[:line.find(';')]
if line != '':
mapTextLines.append(line)
        # A blank line marks the end of a level; assemble this level's map.
elif line == '' and len(mapTextLines) > 0:
maxWidth = -1
for i in range(len(mapTextLines)):
if len(mapTextLines[i]) > maxWidth:
maxWidth = len(mapTextLines[i])
for i in range(len(mapTextLines)):
mapTextLines[i] += ' ' * (maxWidth - len(mapTextLines[i]))
for x in range(len(mapTextLines[0])):
mapObj.append([])
for y in range(len(mapTextLines)):
for x in range(maxWidth):
mapObj[x].append(mapTextLines[y][x])
startx = None # The x and y for the player's starting position
starty = None
goals = [] # list of (x, y) tuples for each goal.
stars = [] # list of (x, y) for each star's starting position.
# print maxWidth
for x in range(maxWidth):
for y in range(len(mapObj[x])):
if mapObj[x][y] in ('@', '+'):
# '@' is player, '+' is player & goal
startx = x
starty = y
if mapObj[x][y] in ('.', '+', '*'):
# '.' is goal, '*' is star & goal
goals.append((x, y))
if mapObj[x][y] in ('$', '*'):
# '$' is star
stars.append((x, y))
            assert startx is not None and starty is not None, 'Level %s (around line %s) in %s is missing a "@" or "+" to mark the start point.' % (levelNum+1, lineNum, filename)
assert len(goals) > 0, 'Level %s (around line %s) in %s must have at least one goal.' % (levelNum+1, lineNum, filename)
assert len(stars) >= len(goals), 'Level %s (around line %s) in %s is impossible to solve. It has %s goals but only %s stars.' % (levelNum+1, lineNum, filename, len(goals), len(stars))
# Create level object and starting game state object.
gameStateObj = {'player': (startx, starty),
'stepCounter': 0,
'stars': stars}
levelObj = {'width': maxWidth,
'height': len(mapObj),
'mapObj': mapObj,
'goals': goals,
'startState': gameStateObj}
levels.append(levelObj)
# Reset the variables for reading the next map.
mapTextLines = []
mapObj = []
gameStateObj = {}
levelNum += 1
return levels
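# Illustrative sketch of the level-file format consumed above (inferred from
# this parser and the tile mappings set up in main(); the real
# starPusherLevels.txt is not reproduced here):
#
#     ; text after a semicolon is a comment
#     #####
#     #@$.#
#     #####
#                     <- a blank line terminates the level
#
# '@' = player, '+' = player on a goal, '$' = star, '*' = star on a goal,
# '.' = goal, '#' and 'x' = walls, 'o' = inside floor, ' ' = outside floor,
# '1'-'4' = rock/tree decorations.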
def main():
global FPSCLOCK, DISPLAYSURF, IMAGESDICT, TILEMAPPING, OUTSIDEDECOMAPPING, BASICFONT, PLAYERIMAGES, currentImage
# Pygame initialization and basic set up of the global variables.
pygame.init()
FPSCLOCK = pygame.time.Clock()
# Because the Surface object stored in DISPLAYSURF was returned
# from the pygame.display.set_mode() function, this is the
# Surface object that is drawn to the actual computer screen
# when pygame.display.update() is called.
DISPLAYSURF = pygame.display.set_mode((WINWIDTH, WINHEIGHT))
pygame.display.set_caption('Star Pusher')
BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
# A global dict value that will contain all the Pygame
# Surface objects returned by pygame.image.load().
IMAGESDICT = {'uncovered goal': pygame.image.load('RedSelector.png'),
'covered goal': pygame.image.load('Selector.png'),
'star': pygame.image.load('Star.png'),
'corner': pygame.image.load('Wall_Block_Tall.png'),
'wall': pygame.image.load('Wood_Block_Tall.png'),
'inside floor': pygame.image.load('Plain_Block.png'),
'outside floor': pygame.image.load('Grass_Block.png'),
'title': pygame.image.load('star_title.png'),
'solved': pygame.image.load('star_solved.png'),
'princess': pygame.image.load('princess.png'),
'boy': pygame.image.load('boy.png'),
'catgirl': pygame.image.load('catgirl.png'),
'horngirl': pygame.image.load('horngirl.png'),
'pinkgirl': pygame.image.load('pinkgirl.png'),
'rock': pygame.image.load('Rock.png'),
'short tree': pygame.image.load('Tree_Short.png'),
'tall tree': pygame.image.load('Tree_Tall.png'),
'ugly tree': pygame.image.load('Tree_Ugly.png')}
# These dict values are global, and map the character that appears
# in the level file to the Surface object it represents.
TILEMAPPING = {'x': IMAGESDICT['corner'],
'#': IMAGESDICT['wall'],
'o': IMAGESDICT['inside floor'],
' ': IMAGESDICT['outside floor']}
OUTSIDEDECOMAPPING = {'1': IMAGESDICT['rock'],
'2': IMAGESDICT['short tree'],
'3': IMAGESDICT['tall tree'],
'4': IMAGESDICT['ugly tree']}
currentImage = 0
PLAYERIMAGES = [IMAGESDICT['princess'],
IMAGESDICT['boy'],
IMAGESDICT['catgirl'],
IMAGESDICT['horngirl'],
IMAGESDICT['pinkgirl']]
levels = readLevelsFile('starPusherLevels.txt')
levelNum = 0
while True:
levelObj = levels[levelNum]
mapObj = levelObj['mapObj']
gameStateObj = copy.deepcopy(levelObj['startState'])
mapSurf = drawMap(mapObj, gameStateObj, levelObj['goals'])
mapSurfRect = mapSurf.get_rect()
        mapSurfRect.center = (HALF_WINWIDTH, HALF_WINHEIGHT)
DISPLAYSURF.fill(BGCOLOR)
DISPLAYSURF.blit(mapSurf, mapSurfRect)
        for event in pygame.event.get(): # event handling loop
            if event.type == MOUSEBUTTONUP:
                if event.button == 1 or event.button == 5:
                    levelNum += 1
                if event.button == 3 or event.button == 4:
                    levelNum -= 1
                levelNum %= len(levels)  # keep the level index in range when clicking past either end
                print(levelNum)
                currentImage = levelNum % len(PLAYERIMAGES)
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
FPSCLOCK.tick()
def drawMap(mapObj, gameStateObj, goals):
"""Draws the map to a Surface object, including the player and
stars. This function does not call pygame.display.update(), nor
does it draw the "Level" and "Steps" text in the corner."""
# mapSurf will be the single Surface object that the tiles are drawn
# on, so that it is easy to position the entire map on the DISPLAYSURF
# Surface object. First, the width and height must be calculated.
mapSurfWidth = len(mapObj) * TILEWIDTH
mapSurfHeight = (len(mapObj[0]) - 1) * TILEFLOORHEIGHT + TILEHEIGHT
mapSurf = pygame.Surface((mapSurfWidth, mapSurfHeight))
mapSurf.fill(BGCOLOR) # start with a blank color on the surface.
# Draw the tile sprites onto this surface.
for x in range(len(mapObj)):
for y in range(len(mapObj[x])):
spaceRect = pygame.Rect((x * TILEWIDTH, y * TILEFLOORHEIGHT, TILEWIDTH, TILEHEIGHT))
            if mapObj[x][y] in TILEMAPPING:
                baseTile = TILEMAPPING[mapObj[x][y]]
            elif mapObj[x][y] in OUTSIDEDECOMAPPING:
                baseTile = TILEMAPPING[' ']
            else:
                # Player/star/goal characters are not in the tile mappings;
                # draw them on inside floor so baseTile is always defined.
                baseTile = TILEMAPPING['o']
# First draw the base ground/wall tile.
mapSurf.blit(baseTile, spaceRect)
if mapObj[x][y] in OUTSIDEDECOMAPPING:
# Draw any tree/rock decorations that are on this tile.
mapSurf.blit(OUTSIDEDECOMAPPING[mapObj[x][y]], spaceRect)
elif (x, y) in gameStateObj['stars']:
if (x, y) in goals:
# A goal AND star are on this space, draw goal first.
mapSurf.blit(IMAGESDICT['covered goal'], spaceRect)
# Then draw the star sprite.
mapSurf.blit(IMAGESDICT['star'], spaceRect)
elif (x, y) in goals:
# Draw a goal without a star on it.
mapSurf.blit(IMAGESDICT['uncovered goal'], spaceRect)
# Last draw the player on the board.
if (x, y) == gameStateObj['player']:
# Note: The value "currentImage" refers
# to a key in "PLAYERIMAGES" which has the
# specific player image we want to show.
mapSurf.blit(PLAYERIMAGES[currentImage], spaceRect)
return mapSurf
if __name__ == '__main__':
# levels = readLevelsFile('starPusherLevels.txt')
# print levels[0]
main()
|
Eacaen/Eacaen.github.io
|
assets/files/Pygame_book/my_starpusherlevels_test.py
|
Python
|
apache-2.0
| 8,133
|
"""
InfluxDB query building.
"""
from abc import ABCMeta, abstractmethod
from logging import getLogger
from six import add_metaclass
DEFAULT_FIELDS = [
"time",
"value",
]
def maybe_quote(value):
"""
Quote a value for InfluxDB if necessary.
"""
if value[0] == "'" and value[-1] == "'":
return value
return "'{}'".format(value)
def kv_condition(key, value):
"""
Generate a key value equality condition.
Ensure that the value is properly quoted.
"""
return "{} = {}".format(key, maybe_quote(value))
def age_condition(age):
return "time > now() - {}".format(age)
def host_condition(hostname):
return kv_condition("host", hostname)
def kv_conditions(conditions):
return [
kv_condition(*condition.split("=", 1))
for condition in conditions or []
]
class Query(object):
"""
Query wrapper.
"""
def __init__(self, query, measurements=None):
self.query = query
self.measurements = measurements or []
self.logger = getLogger('nagiosplugin')
def __str__(self):
return self.query
def get_results(self, client):
self.logger.debug("Querying InfluxDB at {}:{} with query: \"{}\"".format(
client._host,
client._port,
self.query,
))
results = client.query(self.query)
self.logger.info("Received result set: {}".format(results))
return list(results.get_points())
@add_metaclass(ABCMeta)
class QueryBuilder(object):
"""
Abstract callable that builds queries.
"""
@abstractmethod
def __call__(self):
pass
class ExplicitQueryBuilder(QueryBuilder):
"""
Return the provided query.
"""
def __init__(self, query):
self.query = query
def __call__(self):
return Query(
query=self.query,
)
class SingleMeasurementQueryBuilder(QueryBuilder):
"""
Build a simple InfluxDB query of the form:
SELECT <fields> FROM <measurement> WHERE <conditions>
"""
def __init__(self,
fields=None,
measurement=None,
conditions=None):
self.fields = fields or ["time", "value"]
self.measurement = measurement
self.conditions = conditions or []
def __call__(self):
return Query(
query="SELECT {} FROM {} WHERE {}".format(
", ".join(self.fields),
self.measurement,
" AND ".join(self.conditions),
),
measurements=[
self.measurement,
],
)
@classmethod
def for_hostname_and_age(cls, measurement, age, hostname, where, field=None):
return cls(
fields=["time", field] if field else DEFAULT_FIELDS,
measurement=measurement,
conditions=[
age_condition(age),
host_condition(hostname),
] + kv_conditions(where),
)
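# Illustrative usage sketch (not part of the library): build a query for a
# hypothetical "cpu" measurement and print the generated InfluxQL. The
# measurement, age, hostname and extra condition are made-up example values.
if __name__ == "__main__":
    builder = SingleMeasurementQueryBuilder.for_hostname_and_age(
        measurement="cpu",
        age="10m",
        hostname="web-01",
        where=["core=0"],
    )
    # Prints: SELECT time, value FROM cpu WHERE time > now() - 10m
    #         AND host = 'web-01' AND core = '0'
    print(builder())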
|
locationlabs/influxdb-nagios-plugin
|
influxdbnagiosplugin/query.py
|
Python
|
apache-2.0
| 3,019
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from metric_collector import JmxMetricCollector,JmxMetricListener,Runner
import json, logging, fnmatch, sys
class NNSafeModeMetric(JmxMetricListener):
def on_metric(self, metric):
if metric["metric"] == "hadoop.namenode.fsnamesystemstate.fsstate":
if metric["value"] == "safeMode":
metric["value"] = 1
else:
metric["value"] = 0
self.collector.collect(metric)
class NNHAMetric(JmxMetricListener):
PREFIX = "hadoop.namenode.fsnamesystem"
def on_bean(self, component, bean):
if bean["name"] == "Hadoop:service=NameNode,name=FSNamesystem":
if bean[u"tag.HAState"] == "active":
self.collector.on_bean_kv(self.PREFIX, component, "hastate", 0)
else:
self.collector.on_bean_kv(self.PREFIX, component, "hastate", 1)
class MemoryUsageMetric(JmxMetricListener):
PREFIX = "hadoop.namenode.jvm"
def on_bean(self, component, bean):
if bean["name"] == "Hadoop:service=NameNode,name=JvmMetrics":
memnonheapusedusage = round(float(bean['MemNonHeapUsedM']) / float(bean['MemNonHeapMaxM']) * 100.0, 2)
self.collector.on_bean_kv(self.PREFIX, component, "memnonheapusedusage", memnonheapusedusage)
memnonheapcommittedusage = round(float(bean['MemNonHeapCommittedM']) / float(bean['MemNonHeapMaxM']) * 100,
2)
self.collector.on_bean_kv(self.PREFIX, component, "memnonheapcommittedusage", memnonheapcommittedusage)
memheapusedusage = round(float(bean['MemHeapUsedM']) / float(bean['MemHeapMaxM']) * 100, 2)
self.collector.on_bean_kv(self.PREFIX, component,"memheapusedusage", memheapusedusage)
memheapcommittedusage = round(float(bean['MemHeapCommittedM']) / float(bean['MemHeapMaxM']) * 100, 2)
self.collector.on_bean_kv(self.PREFIX, component, "memheapcommittedusage", memheapcommittedusage)
class NNCapacityUsageMetric(JmxMetricListener):
PREFIX = "hadoop.namenode.fsnamesystemstate"
def on_bean(self, component, bean):
if bean["name"] == "Hadoop:service=NameNode,name=FSNamesystemState":
capacityusage = round(float(bean['CapacityUsed']) / float(bean['CapacityTotal']) * 100, 2)
self.collector.on_bean_kv(self.PREFIX, component, "capacityusage", capacityusage)
class JournalTransactionInfoMetric(JmxMetricListener):
PREFIX = "hadoop.namenode.journaltransaction"
def on_bean(self, component, bean):
        if "JournalTransactionInfo" in bean:
JournalTransactionInfo = json.loads(bean.get("JournalTransactionInfo"))
LastAppliedOrWrittenTxId = float(JournalTransactionInfo.get("LastAppliedOrWrittenTxId"))
MostRecentCheckpointTxId = float(JournalTransactionInfo.get("MostRecentCheckpointTxId"))
self.collector.on_bean_kv(self.PREFIX, component, "LastAppliedOrWrittenTxId", LastAppliedOrWrittenTxId)
self.collector.on_bean_kv(self.PREFIX, component, "MostRecentCheckpointTxId", MostRecentCheckpointTxId)
class DatanodeFSDatasetState(JmxMetricListener):
def on_metric(self, metric):
if fnmatch.fnmatch(metric["metric"], "hadoop.datanode.fsdatasetstate-*.capacity"):
metric["metric"] = "hadoop.datanode.fsdatasetstate.capacity"
self.collector.collect(metric)
elif fnmatch.fnmatch(metric["metric"], "hadoop.datanode.fsdatasetstate-*.dfsused"):
metric["metric"] = "hadoop.datanode.fsdatasetstate.dfsused"
self.collector.collect(metric)
if __name__ == '__main__':
collector = JmxMetricCollector()
collector.register(
NNSafeModeMetric(),
NNHAMetric(),
MemoryUsageMetric(),
NNCapacityUsageMetric(),
JournalTransactionInfoMetric(),
DatanodeFSDatasetState()
)
Runner.run(collector)
|
DadanielZ/incubator-eagle
|
eagle-external/hadoop_jmx_collector/hadoop_jmx_kafka.py
|
Python
|
apache-2.0
| 4,721
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import itertools
import posixpath as pp
from debtcollector import removals
import fasteners
import six
from taskflow import exceptions as exc
from taskflow.persistence import path_based
from taskflow.types import tree
class FakeInode(tree.Node):
"""A in-memory filesystem inode-like object."""
def __init__(self, item, path, value=None):
super(FakeInode, self).__init__(item, path=path, value=value)
class FakeFilesystem(object):
"""An in-memory filesystem-like structure.
This filesystem uses posix style paths **only** so users must be careful
to use the ``posixpath`` module instead of the ``os.path`` one which will
vary depending on the operating system which the active python is running
in (the decision to use ``posixpath`` was to avoid the path variations
    which are not relevant in an implementation of an in-memory fake
filesystem).
**Not** thread-safe when a single filesystem is mutated at the same
time by multiple threads. For example having multiple threads call into
:meth:`~taskflow.persistence.backends.impl_memory.FakeFilesystem.clear`
at the same time could potentially end badly. It is thread-safe when only
:meth:`~taskflow.persistence.backends.impl_memory.FakeFilesystem.get`
or other read-only actions (like calling into
:meth:`~taskflow.persistence.backends.impl_memory.FakeFilesystem.ls`)
    are occurring at the same time.
Example usage:
>>> from taskflow.persistence.backends import impl_memory
>>> fs = impl_memory.FakeFilesystem()
>>> fs.ensure_path('/a/b/c')
>>> fs['/a/b/c'] = 'd'
>>> print(fs['/a/b/c'])
d
>>> del fs['/a/b/c']
>>> fs.ls("/a/b")
[]
>>> fs.get("/a/b/c", 'blob')
'blob'
"""
#: Root path of the in-memory filesystem.
root_path = pp.sep
@classmethod
def normpath(cls, path):
"""Return a normalized absolutized version of the pathname path."""
if not path:
raise ValueError("This filesystem can only normalize paths"
" that are not empty")
if not path.startswith(cls.root_path):
raise ValueError("This filesystem can only normalize"
" paths that start with %s: '%s' is not"
" valid" % (cls.root_path, path))
return pp.normpath(path)
#: Split a pathname into a tuple of ``(head, tail)``.
split = staticmethod(pp.split)
@staticmethod
def join(*pieces):
"""Join many path segments together."""
return pp.sep.join(pieces)
def __init__(self, deep_copy=True):
self._root = FakeInode(self.root_path, self.root_path)
self._reverse_mapping = {
self.root_path: self._root,
}
if deep_copy:
self._copier = copy.deepcopy
else:
self._copier = copy.copy
def ensure_path(self, path):
"""Ensure the path (and parents) exists."""
path = self.normpath(path)
# Ignore the root path as we already checked for that; and it
# will always exist/can't be removed anyway...
if path == self._root.item:
return
node = self._root
for piece in self._iter_pieces(path):
child_node = node.find(piece, only_direct=True,
include_self=False)
if child_node is None:
child_node = self._insert_child(node, piece)
node = child_node
def _insert_child(self, parent_node, basename, value=None):
child_path = self.join(parent_node.metadata['path'], basename)
# This avoids getting '//a/b' (duplicated sep at start)...
#
# Which can happen easily if something like the following is given.
# >>> x = ['/', 'b']
# >>> pp.sep.join(x)
# '//b'
if child_path.startswith(pp.sep * 2):
child_path = child_path[1:]
child_node = FakeInode(basename, child_path, value=value)
parent_node.add(child_node)
self._reverse_mapping[child_path] = child_node
return child_node
def _fetch_node(self, path, normalized=False):
if not normalized:
normed_path = self.normpath(path)
else:
normed_path = path
try:
return self._reverse_mapping[normed_path]
except KeyError:
raise exc.NotFound("Path '%s' not found" % path)
def get(self, path, default=None):
"""Fetch the value of given path (and return default if not found)."""
try:
return self._get_item(self.normpath(path))
except exc.NotFound:
return default
def _get_item(self, path, links=None):
node = self._fetch_node(path, normalized=True)
if 'target' in node.metadata:
# Follow the link (and watch out for loops)...
path = node.metadata['target']
if links is None:
links = []
if path in links:
raise ValueError("Recursive link following not"
" allowed (loop %s detected)"
% (links + [path]))
else:
links.append(path)
return self._get_item(path, links=links)
else:
return self._copier(node.metadata['value'])
def _up_to_root_selector(self, root_node, child_node):
# Build the path from the child to the root and stop at the
# root, and then form a path string...
path_pieces = [child_node.item]
for parent_node in child_node.path_iter(include_self=False):
if parent_node is root_node:
break
path_pieces.append(parent_node.item)
if len(path_pieces) > 1:
path_pieces.reverse()
return self.join(*path_pieces)
@staticmethod
def _metadata_path_selector(root_node, child_node):
return child_node.metadata['path']
def ls_r(self, path, absolute=False):
"""Return list of all children of the given path (recursively)."""
node = self._fetch_node(path)
if absolute:
selector_func = self._metadata_path_selector
else:
selector_func = self._up_to_root_selector
return [selector_func(node, child_node)
for child_node in node.bfs_iter()]
@removals.removed_kwarg('recursive', version="0.11", removal_version="2.0")
def ls(self, path, recursive=False):
"""Return list of all children of the given path.
NOTE(harlowja): if ``recursive`` is passed in as truthy then the
absolute path is **always** returned (not the relative path). If
``recursive`` is left as the default or falsey then the
relative path is **always** returned.
This is documented in bug `1458114`_ and the existing behavior is
being maintained, to get a recursive version that is absolute (or is
not absolute) it is recommended to use the :py:meth:`.ls_r` method
instead.
.. deprecated:: 0.11
In a future release the ``recursive`` keyword argument will
be removed (so preferring and moving to the :py:meth:`.ls_r` should
occur earlier rather than later).
.. _1458114: https://bugs.launchpad.net/taskflow/+bug/1458114
"""
node = self._fetch_node(path)
if recursive:
selector_func = self._metadata_path_selector
child_node_it = node.bfs_iter()
else:
selector_func = self._up_to_root_selector
child_node_it = iter(node)
return [selector_func(node, child_node)
for child_node in child_node_it]
def clear(self):
"""Remove all nodes (except the root) from this filesystem."""
self._reverse_mapping = {
self.root_path: self._root,
}
for node in list(self._root.reverse_iter()):
node.disassociate()
def _iter_pieces(self, path, include_root=False):
if path == self._root.item:
# Check for this directly as the following doesn't work with
# split correctly:
#
# >>> path = "/"
# path.split(pp.sep)
# ['', '']
parts = []
else:
parts = path.split(pp.sep)[1:]
if include_root:
parts.insert(0, self._root.item)
for piece in parts:
yield piece
def __delitem__(self, path):
path = self.normpath(path)
node = self._fetch_node(path, normalized=True)
if node is self._root:
raise ValueError("Can not delete '%s'" % self._root.item)
child_gen = (child.metadata['path'] for child in node.bfs_iter())
for path in itertools.chain([path], child_gen):
self._reverse_mapping.pop(path, None)
node.disassociate()
@staticmethod
def _stringify_node(node):
if 'target' in node.metadata:
return "%s (link to %s)" % (node.item, node.metadata['target'])
else:
return six.text_type(node.item)
def pformat(self):
"""Pretty format this in-memory filesystem."""
return self._root.pformat(stringify_node=self._stringify_node)
def symlink(self, src_path, dest_path):
"""Link the destionation path to the source path."""
dest_path = self.normpath(dest_path)
src_path = self.normpath(src_path)
try:
dest_node = self._fetch_node(dest_path, normalized=True)
except exc.NotFound:
parent_path, basename = self.split(dest_path)
parent_node = self._fetch_node(parent_path, normalized=True)
dest_node = self._insert_child(parent_node, basename)
dest_node.metadata['target'] = src_path
def __getitem__(self, path):
return self._get_item(self.normpath(path))
def __setitem__(self, path, value):
path = self.normpath(path)
value = self._copier(value)
try:
node = self._fetch_node(path, normalized=True)
node.metadata.update(value=value)
except exc.NotFound:
parent_path, basename = self.split(path)
parent_node = self._fetch_node(parent_path, normalized=True)
self._insert_child(parent_node, basename, value=value)
class MemoryBackend(path_based.PathBasedBackend):
"""A in-memory (non-persistent) backend.
This backend writes logbooks, flow details, and atom details to a
in-memory filesystem-like structure (rooted by the ``memory``
instance variable).
This backend does *not* provide true transactional semantics. It does
guarantee that there will be no inter-thread race conditions when
    writing and reading by using a read/write lock.
"""
#: Default path used when none is provided.
DEFAULT_PATH = pp.sep
def __init__(self, conf=None):
super(MemoryBackend, self).__init__(conf)
self.memory = FakeFilesystem(deep_copy=self._conf.get('deep_copy',
True))
self.lock = fasteners.ReaderWriterLock()
def get_connection(self):
return Connection(self)
def close(self):
pass
class Connection(path_based.PathBasedConnection):
def __init__(self, backend):
super(Connection, self).__init__(backend)
self.upgrade()
@contextlib.contextmanager
def _memory_lock(self, write=False):
if write:
lock = self.backend.lock.write_lock
else:
lock = self.backend.lock.read_lock
with lock():
try:
yield
except exc.TaskFlowException:
raise
except Exception:
exc.raise_with_cause(exc.StorageFailure,
"Storage backend internal error")
def _join_path(self, *parts):
return pp.join(*parts)
def _get_item(self, path):
with self._memory_lock():
return self.backend.memory[path]
def _set_item(self, path, value, transaction):
self.backend.memory[path] = value
def _del_tree(self, path, transaction):
del self.backend.memory[path]
def _get_children(self, path):
with self._memory_lock():
return self.backend.memory.ls(path)
def _ensure_path(self, path):
with self._memory_lock(write=True):
self.backend.memory.ensure_path(path)
def _create_link(self, src_path, dest_path, transaction):
self.backend.memory.symlink(src_path, dest_path)
@contextlib.contextmanager
def _transaction(self):
"""This just wraps a global write-lock."""
with self._memory_lock(write=True):
yield
def validate(self):
pass
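# Illustrative sketch only (mirrors the doctest in the FakeFilesystem
# docstring above); it exercises the in-memory filesystem directly without
# going through a backend connection.
if __name__ == "__main__":
    fs = FakeFilesystem()
    fs.ensure_path('/a/b/c')
    fs['/a/b/c'] = 'd'
    print(fs['/a/b/c'])                  # -> d
    print(fs.ls_r('/', absolute=True))   # -> ['/a', '/a/b', '/a/b/c']
    del fs['/a/b/c']
    print(fs.get('/a/b/c', 'blob'))      # -> blob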
|
junneyang/taskflow
|
taskflow/persistence/backends/impl_memory.py
|
Python
|
apache-2.0
| 13,648
|
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.utils import extract_price
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class AcityDiscountSpider(BaseSpider):
name = 'acitydiscount.com'
user_agent = 'Googlebot/2.1 (+http://www.google.com/bot.html)'
allowed_domains = ['acitydiscount.com']
def start_requests(self):
yield Request('http://www.acitydiscount.com')
def parse(self, response):
hxs = HtmlXPathSelector(response)
'''
cats = hxs.select('//li[@class="g-item"]/ul//a/@href').extract()
cats += hxs.select('//h3[@class="titlegroups"]/a/@href').extract()
'''
cats = ['http://www.acitydiscount.com/Wisco.1.97.3.1.htm',
'http://www.acitydiscount.com/Thunder-Group-Inc.1.2348.3.1.htm',
'http://www.acitydiscount.com/Libbey.1.406.3.1.htm',
'http://www.acitydiscount.com/Victory.1.57.3.1.htm',
'http://www.acitydiscount.com/Vollrath.1.266.3.1.htm',
'http://www.acitydiscount.com/F-Dick.1.1528.3.1.htm',
'http://www.acitydiscount.com/Cecilware.1.48.3.1.htm',
'http://www.acitydiscount.com/Turbo-Air-Radiance.1.2372.3.1.htm',
'http://www.acitydiscount.com/G-E-T-.1.1137.3.1.htm',
'http://www.acitydiscount.com/Beverage-Air.1.13.3.1.htm',
'http://www.acitydiscount.com/Turbo-Air.1.915.3.1.htm',
'http://www.acitydiscount.com/Amana.1.128.3.1.htm']
for cat in cats:
yield Request(cat)
next_page = hxs.select('//a[@class="pagelinks" and contains(text(), "Next")]/@href').re('\d+')
if next_page:
manuf = re.search("manuf=(\d+)", response.body).groups()[0]
pagination_url = 'http://www.acitydiscount.com/restaurant_equipment/index.cfm' + \
'?_faction=1&manuf=%s&startrow=%s' % (manuf, next_page[0])
yield Request(pagination_url)
products = hxs.select('//input[contains(@src, "btn_add2cart.gif")]/../..//a[position()=1]/@href').extract()
for product in products:
yield Request(product, self.parse_product)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
main_name = hxs.select('//h1[@class="titleadditional"]/text()').extract()[0].strip()
price = hxs.select('//span[@id="current_price"]/span/text()').extract()[0]
loader = ProductLoader(response=response, item=Product())
loader.add_value('url', response.url)
loader.add_value('name', main_name)
loader.add_value('price', price)
loader.add_xpath('sku', '//div[@id="product_description"]//td[contains(text(), "Model:")]' +
'/following-sibling::td[not(contains(text(), "*"))]/text()')
yield loader.load_item()
'''
def _add_options(option_sets, current_name):
if not option_sets:
loader = ProductLoader(response=response, item=Product())
loader.add_value('url', response.url)
loader.add_value('name', current_name)
loader.add_value('price', price)
yield loader.load_item()
else:
options = option_sets[0]
option_sets = option_sets[1:]
for option in options.select('./option/text()').extract():
name = current_name.strip() + ' ' + option.strip()
for product in _add_options(option_sets, name):
yield product
option_sets = hxs.select('//div[@id="product_price"]//select')
if option_sets:
for product in _add_options(option_sets, main_name):
yield product
'''
|
0--key/lib
|
portfolio/Python/scrapy/tigerchef/acitydiscount.py
|
Python
|
apache-2.0
| 4,043
|
# -*- coding: utf-8 -*-
"""
jinja2htmlcompress
~~~~~~~~~~~~~~~~~~
A Jinja2 extension that eliminates useless whitespace at template
compilation time without extra overhead.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.ext import Extension
from jinja2.lexer import Token, describe_token
from jinja2 import TemplateSyntaxError
_tag_re = re.compile(r'(?:<(/?)([a-zA-Z0-9_-]+)\s*|(>\s*))(?s)')
_ws_normalize_re = re.compile(r'[ \t\r\n]+')
class StreamProcessContext(object):
def __init__(self, stream):
self.stream = stream
self.token = None
self.stack = []
def fail(self, message):
raise TemplateSyntaxError(message, self.token.lineno,
self.stream.name, self.stream.filename)
def _make_dict_from_listing(listing):
rv = {}
for keys, value in listing:
for key in keys:
rv[key] = value
return rv
class HTMLCompress(Extension):
isolated_elements = set(['script', 'style', 'noscript', 'textarea'])
void_elements = set(['br', 'img', 'area', 'hr', 'param', 'input',
'embed', 'col'])
block_elements = set(['div', 'p', 'form', 'ul', 'ol', 'li', 'table', 'tr',
'tbody', 'thead', 'tfoot', 'tr', 'td', 'th', 'dl',
'dt', 'dd', 'blockquote', 'h1', 'h2', 'h3', 'h4',
'h5', 'h6', 'pre'])
breaking_rules = _make_dict_from_listing([
(['p'], set(['#block'])),
(['li'], set(['li'])),
(['td', 'th'], set(['td', 'th', 'tr', 'tbody', 'thead', 'tfoot'])),
(['tr'], set(['tr', 'tbody', 'thead', 'tfoot'])),
(['thead', 'tbody', 'tfoot'], set(['thead', 'tbody', 'tfoot'])),
(['dd', 'dt'], set(['dl', 'dt', 'dd']))
])
def is_isolated(self, stack):
for tag in reversed(stack):
if tag in self.isolated_elements:
return True
return False
def is_breaking(self, tag, other_tag):
breaking = self.breaking_rules.get(other_tag)
return breaking and (tag in breaking or
('#block' in breaking and tag in self.block_elements))
def enter_tag(self, tag, ctx):
while ctx.stack and self.is_breaking(tag, ctx.stack[-1]):
self.leave_tag(ctx.stack[-1], ctx)
if tag not in self.void_elements:
ctx.stack.append(tag)
def leave_tag(self, tag, ctx):
if not ctx.stack:
ctx.fail('Tried to leave "%s" but something closed '
'it already' % tag)
if tag == ctx.stack[-1]:
ctx.stack.pop()
return
for idx, other_tag in enumerate(reversed(ctx.stack)):
if other_tag == tag:
for num in xrange(idx + 1):
ctx.stack.pop()
elif not self.breaking_rules.get(other_tag):
break
def normalize(self, ctx):
pos = 0
buffer = []
def write_data(value):
if not self.is_isolated(ctx.stack):
value = _ws_normalize_re.sub(' ', value.strip())
buffer.append(value)
for match in _tag_re.finditer(ctx.token.value):
closes, tag, sole = match.groups()
preamble = ctx.token.value[pos:match.start()]
write_data(preamble)
if sole:
write_data(sole)
else:
buffer.append(match.group())
(closes and self.leave_tag or self.enter_tag)(tag, ctx)
pos = match.end()
write_data(ctx.token.value[pos:])
return u''.join(buffer)
def filter_stream(self, stream):
ctx = StreamProcessContext(stream)
for token in stream:
if token.type != 'data':
yield token
continue
ctx.token = token
value = self.normalize(ctx)
yield Token(token.lineno, 'data', value)
class SelectiveHTMLCompress(HTMLCompress):
def filter_stream(self, stream):
ctx = StreamProcessContext(stream)
strip_depth = 0
while 1:
if stream.current.type == 'block_begin':
if stream.look().test('name:strip') or \
stream.look().test('name:endstrip'):
stream.skip()
if stream.current.value == 'strip':
strip_depth += 1
else:
strip_depth -= 1
if strip_depth < 0:
ctx.fail('Unexpected tag endstrip')
stream.skip()
if stream.current.type != 'block_end':
ctx.fail('expected end of block, got %s' %
describe_token(stream.current))
stream.skip()
if strip_depth > 0 and stream.current.type == 'data':
ctx.token = stream.current
value = self.normalize(ctx)
yield Token(stream.current.lineno, 'data', value)
else:
yield stream.current
stream.next()
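# Illustrative usage sketch (not part of the original module): register the
# extension on a Jinja2 environment and render a small template. Assumes a
# Jinja2 version compatible with this (Python 2 era) extension.
if __name__ == "__main__":
    from jinja2 import Environment
    env = Environment(extensions=[HTMLCompress])
    tmpl = env.from_string('<ul>\n  <li> one </li>\n  <li> two </li>\n</ul>')
    print(tmpl.render())  # whitespace between tags is collapsed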
|
mgp/sharebears
|
sharebears/jinja2htmlcompress.py
|
Python
|
apache-2.0
| 5,227
|
#!/usr/bin/env python
# coding: utf8
from __future__ import print_function
from collections import defaultdict
from datetime import datetime
import argparse
import base64
import eviltransform
import json
import logging
import os
import pickle
import time
import urllib2
# Initialize logging.
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s: %(message)s')
# Names starting with '.' are calculated in get_hotel_field() below.
HOTEL_FIELDS = ('hotel_id', '.lat', '.lon', 'name', 'address', 'class', '.rate', 'ranking', 'review_score', 'url', 'hoteltype_id', '.trans')
class BookingApi:
def __init__(self, login, password):
self.login = login
self.password = password
self.baseConfig = {
"headers": {
"Content-Type": "application/json",
"Authorization": "Basic " + base64.encodestring(
"{login}:{password}".format(login=self.login, password=self.password)).replace('\n', '')
},
"url": 'https://distribution-xml.booking.com/json/bookings'}
self.checkMinute = 0
self.requestPerMinute = 0
self.requestLimit = 15 # request per minute
def call(self, function, params=None):
self.requestPerMinute += 1
now = datetime.utcnow()
if self.requestPerMinute >= self.requestLimit:
waittime = 60 - now.second
logging.info("Limit for request per minute exceeded. Waiting for: {0} sec.".format(waittime))
time.sleep(waittime)
now = datetime.utcnow()
if self.checkMinute != now.minute:
self.requestPerMinute = 0
self.checkMinute = now.minute
payload = ''
try:
p = "" if not params else '?' + "&".join(
["{key}={value}".format(key=k, value=v) for (k, v) in params.iteritems()])
url = "{base}.{func}{params}".format(base=self.baseConfig["url"], func=function, params=p)
logging.debug("{0} {1} API call:{2}".format(self.checkMinute, self.requestPerMinute, url))
request = urllib2.Request(url, None, self.baseConfig["headers"])
stream = urllib2.urlopen(request)
payload = stream.read()
data = json.loads(payload)
if isinstance(data, dict) and 'message' in data and 'code' in data:
logging.error('Api call failed with error: {0} Code: {1}'.format(data['message'], data['code']))
return None
return data
except Exception as e:
logging.error('Error: {0} Context: {1}'.format(e, payload))
return None
def download(user, password, path):
'''
Downloads all hotels from booking.com and stores them in a bunch of .pkl files.
'''
api = BookingApi(user, password)
maxrows = 1000
countries = api.call("getCountries", dict(languagecodes='en'))
for country in countries:
countrycode = country['countrycode']
logging.info(u'Download[{0}]: {1}'.format(countrycode, country['name']))
allhotels = {}
while True:
hotels = api.call('getHotels',
dict(new_hotel_type=1, offset=len(allhotels), rows=maxrows, countrycodes=countrycode))
# Check for error.
if hotels is None:
logging.critical('No hotels downloaded for country {0}'.format(country['name']))
break
for h in hotels:
allhotels[h['hotel_id']] = h
            # If the answer contains fewer hotels than maxrows, we have reached the end of the data.
if len(hotels) < maxrows:
break
if not hotels:
continue
# Now the same for hotel translations
offset = 0
while True:
hotels = api.call('getHotelTranslations', dict(offset=offset, rows=maxrows, countrycodes=countrycode))
if hotels is None:
exit(1)
# Add translations for each hotel
for h in hotels:
if h['hotel_id'] in allhotels:
if 'translations' not in allhotels[h['hotel_id']]:
allhotels[h['hotel_id']]['translations'] = {}
allhotels[h['hotel_id']]['translations'][h['languagecode']] = {'name': h['name'], 'address': h['address']}
offset += len(hotels)
if len(hotels) < maxrows:
break
logging.info('Num of hotels: {0}, translations: {1}'.format(len(allhotels), offset))
filename = os.path.join(path,
'{0} - {1}.pkl'.format(country['area'].encode('utf8'), country['name'].encode('utf8')))
with open(filename, 'wb') as fd:
pickle.dump(allhotels.values(), fd, pickle.HIGHEST_PROTOCOL)
def translate(source, output):
'''
Reads *.pkl files and produces a single list of hotels as tab separated values.
'''
files = [os.path.join(source, filename)
for filename in os.listdir(source) if filename.endswith('.pkl')]
data = []
for filename in sorted(files):
logging.info('Processing {0}'.format(filename))
with open(filename, 'rb') as fd:
data += pickle.load(fd)
    # Fix Chinese coordinates (convert GCJ-02 to WGS-84)
for hotel in data:
if hotel['countrycode'] == 'cn' and 'location' in hotel:
try:
hotel['location']['latitude'], hotel['location']['longitude'] = eviltransform.gcj2wgs_exact(
float(hotel['location']['latitude']), float(hotel['location']['longitude']))
except ValueError:
# We don't care if there were errors converting coordinates to float
pass
# Dict of dicts city_id -> { currency -> [prices] }
cities = defaultdict(lambda: defaultdict(list))
def valid(hotel):
return 'city_id' in hotel and 'currencycode' in hotel and 'minrate' in hotel and hotel['minrate'] is not None
# Collect prices
for hotel in data:
if valid(hotel):
cities[hotel['city_id']][hotel['currencycode']].append(float(hotel['minrate']))
    # Replace each list of prices with its median price.
for city in cities:
for cur in cities[city]:
cities[city][cur] = sorted(cities[city][cur])[len(cities[city][cur]) / 2]
# Price rate ranges, relative to the median price for a city
rates = (0.7, 1.3)
def get_hotel_field(hotel, field, rate):
if field == '.lat':
return hotel['location']['latitude']
elif field == '.lon':
return hotel['location']['longitude']
elif field == '.rate':
return rate
elif field == '.trans':
# Translations are packed into a single column: lang1|name1|address1|lang2|name2|address2|...
if 'translations' in hotel:
tr_list = []
for tr_lang, tr_values in hotel['translations'].items():
tr_list.append(tr_lang)
tr_list.extend([tr_values[e] for e in ('name', 'address')])
return '|'.join([s.replace('|', ';') for s in tr_list])
else:
return ''
elif field in hotel:
return hotel[field]
elif field == 'ranking':
# This field is not used yet, and booking.com sometimes blocks it.
return ''
logging.error('Unknown hotel field: {0}, URL: {1}'.format(field, hotel['url']))
return ''
with open(output, 'w') as fd:
for hotel in data:
rate = 0
if valid(hotel):
avg = cities[hotel['city_id']][hotel['currencycode']]
price = float(hotel['minrate'])
rate = 1
# Find a range that contains the price
while rate <= len(rates) and price > avg * rates[rate - 1]:
rate += 1
l = [get_hotel_field(hotel, e, rate) for e in HOTEL_FIELDS]
print('\t'.join([unicode(f).encode('utf8').replace('\t', ' ').replace('\n', ' ').replace('\r', '') for f in l]), file=fd)
def process_options():
parser = argparse.ArgumentParser(description='Download and process booking hotels.')
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose")
parser.add_argument("-q", "--quiet", action="store_false", dest="verbose")
parser.add_argument("--password", dest="password", help="Booking.com account password")
parser.add_argument("--user", dest="user", help="Booking.com account user name")
parser.add_argument("--path", dest="path", help="Path to data files")
parser.add_argument("--output", dest="output", help="Name and destination for output file")
parser.add_argument("--download", action="store_true", dest="download", default=False)
parser.add_argument("--translate", action="store_true", dest="translate", default=False)
options = parser.parse_args()
if not options.download and not options.translate:
parser.print_help()
    # TODO(mgsergio): implement it with argparse facilities.
if options.translate and not options.output:
print("--output isn't set")
parser.print_help()
exit()
return options
def main():
options = process_options()
if options.download:
download(options.user, options.password, options.path)
if options.translate:
translate(options.path, options.output)
if __name__ == "__main__":
main()
|
bykoianko/omim
|
tools/python/booking_hotels.py
|
Python
|
apache-2.0
| 9,528
|
from common import delete_container, docker_client, event_test
def test_inspect_by_name(agent):
delete_container('/inspect_test')
client = docker_client()
c = client.create_container('ibuildthecloud/helloworld',
name='inspect_test')
inspect = docker_client().inspect_container(c['Id'])
def post(req, resp):
response_inspect = resp['data']['instanceInspect']
# diff_dict(inspect, response_inspect)
assert response_inspect['Id'] == inspect['Id']
del resp['links']
del resp['actions']
event_test(agent, 'docker/instance_inspect',
post_func=post, diff=False)
def test_inspect_by_id(agent):
delete_container('/inspect_test')
client = docker_client()
c = client.create_container('ibuildthecloud/helloworld',
name='inspect_test')
inspect = docker_client().inspect_container(c['Id'])
def pre(req):
instance_inspect = req['data']['instanceInspect']
instance_inspect['id'] = c['Id']
del instance_inspect['name']
def post(req, resp, valid_resp):
response_inspect = resp['data']['instanceInspect']
# can't compare the inspect from go api and py api
# TODO find a new way to assert
assert response_inspect['Id'] == inspect['Id']
# diff_dict(inspect, response_inspect)
event_test(agent, 'docker/instance_inspect', pre_func=pre,
post_func=post, diff=False)
def test_inspect_not_found(agent):
delete_container('/inspect_test')
def post(req, resp):
assert "Id" not in resp['data']['instanceInspect']
assert "Name" not in resp['data']['instanceInspect']
event_test(agent, 'docker/instance_inspect', post_func=post, diff=False)
|
StrongMonkey/agent
|
tests/tests/test_docker_inspect.py
|
Python
|
apache-2.0
| 1,803
|
import json
import os
import uuid
from socket import socket, AF_INET, SOCK_STREAM
from mating.logging import l
from mating.network import DEFAULT_PACKET_SIZE, TCP_IP, TCP_PORT
MATING_CMD = ('./RobotCrossover parent/{}.txt parent/{}.txt '
'confs/evolConf-full.txt -/tmp/{}.genome.txt')
def run_mating_server():
def handle_received_data(packet, mates):
dhash = str(packet['hash_code'])
l('Mating server received {}'.format(dhash))
if mates.get(dhash):
mates[dhash].append(packet['message'])
else:
mates[dhash] = [packet['message']]
s = socket(AF_INET, SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
# Initialize an empty dictionary - mating pairs will be stored here
mates = {}
while True:
conn, addr = s.accept()
packet = conn.recv(DEFAULT_PACKET_SIZE)
if packet:
handle_received_data(json.loads(packet.decode()), mates)
for pair_id in mates:
# If both robots have agreed to mate
if len(mates[pair_id]) == 2:
l('Mating server formed a pair {}'.format(mates[pair_id]))
command = MATING_CMD.format(mates[pair_id][0],
mates[pair_id][1],
str(uuid.uuid4()))
l('Running command {}'.format(command))
# Call the Cpp module that does the crossover
os.system(command)
conn.close()
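# Illustrative sketch only (not part of this module): a minimal client that
# sends one mating request in the JSON shape handle_received_data() expects.
# The hash code and message values are made up for the example.
if __name__ == "__main__":
    client = socket(AF_INET, SOCK_STREAM)
    client.connect((TCP_IP, TCP_PORT))
    client.send(json.dumps({'hash_code': 'pair-42', 'message': 'robot-a'}).encode())
    client.close()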
|
portaloffreedom/robot-baby
|
Mating/mating/server/server.py
|
Python
|
apache-2.0
| 1,524
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ClientConnectionFailure(Exception):
"""
An exception raised for errors with the client socket connection.
"""
pass
class ClientConnectionNotOpen(Exception):
"""
An exception raised when operations are issued to a closed connection.
"""
def __init__(self):
"""
Construct the closed client connection error message.
"""
super(ClientConnectionNotOpen, self).__init__(
"client connection not open")
class KmipOperationFailure(Exception):
"""
An exception raised upon the failure of a KMIP appliance operation.
"""
def __init__(self, status, reason, message):
"""
Construct the error message for the KMIP operation failure.
Args:
status: a ResultStatus enumeration
reason: a ResultReason enumeration
message: a string providing additional error information
"""
msg = "{0}: {1} - {2}".format(status.name, reason.name, message)
super(KmipOperationFailure, self).__init__(msg)
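# Illustrative sketch only: shows how KmipOperationFailure formats its message.
# Stand-in enums are used here; the real ResultStatus/ResultReason
# enumerations live elsewhere in PyKMIP.
if __name__ == "__main__":
    from collections import namedtuple
    _Enum = namedtuple("_Enum", ["name"])
    error = KmipOperationFailure(
        _Enum("OPERATION_FAILED"),
        _Enum("ITEM_NOT_FOUND"),
        "Could not locate the requested object.")
    print(str(error))
    # -> OPERATION_FAILED: ITEM_NOT_FOUND - Could not locate the requested object.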
|
viktorTarasov/PyKMIP
|
kmip/pie/exceptions.py
|
Python
|
apache-2.0
| 1,703
|
import bs4
import requests
def select_all_links_from_url(url):
"""
:param url: a given address
    :return: a list of <a> (link) tags found at the URL
"""
return select_all_tags_from_url(url, 'a')
def select_all_tags_from_url(url, tag):
"""
:param url: a given address
:param tag: a tag to select
    :return: a list of elements matching the given tag
"""
soup = boil_soup(url)
# select all links
selected = soup.select(tag)
# return the links found
return selected
def boil_soup(book_url):
# get text from the url
# http://hwanho.net/blog/2014/12/03/python-bs4/
source_code = requests.get(book_url)
plain_text = source_code.text
# run bs4 parser
soup = bs4.BeautifulSoup(plain_text, 'lxml')
return soup
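# Illustrative usage sketch (requires network access); example.com is a
# stand-in URL.
if __name__ == "__main__":
    for link in select_all_links_from_url('http://example.com'):
        print(link.get('href'))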
|
autodrive/utils3
|
utils3/url_utils.py
|
Python
|
apache-2.0
| 705
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import logging
from datetime import datetime, timedelta
from changes.config import db
from changes.db.utils import try_create
from changes.lib.flaky_tests import get_flaky_tests
from changes.models import FlakyTestStat, Project, TestCase
import urllib2
def log_metrics(key, **kws):
try:
urllib2.urlopen(
"https://www.dropbox.com/build_metrics" +
"?key=%s" % key +
"".join(
"&%s=%s" % (urllib2.quote(str(k)), urllib2.quote(str(v)))
for (k, v) in kws.items()
),
timeout=10
).read()
except Exception as e:
print("Logging Failed", e)
pass
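# For example (illustrative values only), log_metrics("flaky_test_reruns",
# project_id=42) issues a GET request to:
#   https://www.dropbox.com/build_metrics?key=flaky_test_reruns&project_id=42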
def aggregate_flaky_tests(day=None, max_flaky_tests=200):
if day is None:
day = datetime.utcnow().date() - timedelta(days=1)
try:
projects = Project.query.all()
for project in projects:
tests = get_flaky_tests(day, day + timedelta(days=1), [project], max_flaky_tests)
for test in tests:
first_run = db.session.query(
TestCase.date_created
).filter(
TestCase.project_id == test['project_id'],
TestCase.name_sha == test['hash']
).order_by(
TestCase.date_created
).limit(1).scalar()
log_metrics(
"flaky_test_reruns",
flaky_test_reruns_name=test['name'],
flaky_test_reruns_project_id=test['project_id'],
flaky_test_reruns_flaky_runs=test['flaky_runs'],
flaky_test_reruns_passing_runs=test['passing_runs'],
)
try_create(FlakyTestStat, {
'name': test['name'],
'project_id': test['project_id'],
'date': day,
'last_flaky_run_id': test['id'],
'flaky_runs': test['flaky_runs'],
'double_reruns': test['double_reruns'],
'passing_runs': test['passing_runs'],
'first_run': first_run
})
db.session.commit()
except Exception as err:
logging.exception(unicode(err))
|
wfxiang08/changes
|
changes/jobs/flaky_tests.py
|
Python
|
apache-2.0
| 2,351
|
import cherrypy
# TODO: If we add get requests to this, we should add a dictionary lookup for which method to service. See: Datawake scraper
from datawake.util.exceptions import datawakeexception
def is_in_session(callback):
def has_session(**kwargs):
if 'user' in cherrypy.session:
return callback(**kwargs)
raise datawakeexception.SessionError(repr(callback), "No User in the current session")
return has_session
def get_user():
user = cherrypy.session.get('user')
return user
def get_org():
user = get_user()
if user is not None:
return user.get_org()
return None
def get_token():
return cherrypy.session.get('token')
def is_token_in_session():
return 'token' in cherrypy.session
def expire_user():
if 'user' in cherrypy.session:
del cherrypy.session['user']
if 'token' in cherrypy.session:
del cherrypy.session['token']
cherrypy.lib.sessions.expire()
return True
def set_user(user):
cherrypy.session['user'] = user
return True
def set_token(token):
cherrypy.session['token'] = token
return True
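# Illustrative usage sketch (assumed, not part of this module): is_in_session
# is intended to wrap cherrypy-exposed handlers, e.g.
#
#     @is_in_session
#     def get_current_user(**kwargs):
#         return get_user()
#
# so the wrapped handler raises datawakeexception.SessionError unless 'user'
# is present in cherrypy.session.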
|
Sotera/datawake-prefetch
|
server/datawake/util/session/helper.py
|
Python
|
apache-2.0
| 1,140
|
# -*- coding: utf-8 -*-
"""A resolver for Windows paths to file system specific formats."""
import re
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
class WindowsPathResolver(object):
"""Resolver object for Windows paths."""
_PATH_SEPARATOR = u'\\'
_PATH_EXPANSION_VARIABLE = re.compile(r'^[%][^%]+[%]$')
def __init__(self, file_system, mount_point, drive_letter=u'C'):
"""Initializes the Windows path helper.
The mount point indicates a path specification where the Windows
file system is mounted. This can either be a path specification
into a storage media image or a directory accessible by the operating
system.
Args:
file_system: the file system object (instance of vfs.FileSystem).
mount_point: the mount point path specification (instance of
path.PathSpec).
drive_letter: optional string that contains the drive letter used by
the file system. The default is C.
Raises:
PathSpecError: if the mount point path specification is incorrect.
ValueError: when file system or mount point is not set.
"""
if not file_system or not mount_point:
raise ValueError(u'Missing file system or mount point value.')
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
file_system.type_indicator):
if not hasattr(mount_point, u'location'):
raise errors.PathSpecError(
u'Mount point path specification missing location.')
super(WindowsPathResolver, self).__init__()
self._drive_letter = drive_letter
self._environment_variables = {}
self._file_system = file_system
self._mount_point = mount_point
# Windows paths:
# Device path: \\.\PhysicalDrive0
# Volume device path: \\.\C:
# Volume file system path: \\.\C:\
# Extended-length path: \\?\C:\directory\file.txt
# Extended-length UNC path: \\?\UNC\server\share\directory\file.txt
# Local 'absolute' path: \directory\file.txt
# \directory\\file.txt
# Local 'relative' path: ..\directory\file.txt
# Local 'relative' path: .\directory\file.txt
# Volume 'absolute' path: C:\directory\file.txt
# Volume 'relative' path: C:directory\file.txt
# UNC path: \\server\share\directory\file.txt
# Path with environment variable: %SystemRoot%\file.txt
#
# Note Windows also allows paths like:
# C:\..\directory\file.txt
def _PathStripPrefix(self, path):
"""Strips the prefix from a path.
Args:
path: the Windows path to strip the prefix from.
Returns:
The path without the prefix or None if the path is not supported.
"""
if path.startswith(u'\\\\.\\') or path.startswith(u'\\\\?\\'):
if len(path) < 7 or path[5] != u':' or path[6] != self._PATH_SEPARATOR:
# Cannot handle a non-volume path.
return
path = path[7:]
elif path.startswith(u'\\\\'):
# Cannot handle an UNC path.
return
elif len(path) >= 3 and path[1] == u':':
# Check if the path is a Volume 'absolute' path.
if path[2] != self._PATH_SEPARATOR:
# Cannot handle a Volume 'relative' path.
return
path = path[3:]
elif path.startswith(u'\\'):
path = path[1:]
else:
# Cannot handle a relative path.
return
return path
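  # Illustrative sketch (not part of dfvfs): _PathStripPrefix maps, for example:
  #   \\?\C:\Windows\System32  ->  Windows\System32
  #   C:\Windows\notepad.exe   ->  Windows\notepad.exe
  #   \Windows                 ->  Windows
  # and returns None for UNC paths and for relative paths it cannot handle.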
def _ResolvePath(self, path, expand_variables=True):
"""Resolves a Windows path in file system specific format.
This function will check if the individual path segments exists within
the file system. For this it will prefer the first case sensitive match
above a case insensitive match. If no match was found None is returned.
Args:
path: the Windows path to resolve.
expand_variables: optional value to indicate path variables should be
expanded or not. The default is to expand (True).
Returns:
A tuple of the path in file system specific format and the matching path
specification.
"""
# Allow for paths that start with an environment variable e.g.
# %SystemRoot%\file.txt
if path.startswith(u'%'):
path_segment, _, _ = path.partition(self._PATH_SEPARATOR)
if not self._PATH_EXPANSION_VARIABLE.match(path_segment):
path = None
else:
path = self._PathStripPrefix(path)
if path is None:
return None, None
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
file_entry = self._file_system.GetFileEntryByPathSpec(self._mount_point)
expanded_path_segments = self._file_system.SplitPath(
self._mount_point.location)
else:
file_entry = self._file_system.GetRootFileEntry()
expanded_path_segments = []
number_of_expanded_path_segments = 0
for path_segment in path.split(self._PATH_SEPARATOR):
if file_entry is None:
return None, None
# Ignore empty path segments or path segments containing a single dot.
if not path_segment or path_segment == u'.':
continue
if path_segment == u'..':
# Only allow to traverse back up to the mount point.
if number_of_expanded_path_segments > 0:
_ = expanded_path_segments.pop()
number_of_expanded_path_segments -= 1
file_entry = file_entry.GetParentFileEntry()
continue
if (expand_variables and
self._PATH_EXPANSION_VARIABLE.match(path_segment)):
path_segment = self._environment_variables.get(
path_segment[1:-1].upper(), path_segment)
sub_file_entry = file_entry.GetSubFileEntryByName(
path_segment, case_sensitive=False)
if sub_file_entry is None:
return None, None
expanded_path_segments.append(sub_file_entry.name)
number_of_expanded_path_segments += 1
file_entry = sub_file_entry
location = self._file_system.JoinPath(expanded_path_segments)
return location, file_entry.path_spec
def GetWindowsPath(self, path_spec):
"""Returns the Windows path based on a resolved path specification.
Args:
path_spec: the path specification (instance of path.PathSpec).
Returns:
The corresponding Windows path or None if the Windows path could not
be determined.
Raises:
PathSpecError: if the path specification is incorrect.
"""
location = getattr(path_spec, u'location', None)
if location is None:
raise errors.PathSpecError(u'Path specification missing location.')
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
if not location.startswith(self._mount_point.location):
raise errors.PathSpecError(
u'Path specification does not contain mount point.')
else:
if not hasattr(path_spec, u'parent'):
raise errors.PathSpecError(u'Path specification missing parent.')
if path_spec.parent != self._mount_point:
raise errors.PathSpecError(
u'Path specification does not contain mount point.')
path_segments = self._file_system.SplitPath(location)
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
mount_point_path_segments = self._file_system.SplitPath(
self._mount_point.location)
path_segments = path_segments[len(mount_point_path_segments):]
return u'{0:s}:\\{1:s}'.format(
self._drive_letter, self._PATH_SEPARATOR.join(path_segments))
def ResolvePath(self, path, expand_variables=True):
"""Resolves a Windows path in file system specific format.
Args:
path: the Windows path to resolve.
expand_variables: optional value to indicate path variables should be
expanded or not. The default is to expand (True).
Returns:
The path specification (instance of path.PathSpec) in file system
specific format.
"""
location, path_spec = self._ResolvePath(
path, expand_variables=expand_variables)
if not location or not path_spec:
return
# Note that we don't want to set the keyword arguments when not used because
# the path specification base class will check for unused keyword arguments
# and raise.
kwargs = path_spec_factory.Factory.GetProperties(path_spec)
kwargs[u'location'] = location
if not path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
kwargs[u'parent'] = self._mount_point
return path_spec_factory.Factory.NewPathSpec(
self._file_system.type_indicator, **kwargs)
def SetEnvironmentVariable(self, name, value):
"""Sets an environment variable in the Windows path helper.
Args:
name: the name of the environment variable without enclosing
%-characters, e.g. SystemRoot as in %SystemRoot%.
value: the value of the environment variable.
"""
value = self._PathStripPrefix(value)
if value is not None:
self._environment_variables[name.upper()] = value
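# Editor's note: a minimal, standalone sketch (not part of dfvfs) that mirrors
# the prefix-stripping rules implemented by _PathStripPrefix above, using only
# the standard library. The helper name and the example values are
# hypothetical and purely illustrative.
def _sketch_strip_windows_prefix(path, separator=u'\\'):
  """Returns the path without its Windows prefix, or None if unsupported."""
  if path.startswith(u'\\\\.\\') or path.startswith(u'\\\\?\\'):
    # Device or extended-length path, e.g. \\.\C:\Windows.
    if len(path) < 7 or path[5] != u':' or path[6] != separator:
      return None
    return path[7:]
  if path.startswith(u'\\\\'):
    # UNC path, e.g. \\server\share\file.txt, is not supported.
    return None
  if len(path) >= 3 and path[1] == u':':
    # Volume path, e.g. C:\Windows; volume-relative C:Windows is not supported.
    if path[2] != separator:
      return None
    return path[3:]
  if path.startswith(u'\\'):
    # Local absolute path, e.g. \Windows\System32.
    return path[1:]
  # Relative paths are not supported.
  return None

# Expected behaviour, derived from the path forms listed in the comment above:
#   _sketch_strip_windows_prefix(u'\\\\.\\C:\\Windows')    -> u'Windows'
#   _sketch_strip_windows_prefix(u'C:\\Windows\\System32') -> u'Windows\\System32'
#   _sketch_strip_windows_prefix(u'\\\\server\\share\\f')  -> None
#   _sketch_strip_windows_prefix(u'..\\file.txt')          -> None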
|
manashmndl/dfvfs
|
dfvfs/helpers/windows_path_resolver.py
|
Python
|
apache-2.0
| 9,210
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for manipulating tensors.
See the @{$python/array_ops} guide.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
@@bitcast
@@saturate_cast
@@broadcast_dynamic_shape
@@broadcast_static_shape
@@shape
@@shape_n
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
@@meshgrid
@@slice
@@strided_slice
@@split
@@tile
@@pad
@@concat
@@stack
@@parallel_stack
@@unstack
@@reverse_sequence
@@reverse
@@reverse_v2
@@transpose
@@extract_image_patches
@@space_to_batch_nd
@@space_to_batch
@@required_space_to_batch_paddings
@@batch_to_space_nd
@@batch_to_space
@@space_to_depth
@@depth_to_space
@@gather
@@gather_nd
@@unique_with_counts
@@scatter_nd
@@dynamic_partition
@@dynamic_stitch
@@boolean_mask
@@one_hot
@@sequence_mask
@@dequantize
@@quantize_v2
@@quantized_concat
@@setdiff1d
@@fake_quant_with_min_max_args
@@fake_quant_with_min_max_args_gradient
@@fake_quant_with_min_max_vars
@@fake_quant_with_min_max_vars_gradient
@@fake_quant_with_min_max_vars_per_channel
@@fake_quant_with_min_max_vars_per_channel_gradient
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_baseslice = slice
# pylint: disable=redefined-builtin,protected-access
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`.
name: The name of the output `Tensor`.
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if both `dim` and `axis` are specified.
"""
# TODO(aselle): Remove argument dim
if dim is not None:
if axis is not None:
raise ValueError("can't specify both 'dim' and 'axis'")
axis = dim
return gen_array_ops._expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecated(
"2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops._list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops._list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable,protected-access
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops._list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops._list_diff.__doc__
# pylint: enable=protected-access
def broadcast_dynamic_shape(shape_x, shape_y):
# pylint: disable=protected-access
"""Returns the broadcasted dynamic shape between `shape_x` and `shape_y`.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops._broadcast_args(shape_x, shape_y)
# pylint: enable=protected-access
def broadcast_static_shape(shape_x, shape_y):
"""Returns the broadcasted static shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
This operation returns an integer representing the number of elements in
`input`.
For example:
```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
    A `Tensor` of type `out_type`.
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops._prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
This operation returns an integer representing the rank of `input`.
For example:
```python
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# shape of tensor 't' is [2, 2, 3]
rank(t) ==> 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
def _SliceHelper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
  The notation is similar to NumPy with the restriction that it currently
  only supports basic indexing. That means that using a tensor as input is
  not currently allowed.
Some useful examples:
```python
# strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # => [3,4]
  # take every other row and reverse the columns
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]], [[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
- An implicit ellipsis is placed at the end of the `slice_spec`
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable
object to slice (i.e. tensor is the read-only view of this
variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _baseslice):
strides.append(s.step if s.step is not None else 1)
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
      if s.start is not None and s.start != sys.maxsize:
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and s.stop != sys.maxsize:
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
begin.append(s)
end.append(s + 1)
if isinstance(s, ops.Tensor):
strides.append(constant(1, s.dtype))
else:
strides.append(np.ones_like(s).dtype.type(1))
shrink_axis_mask |= (1 << index)
index += 1
  # stack possibly involves no tensors, so we must use name_scope to get the
  # correct graph.
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
if begin:
packed_begin, packed_end, packed_strides = (
stack(begin), stack(end), stack(strides))
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
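# Editor's note: a minimal, standalone sketch (not part of TensorFlow) that
# mirrors the bookkeeping done by _SliceHelper above for plain Python indices,
# to make the bit-mask encoding easier to follow. The helper name is
# hypothetical; only int/slice/Ellipsis/None specs are handled and the
# sys.maxsize special case above is omitted.
def _sketch_slice_masks(slice_spec):
  """Returns (begin, end, strides, masks) for a Python slice specification."""
  if not isinstance(slice_spec, (list, tuple)):
    slice_spec = [slice_spec]
  begin, end, strides = [], [], []
  masks = {"begin_mask": 0, "end_mask": 0, "ellipsis_mask": 0,
           "new_axis_mask": 0, "shrink_axis_mask": 0}
  for index, s in enumerate(slice_spec):
    if isinstance(s, _baseslice):
      strides.append(1 if s.step is None else s.step)
      begin.append(0 if s.start is None else s.start)
      if s.start is None:
        masks["begin_mask"] |= (1 << index)
      end.append(0 if s.stop is None else s.stop)
      if s.stop is None:
        masks["end_mask"] |= (1 << index)
    elif s is Ellipsis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      masks["ellipsis_mask"] |= (1 << index)
    elif s is None:  # tf.newaxis
      begin.append(0)
      end.append(0)
      strides.append(1)
      masks["new_axis_mask"] |= (1 << index)
    else:  # a single integer index shrinks the dimension away
      begin.append(s)
      end.append(s + 1)
      strides.append(1)
      masks["shrink_axis_mask"] |= (1 << index)
  return begin, end, strides, masks

# For foo[1:, ..., None, 3] the spec is (slice(1, None), Ellipsis, None, 3),
# which yields begin=[1, 0, 0, 3], end=[0, 0, 0, 4], strides=[1, 1, 1, 1],
# begin_mask=0, end_mask=0b0001, ellipsis_mask=0b0010, new_axis_mask=0b0100
# and shrink_axis_mask=0b1000.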
# pylint: disable=undefined-variable,protected-access
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input`. In other
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
Note that @{tf.Tensor.__getitem__} is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
  `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
# 'input' is [[[1, 1, 1], [2, 2, 2]],
# [[3, 3, 3], [4, 4, 4]],
# [[5, 5, 5], [6, 6, 6]]]
tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
[4, 4, 4]]]
tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
[[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Most users will want to use @{tf.Tensor.__getitem__} and
@{tf.Variable.__getitem__}.** That allows NumPy style slicing syntax (i.e.
`tensor[..., 3:4:-1, tf.newaxis, 3]`).
  This op is the low-level interface that is used to implement those
  operators. Those interfaces are much friendlier, and are highly recommended.
To a first order, this operation extracts a slice of size `end - begin`
from a tensor `input`
starting at the location specified by `begin`. The slice continues by adding
`stride` to the `begin` index until all dimensions are not less than `end`.
Note that components of stride can be negative, which causes a reverse
slice.
  This operation can be thought of as an encoding of a numpy style sliced
  range. Given a python slice `input[<spec0>, <spec1>, ..., <specn>]`,
  this function will be called as follows.
  `begin`, `end`, and `strides` will all be of length n. n is in general
not the same dimensionality as `input`.
For the ith spec,
`begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask`,
and `shrink_axis_mask` will have the ith bit corresponding to
the ith spec.
If the ith bit of `begin_mask` is non-zero, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
  `foo[::-1]` reverses a 1-D tensor of shape [8].
If the ith bit of `ellipsis_mask` is non-zero, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is one, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
  For example `foo[3:5,4]` on a 10x8 tensor produces a tensor of shape [2]
  (with `shrink_axis_mask` being 1<<1 == 2), whereas `foo[3:5,4:5]` produces
  a tensor of shape [2, 1].
  If the ith bit of `shrink_axis_mask` is one, then `begin[i]`,
  `end[i]`, and `stride[i]` are used to do a slice in the appropriate
  dimension, but the output tensor will be reduced in dimensionality
  by one. This is only valid if the ith slice selects exactly one element.
  NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
# 'input' is [[[1, 1, 1], [2, 2, 2]],
# [[3, 3, 3], [4, 4, 4]],
# [[5, 5, 5], [6, 6, 6]]]
tf.strided_slice(input, [1, 0, 0], [2, 1, 3], [1, 1, 1]) ==> [[[3, 3, 3]]]
tf.strided_slice(input, [1, 0, 0], [2, 2, 3], [1, 1, 1]) ==> [[[3, 3, 3],
[4, 4, 4]]]
tf.strided_slice(input, [1, -1, 0], [2, -3, 3], [1, -1, 1]) ==>[[[4, 4, 4],
[3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
op.assign = assign
return op
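# Editor's note: a short, standalone NumPy illustration (not part of
# TensorFlow) of the shrink/new-axis behaviour described in the docstring
# above: an integer index removes the dimension, a size-1 slice keeps it and
# newaxis inserts one.
import numpy as np

_foo = np.zeros((10, 8))
assert _foo[3:5, 4].shape == (2,)                     # integer index shrinks the axis
assert _foo[3:5, 4:5].shape == (2, 1)                 # slice 4:5 keeps a length-1 axis
assert _foo[3:5, np.newaxis, 4:5].shape == (2, 1, 1)  # newaxis inserts a new axis
del _foo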
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
  of a variable. See @{tf.Tensor$`Tensor.__getitem__`}
for detailed examples of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
    print(sess.run(A[:2, :2]))  # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
    print(sess.run(op))  # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
As an operator. The operator also has a `assign()` method
that can be used to generate an assignment operator.
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
return _SliceHelper(var._AsTensor(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _SliceHelper)
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
# 'x' is [1, 4]
# 'y' is [2, 5]
# 'z' is [3, 6]
parallel_stack([x, y, z]) # => [[1, 4], [2, 5], [3, 6]]
```
  The difference between `stack` and `parallel_stack` is that `stack` requires
  all of the inputs be computed before the operation will begin, but doesn't
  require that the input shapes be known during graph construction.
  `parallel_stack` will copy pieces of the input into the output as they become
  available; in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is
tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops._parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```python
# 'x' is [1, 4]
# 'y' is [2, 5]
# 'z' is [3, 6]
stack([x, y, z]) # => [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
stack([x, y, z], axis=1) # => [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unstack. The numpy equivalent is
```python
tf.stack([x, y, z]) = np.asarray([x, y, z])
```
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Supports negative indexes.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name).get_shape()
if value_shape.ndims is not None:
expanded_num_dims = value_shape.ndims + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -expanded_num_dims, expanded_num_dims))
return gen_array_ops._pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError(
"Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" % (elem.dtype, dtype, elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops._pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be
converted to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref:
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is not None and dtype != inferred_dtype:
return NotImplemented
return _autopacking_helper(v, inferred_dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function(
(list, tuple), _autopacking_conversion_function, 99)
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of stack. The numpy equivalent is
tf.unstack(x, n) = list(x)
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first
dimension. Supports negative indexes.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops._unpack(value, num=num, axis=axis, name=name)
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) ==> [4, 3]
tf.shape(tf.concat([t3, t4], 1)) ==> [2, 6]
```
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(axis,
name="concat_dim",
dtype=dtypes.int32).get_shape(
).assert_is_compatible_with(tensor_shape.scalar())
return identity(values[0], name=scope)
return gen_array_ops._concat_v2(values=values,
axis=axis,
name=name)
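# Editor's note: a standalone NumPy check (not part of TensorFlow) of the
# rewrite suggested in the docstring above: concatenating tensors that were
# expanded along a new axis is the same as stacking them along that axis.
import numpy as np

_tensors = [np.arange(6).reshape(2, 3), np.arange(6, 12).reshape(2, 3)]
_axis = 1
_via_concat = np.concatenate([np.expand_dims(t, _axis) for t in _tensors],
                             axis=_axis)
_via_stack = np.stack(_tensors, axis=_axis)
assert _via_concat.shape == _via_stack.shape == (2, 2, 3)
assert np.array_equal(_via_concat, _via_stack)
del _tensors, _axis, _via_concat, _via_stack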
def boolean_mask(tensor, mask, name="boolean_mask"):
"""Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) ==> [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) ==> [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), squeeze_dims=[1])
return gather(reshaped_tensor, indices)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
shape_tensor[:ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops._prod(shape(tensor)[:ndims_mask], [0])
tensor = reshape(
tensor,
concat([[leading_size], shape(tensor)[ndims_mask:]], 0))
first_dim = shape_tensor[:ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape([first_dim])
.concatenate(shape_tensor[ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask)
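# Editor's note: a standalone NumPy sketch (not part of TensorFlow) of the
# flatten-then-gather strategy used by boolean_mask above: the leading K mask
# dimensions are collapsed and the flattened mask selects rows, which matches
# NumPy's native boolean indexing.
import numpy as np

_t = np.arange(24).reshape(2, 3, 4)                            # N-D tensor, N = 3
_mask = np.array([[True, False, True], [False, True, False]])  # K = 2
_k = _mask.ndim
_flat = _t.reshape((-1,) + _t.shape[_k:])   # collapse the leading K dims
_result = _flat[_mask.reshape(-1)]          # gather rows where the mask is True
assert _result.shape == (3, 4)
assert np.array_equal(_result, _t[_mask])   # same as native boolean masking
del _t, _mask, _k, _flat, _result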
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices => [12, 26, 37, 45]
tf.shape(a.values) => [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse_mask(a, [12, 45])
b.indices => [26, 37]
tf.shape(b.values) => [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = setdiff1d(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer type, `num_split`, then splits `value`
along dimension `axis` into `num_split` smaller tensors.
Requires that `num_split` evenly divides `value.shape[axis]`.
  If `num_or_size_splits` is not an integer type, it is presumed to be a Tensor
  `size_splits`, and `value` is split into `len(size_splits)` pieces. The shape
of the `i`-th piece has the same size as the `value` except along dimension
`axis` where the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) ==> [5, 4]
tf.shape(split1) ==> [5, 15]
tf.shape(split2) ==> [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) ==> [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either a 0-D integer `Tensor` indicating the number of
      splits along split_dim or a 1-D integer `Tensor` containing
the sizes of each output tensor along split_dim. If a scalar then it must
evenly divide `value.shape[axis]`; otherwise the sum of sizes along the
split dimension must match that of the `value`.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
`num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if size_splits.get_shape().ndims == 0 and size_splits.dtype.is_integer:
return gen_array_ops._split(
split_dim=axis, num_split=num_or_size_splits, value=value, name=name)
else:
if num is None:
size_splits_shape = size_splits.get_shape()
num = size_splits_shape.dims[0]
if num._value is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops._split_v(
value=value,
size_splits=size_splits,
split_dim=axis,
num_split=num,
name=name)
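# Editor's note: a standalone NumPy sketch (not part of TensorFlow) of the two
# split modes described in the docstring above: an integer count divides the
# axis evenly, while explicit sizes correspond to cumulative split points.
import numpy as np

_value = np.zeros((5, 30))
_even = np.split(_value, 3, axis=1)                         # like num_or_size_splits=3
assert [p.shape for p in _even] == [(5, 10)] * 3
_sizes = [4, 15, 11]
_uneven = np.split(_value, np.cumsum(_sizes)[:-1], axis=1)  # like size_splits=[4, 15, 11]
assert [p.shape for p in _uneven] == [(5, 4), (5, 15), (5, 11)]
del _value, _even, _sizes, _uneven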
def transpose(a, perm=None, name="transpose"):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example:
```python
# 'x' is [[1 2 3]
# [4 5 6]]
tf.transpose(x) ==> [[1 4]
[2 5]
[3 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) ==> [[1 4]
[2 5]
[3 6]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
# 'x' is [[[1 2 3]
# [4 5 6]]
# [[7 8 9]
# [10 11 12]]]
# Take the transpose of the matrices in dimension-0
tf.transpose(x, perm=[0, 2, 1]) ==> [[[1 4]
[2 5]
[3 6]]
[[7 10]
[8 11]
[9 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if perm is None:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
ret = gen_array_ops.transpose(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = gen_array_ops.transpose(a, perm, name=name)
return ret
# pylint: disable=invalid-name
def matrix_transpose(a, name="matrix_transpose"):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
# Matrix with no batch dimension.
# 'x' is [[1 2 3]
# [4 5 6]]
tf.matrix_transpose(x) ==> [[1 4]
[2 5]
[3 6]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.matrix_transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.matrix_transpose(b))
```
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat(
(gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm)
# pylint: enable=invalid-name
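# Editor's note: a standalone NumPy sketch (not part of TensorFlow) of the
# permutation built by matrix_transpose above: batch dimensions are left in
# place and only the last two axes are swapped.
import numpy as np

_x = np.zeros((1, 2, 3, 4))
_ndims = _x.ndim
_perm = list(range(_ndims - 2)) + [_ndims - 1, _ndims - 2]
assert _perm == [0, 1, 3, 2]
assert np.transpose(_x, _perm).shape == (1, 2, 4, 3)
del _x, _ndims, _perm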
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], tf.int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
try:
shape = tensor_shape.as_shape(shape)
output = constant(zero, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
# 'tensor' is [[1, 2, 3], [4, 5, 6]]
tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if tensor.shape.is_fully_defined():
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
if dtype is not None and dtype != tensor.dtype:
return zeros(shape_internal(tensor, optimize=optimize), dtype=dtype,
name=name)
else:
return gen_array_ops._zeros_like(tensor, name=name)
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
# 'tensor' is [[1, 2, 3], [4, 5, 6]]
tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, `complex128` or
`bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
ret.set_shape(tensor.get_shape())
return ret
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], tf.int32) ==> [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
try:
shape = tensor_shape.as_shape(shape)
output = constant(one, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Takes numpy array or Tensor or None and returns either None or Tensor."""
if shape is None: return None
if not isinstance(shape, ops.Tensor):
for el in shape:
if el is None:
return None
return ops.convert_to_tensor(shape, name=name)
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
shape_name = (name + "/shape") if name is not None else None
shape = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[None], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype, shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64, shape=[None, None],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
def pad(tensor, paddings, mode="CONSTANT", name=None): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1,], [2, 2]].
# rank of 't' is 2.
pad(t, paddings, "CONSTANT") ==> [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0, 0],
[0, 0, 4, 5, 6, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
pad(t, paddings, "REFLECT") ==> [[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1]]
pad(t, paddings, "SYMMETRIC") ==> [[2, 1, 1, 2, 3, 3, 2],
[2, 1, 1, 2, 3, 3, 2],
[5, 4, 4, 5, 6, 6, 5],
[5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
return gen_array_ops._pad(tensor, paddings, name=name)
if mode == "REFLECT":
return gen_array_ops._mirror_pad(tensor,
paddings,
mode="REFLECT",
name=name)
if mode == "SYMMETRIC":
return gen_array_ops._mirror_pad(tensor,
paddings,
mode="SYMMETRIC",
name=name)
raise ValueError("Unknown padding mode: %s" % mode)
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
```
results in
```python
X = [[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]
Y = [[4, 4, 4],
[5, 5, 5],
[6, 6, 6]]
```
Args:
*args: `Tensor`s with rank 1.
indexing: Either 'xy' or 'ij' (optional, default: 'xy').
name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
      output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,)*(ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,)*(ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO: improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
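# Editor's note: a standalone NumPy check (not part of TensorFlow) that the
# reshape-and-broadcast construction above reproduces the usual 'xy' meshgrid.
import numpy as np

_x = np.array([1, 2, 3])
_y = np.array([4, 5, 6])
_X, _Y = np.meshgrid(_x, _y, indexing="xy")
assert np.array_equal(_X, [[1, 2, 3], [1, 2, 3], [1, 2, 3]])
assert np.array_equal(_Y, [[4, 4, 4], [5, 5, 5], [6, 6, 6]])
# The same result via explicit reshape + broadcast, mirroring the code above:
assert np.array_equal(_X, _x.reshape(1, -1) * np.ones((3, 3), dtype=_x.dtype))
assert np.array_equal(_Y, _y.reshape(-1, 1) * np.ones((3, 3), dtype=_y.dtype))
del _x, _y, _X, _Y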
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
  unknown = None  # A size or stride of None means it is not statically known.
  use_full_range = None  # A slice start/stop of None means use the full range.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
stride = spec.step
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
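# Hedged worked example (hypothetical helper added for illustration; nothing in
# this module calls it): slicing a dimension of size 10 with slice(1, 7, 2)
# canonicalizes begin/end to 1 and 7, giving an interval length of 6 and
# 6 // 2 == 3 produced elements, the same as len(range(10)[1:7:2]).
def _strided_dim_size_example():
  return _compute_size_of_strided_dim(
      shrink=False, spec=slice(1, 7, 2), size=tensor_shape.Dimension(10))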
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
      ["a", "b"],
      (2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
       [1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(
hypothesis, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(
truth, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops._edit_distance(hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
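# Hedged sketch (hypothetical helper added for illustration; nothing in this
# module calls it): the docstring example above expressed with this module's
# own symbols.
def _edit_distance_example():
  hypothesis = sparse_tensor.SparseTensor(
      [[0, 0, 0], [1, 0, 0]], ["a", "b"], (2, 1, 1))
  truth = sparse_tensor.SparseTensor(
      [[0, 1, 0], [1, 0, 0], [1, 0, 1], [1, 1, 0]],
      ["a", "b", "c", "a"], (2, 2, 2))
  return edit_distance(hypothesis, truth, normalize=True)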
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
  Raises:
    ValueError: If called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(input_shape,
dtype=dtypes.int32,
name="input_shape")
block_shape = ops.convert_to_tensor(block_shape,
dtype=dtypes.int32,
name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape()[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(base_paddings,
dtype=dtypes.int32,
name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack(
[[0, pad_end_extra[i]] for i in range(num_block_dims)], name="crops")
return result_paddings, result_crops
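# Hedged usage sketch (hypothetical helper added for illustration; nothing in
# this module calls it): for input_shape [5, 7] and block_shape [2, 3] with no
# base paddings, the returned paddings grow the dimensions to 6 and 9 (the
# nearest multiples of the block sizes), and the crops are exactly the extra
# padding that was added.
def _required_paddings_example():
  return required_space_to_batch_paddings(input_shape=[5, 7],
                                          block_shape=[2, 3])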
def space_to_batch(input, paddings, block_size, name=None): # pylint: disable=redefined-builtin
result = space_to_batch_nd(input,
paddings=paddings,
block_shape=np.array([block_size, block_size],
dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops._space_to_batch.__doc__
def batch_to_space(input, crops, block_size, name=None): # pylint: disable=redefined-builtin
result = batch_to_space_nd(input,
crops=crops,
block_shape=np.array([block_size, block_size],
dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops._batch_to_space.__doc__
def one_hot(indices, depth, on_value=None, off_value=None,
axis=None, dtype=None, name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
  If `on_value` is not provided, it will default to the value `1` with type
  `dtype`.
  If `off_value` is not provided, it will default to the value `0` with type
  `dtype`.
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
If `indices` is a scalar the output shape will be a vector of length `depth`
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
Examples
=========
Suppose that
```python
indices = [0, 2, -1, 1]
depth = 3
on_value = 5.0
off_value = 0.0
axis = -1
```
Then output is `[4 x 3]`:
```python
output =
[5.0 0.0 0.0] // one_hot(0)
[0.0 0.0 5.0] // one_hot(2)
[0.0 0.0 0.0] // one_hot(-1)
[0.0 5.0 0.0] // one_hot(1)
```
Suppose that
```python
indices = [[0, 2], [1, -1]]
depth = 3
on_value = 1.0
off_value = 0.0
axis = -1
```
Then output is `[2 x 2 x 3]`:
```python
output =
[
[1.0, 0.0, 0.0] // one_hot(0)
[0.0, 0.0, 1.0] // one_hot(2)
][
[0.0, 1.0, 0.0] // one_hot(1)
[0.0, 0.0, 0.0] // one_hot(-1)
]
```
Using default values for `on_value` and `off_value`:
```python
indices = [0, 1, 2]
depth = 3
```
The output will be
```python
output =
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
Returns:
output: The one-hot tensor.
Raises:
    TypeError: If dtype of either `on_value` or `off_value` doesn't match `dtype`.
    TypeError: If dtypes of `on_value` and `off_value` don't match one another.
"""
with ops.name_scope(name, "one_hot", [indices, depth, on_value, off_value,
axis, dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists \
else None
off_dtype = ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists\
else None
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if (on_exists and on_dtype != dtype):
raise TypeError("dtype {0} of on_value does not match " \
"dtype parameter {1}".format(on_dtype, dtype))
if (off_exists and off_dtype != dtype):
raise TypeError("dtype {0} of off_value does not match " \
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match " \
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,
name)
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Return a mask tensor representing the first N positions of each row.
Example:
```python
tf.sequence_mask([1, 3, 2], 5) =
[[True, False, False, False, False],
[True, True, True, False, False],
[True, True, False, False, False]]
```
Args:
lengths: 1D integer tensor, all its values < maxlen.
maxlen: scalar integer tensor, maximum length of each row. Default: use
maximum over lengths.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A 2D mask tensor, as shown in the example above, cast to specified dtype.
Raises:
ValueError: if the arguments have invalid rank.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if lengths.get_shape().ndims != 1:
raise ValueError("lengths must be 1D for sequence_mask")
if maxlen is None:
maxlen = gen_math_ops._max(lengths, [0])
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
row_vector = gen_math_ops._range(constant(0, maxlen.dtype),
maxlen,
constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to cast the lengths to maxlen's
    # dtype: whenever maxlen fits into tf.int32, so do the lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, 1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
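# Hedged sketch (hypothetical helper added for illustration): the row-vector
# versus column broadcast described in the comments above, written with plain
# NumPy. It mirrors the docstring example sequence_mask([1, 3, 2], 5).
def _sequence_mask_broadcast_sketch():
  lengths = np.array([1, 3, 2])
  row_vector = np.arange(5)              # [0, 1, 2, 3, 4]
  return row_vector < lengths[:, None]   # boolean mask of shape (3, 5)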
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t)) # => [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t, [2, 4])) # => [1, 2, 3, 1]
```
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
if squeeze_dims is not None:
if axis is not None:
raise ValueError("Cannot specify both 'squeeze_dims' and 'axis'")
axis = squeeze_dims
if np.isscalar(axis):
axis = [axis]
return gen_array_ops._squeeze(input, axis, name)
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
  If both `x` and `y` are non-None, they must have the same shape.
  The `condition` tensor must be a scalar if `x` and `y` are scalars.
  If `x` and `y` are vectors or of higher rank, then `condition` must be either
  a vector with size matching the first dimension of `x`, or must have the same
  shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
y: A `tensor` with the same shape and type as `x`.
name: A name of the operation (optional)
Returns:
    A `Tensor` with the same type and shape as `x` and `y` if they are both
    non-None, or a `Tensor` with shape `(num_true, dim_size(condition))` if
    both are None.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
return gen_array_ops.where(input=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops._select(condition=condition, t=x, e=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
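# Hedged sketch (hypothetical helper added for illustration): the two behaviors
# of where() described above -- coordinate lookup when x and y are omitted, and
# elementwise selection when both are supplied.
def _where_examples():
  coords = where([True, False, True])                # -> [[0], [2]]
  selected = where([True, False], [1, 2], [10, 20])  # -> [1, 20]
  return coords, selected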
def reverse(tensor, axis, name=None):
return gen_array_ops.reverse_v2(tensor, axis, name)
reverse.__doc__ = gen_array_ops.reverse_v2.__doc__
# pylint: disable=redefined-builtin
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
# pylint: enable=redefined-builtin
reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
|
Bulochkin/tensorflow_pack
|
tensorflow/python/ops/array_ops.py
|
Python
|
apache-2.0
| 80,311
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq layer operations for use in neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = ["Decoder", "dynamic_decode"]
_transpose_batch_time = rnn._transpose_batch_time # pylint: disable=protected-access
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
@six.add_metaclass(abc.ABCMeta)
class Decoder(object):
"""An RNN Decoder abstract interface object.
Concepts used by this interface:
- `inputs`: (structure of) tensors and TensorArrays that is passed as input to
the RNNCell composing the decoder, at each time step.
- `state`: (structure of) tensors and TensorArrays that is passed to the
RNNCell instance as the state.
- `finished`: boolean tensor telling whether each sequence in the batch is
finished.
- `outputs`: Instance of BasicDecoderOutput. Result of the decoding, at each
time step.
"""
@property
def batch_size(self):
"""The batch size of input values."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape` object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
@abc.abstractmethod
def initialize(self, name=None):
"""Called before any decoding iterations.
This methods must compute initial input values and initial state.
Args:
name: Name scope for any created operations.
Returns:
`(finished, initial_inputs, initial_state)`: initial values of
'finished' flags, inputs and state.
"""
raise NotImplementedError
@abc.abstractmethod
def step(self, time, inputs, state, name=None):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor. Current step number.
inputs: RNNCell input (possibly nested tuple of) tensor[s] for this time
step.
state: RNNCell state (possibly nested tuple of) tensor[s] from previous
time step.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`: `outputs` is an object
containing the decoder output, `next_state` is a (structure of) state
tensors and TensorArrays, `next_inputs` is the tensor that should be used
as input for the next step, `finished` is a boolean tensor telling whether
the sequence is complete, for each sequence in the batch.
"""
raise NotImplementedError
def finalize(self, outputs, final_state, sequence_lengths):
raise NotImplementedError
@property
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `dynamic_decode` function keeps track
of which batch entries are already finished, and performs a logical OR to
insert new batches to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`dynamic_decode` will mix up the finished state across these entries because
it does not track the reshuffle across time steps. In this case, it is
up to the decoder to declare that it will keep track of its own finished
state by setting this property to `True`.
Returns:
Python bool.
"""
return False
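# Hedged sketch (hypothetical and for illustration only; not guaranteed to run
# through dynamic_decode end-to-end): the minimal surface a concrete Decoder has
# to provide. A real implementation such as a BasicDecoder also wires in an
# RNNCell and a sampling helper.
class _EchoDecoderSketch(Decoder):
  """Emits its fixed batch of inputs once, then reports every sequence done."""
  def __init__(self, inputs):
    self._inputs = ops.convert_to_tensor(inputs)
  @property
  def batch_size(self):
    return array_ops.shape(self._inputs)[0]
  @property
  def output_size(self):
    return self._inputs.get_shape()[1:]
  @property
  def output_dtype(self):
    return self._inputs.dtype
  def initialize(self, name=None):
    finished = array_ops.zeros([self.batch_size], dtype=dtypes.bool)
    return finished, self._inputs, ()
  def step(self, time, inputs, state, name=None):
    finished = array_ops.ones([self.batch_size], dtype=dtypes.bool)
    return inputs, state, inputs, finished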
def _create_zero_outputs(size, dtype, batch_size):
"""Create a zero outputs Tensor structure."""
def _create(s, d):
return _zero_state_tensors(s, batch_size, d)
return nest.map_structure(_create, size, dtype)
def dynamic_decode(decoder,
output_time_major=False,
impute_finished=False,
maximum_iterations=None,
parallel_iterations=32,
swap_memory=False,
scope=None):
"""Perform dynamic decoding with `decoder`.
Calls initialize() once and step() repeatedly on the Decoder object.
Args:
decoder: A `Decoder` instance.
output_time_major: Python boolean. Default: `False` (batch major). If
`True`, outputs are returned as time major tensors (this mode is faster).
Otherwise, outputs are returned as batch major tensors (this adds extra
time to the computation).
impute_finished: Python boolean. If `True`, then states for batch
entries which are marked as finished get copied through and the
corresponding outputs get zeroed out. This causes some slowdown at
each time step, but ensures that the final state and outputs have
the correct values and that backprop ignores time steps that were
marked as finished.
maximum_iterations: `int32` scalar, maximum allowed number of decoding
steps. Default is `None` (decode until the decoder is fully done).
parallel_iterations: Argument passed to `tf.while_loop`.
swap_memory: Argument passed to `tf.while_loop`.
scope: Optional variable scope to use.
Returns:
`(final_outputs, final_state, final_sequence_lengths)`.
Raises:
TypeError: if `decoder` is not an instance of `Decoder`.
ValueError: if `maximum_iterations` is provided but is not a scalar.
"""
if not isinstance(decoder, Decoder):
raise TypeError("Expected decoder to be type Decoder, but saw: %s" %
type(decoder))
with variable_scope.variable_scope(scope, "decoder") as varscope:
# Properly cache variable values inside the while_loop
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
if maximum_iterations is not None:
maximum_iterations = ops.convert_to_tensor(
maximum_iterations, dtype=dtypes.int32, name="maximum_iterations")
if maximum_iterations.get_shape().ndims != 0:
raise ValueError("maximum_iterations must be a scalar")
initial_finished, initial_inputs, initial_state = decoder.initialize()
zero_outputs = _create_zero_outputs(decoder.output_size,
decoder.output_dtype,
decoder.batch_size)
if maximum_iterations is not None:
initial_finished = math_ops.logical_or(
initial_finished, 0 >= maximum_iterations)
initial_sequence_lengths = array_ops.zeros_like(
initial_finished, dtype=dtypes.int32)
initial_time = constant_op.constant(0, dtype=dtypes.int32)
def _shape(batch_size, from_shape):
if (not isinstance(from_shape, tensor_shape.TensorShape) or
from_shape.ndims == 0):
return tensor_shape.TensorShape(None)
else:
batch_size = tensor_util.constant_value(
ops.convert_to_tensor(
batch_size, name="batch_size"))
return tensor_shape.TensorShape([batch_size]).concatenate(from_shape)
def _create_ta(s, d):
return tensor_array_ops.TensorArray(
dtype=d,
size=0,
dynamic_size=True,
element_shape=_shape(decoder.batch_size, s))
initial_outputs_ta = nest.map_structure(_create_ta, decoder.output_size,
decoder.output_dtype)
def condition(unused_time, unused_outputs_ta, unused_state, unused_inputs,
finished, unused_sequence_lengths):
return math_ops.logical_not(math_ops.reduce_all(finished))
def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
"""Internal while_loop body.
Args:
time: scalar int32 tensor.
outputs_ta: structure of TensorArray.
state: (structure of) state tensors and TensorArrays.
inputs: (structure of) input tensors.
finished: bool tensor (keeping track of what's finished).
sequence_lengths: int32 tensor (keeping track of time of finish).
Returns:
`(time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)`.
"""
(next_outputs, decoder_state, next_inputs,
decoder_finished) = decoder.step(time, inputs, state)
if decoder.tracks_own_finished:
next_finished = decoder_finished
else:
next_finished = math_ops.logical_or(decoder_finished, finished)
if maximum_iterations is not None:
next_finished = math_ops.logical_or(
next_finished, time + 1 >= maximum_iterations)
next_sequence_lengths = array_ops.where(
math_ops.logical_and(math_ops.logical_not(finished), next_finished),
array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
sequence_lengths)
nest.assert_same_structure(state, decoder_state)
nest.assert_same_structure(outputs_ta, next_outputs)
nest.assert_same_structure(inputs, next_inputs)
# Zero out output values past finish
if impute_finished:
emit = nest.map_structure(
lambda out, zero: array_ops.where(finished, zero, out),
next_outputs,
zero_outputs)
else:
emit = next_outputs
# Copy through states past finish
def _maybe_copy_state(new, cur):
# TensorArrays and scalar states get passed through.
if isinstance(cur, tensor_array_ops.TensorArray):
pass_through = True
else:
new.set_shape(cur.shape)
pass_through = (new.shape.ndims == 0)
return new if pass_through else array_ops.where(finished, cur, new)
if impute_finished:
next_state = nest.map_structure(
_maybe_copy_state, decoder_state, state)
else:
next_state = decoder_state
outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
outputs_ta, emit)
return (time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)
res = control_flow_ops.while_loop(
condition,
body,
loop_vars=[
initial_time, initial_outputs_ta, initial_state, initial_inputs,
initial_finished, initial_sequence_lengths,
],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
final_outputs_ta = res[1]
final_state = res[2]
final_sequence_lengths = res[5]
final_outputs = nest.map_structure(lambda ta: ta.stack(), final_outputs_ta)
try:
final_outputs, final_state = decoder.finalize(
final_outputs, final_state, final_sequence_lengths)
except NotImplementedError:
pass
if not output_time_major:
final_outputs = nest.map_structure(_transpose_batch_time, final_outputs)
return final_outputs, final_state, final_sequence_lengths
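# Hedged usage sketch (hypothetical helper, illustration only): a typical call
# site. `my_decoder` is assumed to be a concrete Decoder instance, for example a
# seq2seq BasicDecoder.
def _dynamic_decode_usage_sketch(my_decoder):
  return dynamic_decode(my_decoder, impute_finished=True, maximum_iterations=50)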
|
allenlavoie/tensorflow
|
tensorflow/contrib/seq2seq/python/ops/decoder.py
|
Python
|
apache-2.0
| 12,173
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Tyler Smith, Cisco Systems
import logging
import unittest
import re
from quantum.common.serializer import Serializer
from quantum.client import Client
LOG = logging.getLogger('quantum.tests.test_api')
# Set a couple tenants to use for testing
TENANT_1 = 'totore'
TENANT_2 = 'totore2'
class ServerStub():
"""This class stubs a basic server for the API client to talk to"""
class Response(object):
"""This class stubs a basic response to send the API client"""
def __init__(self, content=None, status=None):
self.content = content
self.status = status
def read(self):
return self.content
def status(self):
return self.status
# To test error codes, set the host to 10.0.0.1, and the port to the code
def __init__(self, host, port=9696, key_file="", cert_file=""):
self.host = host
self.port = port
self.key_file = key_file
self.cert_file = cert_file
def request(self, method, action, body, headers):
self.method = method
self.action = action
self.body = body
def status(self, status=None):
return status or 200
def getresponse(self):
res = self.Response(status=self.status())
# If the host is 10.0.0.1, return the port as an error code
if self.host == "10.0.0.1":
res.status = self.port
return res
# Extract important information from the action string to assure sanity
        match = re.search(r'tenants/(.+?)/(.+)\.(json|xml)$', self.action)
tenant = match.group(1)
path = match.group(2)
format = match.group(3)
data = {'data': {'method': self.method, 'action': self.action,
'body': self.body, 'tenant': tenant, 'path': path,
'format': format, 'key_file': self.key_file,
'cert_file': self.cert_file}}
# Serialize it to the proper format so the API client can handle it
if data['data']['format'] == 'json':
res.content = Serializer().serialize(data, "application/json")
else:
res.content = Serializer().serialize(data, "application/xml")
return res
class APITest(unittest.TestCase):
def setUp(self):
""" Setups a test environment for the API client """
HOST = '127.0.0.1'
PORT = 9696
USE_SSL = False
self.client = Client(HOST, PORT, USE_SSL, TENANT_1, 'json', ServerStub)
def _assert_sanity(self, call, status, method, path, data=[], params={}):
""" Perform common assertions to test the sanity of client requests """
# Handle an error case first
if status != 200:
(self.client.host, self.client.port) = ("10.0.0.1", status)
self.assertRaises(Exception, call, *data, **params)
return
# Make the call, then get the data from the root node and assert it
data = call(*data, **params)['data']
self.assertEqual(data['method'], method)
self.assertEqual(data['format'], params['format'])
self.assertEqual(data['tenant'], params['tenant'])
self.assertEqual(data['path'], path)
return data
def _test_list_networks(self, tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_list_networks - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.list_networks,
status,
"GET",
"networks",
data=[],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_list_networks - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_show_network_details(self,
tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_show_network_details - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.show_network_details,
status,
"GET",
"networks/001",
data=["001"],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_show_network_details - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_create_network(self, tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_create_network - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.create_network,
status,
"POST",
"networks",
data=[{'network': {'net-name': 'testNetwork'}}],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_create_network - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_update_network(self, tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_update_network - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.update_network,
status,
"PUT",
"networks/001",
data=["001",
{'network': {'net-name': 'newName'}}],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_update_network - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_delete_network(self, tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_delete_network - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.delete_network,
status,
"DELETE",
"networks/001",
data=["001"],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_delete_network - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_list_ports(self, tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_list_ports - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.list_ports,
status,
"GET",
"networks/001/ports",
data=["001"],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_list_ports - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_show_port_details(self,
tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_show_port_details - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.show_port_details,
status,
"GET",
"networks/001/ports/001",
data=["001", "001"],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_show_port_details - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_create_port(self, tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_create_port - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.create_port,
status,
"POST",
"networks/001/ports",
data=["001"],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_create_port - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_delete_port(self, tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_delete_port - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.delete_port,
status,
"DELETE",
"networks/001/ports/001",
data=["001", "001"],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_delete_port - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_update_port(self, tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_update_port - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.update_port,
status,
"PUT",
"networks/001/ports/001",
data=["001", "001",
{'port': {'state': 'ACTIVE'}}],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_update_port - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_show_port_attachment(self,
tenant=TENANT_1, format='json', status=200):
LOG.debug("_test_show_port_attachment - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.show_port_attachment,
status,
"GET",
"networks/001/ports/001/attachment",
data=["001", "001"],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_show_port_attachment - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_attach_resource(self, tenant=TENANT_1,
format='json', status=200):
LOG.debug("_test_attach_resource - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.attach_resource,
status,
"PUT",
"networks/001/ports/001/attachment",
data=["001", "001",
{'resource': {'id': '1234'}}],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_attach_resource - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_detach_resource(self, tenant=TENANT_1,
format='json', status=200):
LOG.debug("_test_detach_resource - tenant:%s "\
"- format:%s - START", format, tenant)
self._assert_sanity(self.client.detach_resource,
status,
"DELETE",
"networks/001/ports/001/attachment",
data=["001", "001"],
params={'tenant': tenant, 'format': format})
LOG.debug("_test_detach_resource - tenant:%s "\
"- format:%s - END", format, tenant)
def _test_ssl_certificates(self, tenant=TENANT_1,
format='json', status=200):
LOG.debug("_test_ssl_certificates - tenant:%s "\
"- format:%s - START", format, tenant)
# Set SSL, and our cert file
self.client.use_ssl = True
cert_file = "/fake.cert"
self.client.key_file = self.client.cert_file = cert_file
data = self._assert_sanity(self.client.list_networks,
status,
"GET",
"networks",
data=[],
params={'tenant': tenant, 'format': format})
self.assertEquals(data["key_file"], cert_file)
self.assertEquals(data["cert_file"], cert_file)
LOG.debug("_test_ssl_certificates - tenant:%s "\
"- format:%s - END", format, tenant)
def test_list_networks_json(self):
self._test_list_networks(format='json')
def test_list_networks_xml(self):
self._test_list_networks(format='xml')
def test_list_networks_alt_tenant(self):
self._test_list_networks(tenant=TENANT_2)
def test_list_networks_error_470(self):
self._test_list_networks(status=470)
def test_list_networks_error_401(self):
self._test_list_networks(status=401)
def test_show_network_details_json(self):
self._test_show_network_details(format='json')
def test_show_network_details_xml(self):
self._test_show_network_details(format='xml')
def test_show_network_details_alt_tenant(self):
self._test_show_network_details(tenant=TENANT_2)
def test_show_network_details_error_470(self):
self._test_show_network_details(status=470)
def test_show_network_details_error_401(self):
self._test_show_network_details(status=401)
def test_show_network_details_error_420(self):
self._test_show_network_details(status=420)
def test_create_network_json(self):
self._test_create_network(format='json')
def test_create_network_xml(self):
self._test_create_network(format='xml')
def test_create_network_alt_tenant(self):
self._test_create_network(tenant=TENANT_2)
def test_create_network_error_470(self):
self._test_create_network(status=470)
def test_create_network_error_401(self):
self._test_create_network(status=401)
def test_create_network_error_400(self):
self._test_create_network(status=400)
def test_create_network_error_422(self):
self._test_create_network(status=422)
def test_update_network_json(self):
self._test_update_network(format='json')
def test_update_network_xml(self):
self._test_update_network(format='xml')
def test_update_network_alt_tenant(self):
self._test_update_network(tenant=TENANT_2)
def test_update_network_error_470(self):
self._test_update_network(status=470)
def test_update_network_error_401(self):
self._test_update_network(status=401)
def test_update_network_error_400(self):
self._test_update_network(status=400)
def test_update_network_error_420(self):
self._test_update_network(status=420)
def test_update_network_error_422(self):
self._test_update_network(status=422)
def test_delete_network_json(self):
self._test_delete_network(format='json')
def test_delete_network_xml(self):
self._test_delete_network(format='xml')
def test_delete_network_alt_tenant(self):
self._test_delete_network(tenant=TENANT_2)
def test_delete_network_error_470(self):
self._test_delete_network(status=470)
def test_delete_network_error_401(self):
self._test_delete_network(status=401)
def test_delete_network_error_420(self):
self._test_delete_network(status=420)
def test_delete_network_error_421(self):
self._test_delete_network(status=421)
def test_list_ports_json(self):
self._test_list_ports(format='json')
def test_list_ports_xml(self):
self._test_list_ports(format='xml')
def test_list_ports_alt_tenant(self):
self._test_list_ports(tenant=TENANT_2)
def test_list_ports_error_470(self):
self._test_list_ports(status=470)
def test_list_ports_error_401(self):
self._test_list_ports(status=401)
def test_list_ports_error_420(self):
self._test_list_ports(status=420)
def test_show_port_details_json(self):
self._test_list_ports(format='json')
def test_show_port_details_xml(self):
self._test_list_ports(format='xml')
def test_show_port_details_alt_tenant(self):
self._test_list_ports(tenant=TENANT_2)
def test_show_port_details_error_470(self):
self._test_show_port_details(status=470)
def test_show_port_details_error_401(self):
self._test_show_port_details(status=401)
def test_show_port_details_error_420(self):
self._test_show_port_details(status=420)
def test_show_port_details_error_430(self):
self._test_show_port_details(status=430)
def test_create_port_json(self):
self._test_create_port(format='json')
def test_create_port_xml(self):
self._test_create_port(format='xml')
def test_create_port_alt_tenant(self):
self._test_create_port(tenant=TENANT_2)
def test_create_port_error_470(self):
self._test_create_port(status=470)
def test_create_port_error_401(self):
self._test_create_port(status=401)
def test_create_port_error_400(self):
self._test_create_port(status=400)
def test_create_port_error_420(self):
self._test_create_port(status=420)
def test_create_port_error_430(self):
self._test_create_port(status=430)
def test_create_port_error_431(self):
self._test_create_port(status=431)
def test_delete_port_json(self):
self._test_delete_port(format='json')
def test_delete_port_xml(self):
self._test_delete_port(format='xml')
def test_delete_port_alt_tenant(self):
self._test_delete_port(tenant=TENANT_2)
def test_delete_port_error_470(self):
self._test_delete_port(status=470)
def test_delete_port_error_401(self):
self._test_delete_port(status=401)
def test_delete_port_error_420(self):
self._test_delete_port(status=420)
def test_delete_port_error_430(self):
self._test_delete_port(status=430)
def test_delete_port_error_432(self):
self._test_delete_port(status=432)
def test_update_port_json(self):
self._test_update_port(format='json')
def test_update_port_xml(self):
self._test_update_port(format='xml')
def test_update_port_alt_tenant(self):
self._test_update_port(tenant=TENANT_2)
def test_update_port_error_470(self):
self._test_update_port(status=470)
def test_update_port_error_401(self):
self._test_update_port(status=401)
def test_update_port_error_400(self):
self._test_update_port(status=400)
def test_update_port_error_420(self):
self._test_update_port(status=420)
def test_update_port_error_430(self):
self._test_update_port(status=430)
def test_update_port_error_431(self):
self._test_update_port(status=431)
def test_show_port_attachment_json(self):
self._test_show_port_attachment(format='json')
def test_show_port_attachment_xml(self):
self._test_show_port_attachment(format='xml')
def test_show_port_attachment_alt_tenant(self):
self._test_show_port_attachment(tenant=TENANT_2)
def test_show_port_attachment_error_470(self):
self._test_show_port_attachment(status=470)
def test_show_port_attachment_error_401(self):
self._test_show_port_attachment(status=401)
def test_show_port_attachment_error_400(self):
self._test_show_port_attachment(status=400)
def test_show_port_attachment_error_420(self):
self._test_show_port_attachment(status=420)
def test_show_port_attachment_error_430(self):
self._test_show_port_attachment(status=430)
def test_attach_resource_json(self):
self._test_attach_resource(format='json')
def test_attach_resource_xml(self):
self._test_attach_resource(format='xml')
def test_attach_resource_alt_tenant(self):
self._test_attach_resource(tenant=TENANT_2)
def test_attach_resource_error_470(self):
self._test_attach_resource(status=470)
def test_attach_resource_error_401(self):
self._test_attach_resource(status=401)
def test_attach_resource_error_400(self):
self._test_attach_resource(status=400)
def test_attach_resource_error_420(self):
self._test_attach_resource(status=420)
def test_attach_resource_error_430(self):
self._test_attach_resource(status=430)
def test_attach_resource_error_432(self):
self._test_attach_resource(status=432)
def test_attach_resource_error_440(self):
self._test_attach_resource(status=440)
def test_detach_resource_json(self):
self._test_detach_resource(format='json')
def test_detach_resource_xml(self):
self._test_detach_resource(format='xml')
def test_detach_resource_alt_tenant(self):
self._test_detach_resource(tenant=TENANT_2)
def test_detach_resource_error_470(self):
self._test_detach_resource(status=470)
def test_detach_resource_error_401(self):
self._test_detach_resource(status=401)
def test_detach_resource_error_420(self):
self._test_detach_resource(status=420)
def test_detach_resource_error_430(self):
self._test_detach_resource(status=430)
def test_ssl_certificates(self):
self._test_ssl_certificates()
|
emonty/python-quantumclient
|
quantum/client/tests/unit/test_clientlib.py
|
Python
|
apache-2.0
| 22,105
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Find lines in files that should be faithful copies, and check that they are.
Inside a comment-marked section, any chunk of indented lines should be
faithfully copied from FILENAME. The indented lines are dedented before
comparing.
The section is between these comments:
.. copied_from <FILENAME>
.. end_copied_from
This tool will print any mismatches, and then exit with a count of mismatches.
"""
import glob
from itertools import groupby
from operator import itemgetter
import re
import sys
import textwrap
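# Hedged example (illustrative only) of the section markup described in the
# module docstring; note that find_copied_lines below matches the colon form
# ".. copied_from: <FILENAME>".
_EXAMPLE_SECTION = """\
.. copied_from: howto.py

    def example():
        return 42

.. end_copied_from
"""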
def check_copied_from(rst_name):
"""Check copies in a .rst file.
Prints problems. Returns count of bad copies.
"""
bad_copies = 0
file_read = None
file_text = None
with open(rst_name) as frst:
for filename, first_line, text in find_copied_chunks(frst):
if filename != file_read:
with open(filename) as f:
file_text = f.read()
file_read = filename
if text not in file_text:
print("{}:{}: Bad copy from {}, starting with {!r}".format(
rst_name, first_line, filename, text.splitlines()[0]
))
bad_copies += 1
return bad_copies
def find_copied_chunks(frst):
"""Find chunks of text that are meant to be faithful copies.
`frst` is an iterable of strings, the .rst text.
Yields (source_filename, first_line, text) tuples.
"""
for (_, filename), chunks in groupby(find_copied_lines(frst), itemgetter(0)):
chunks = list(chunks)
first_line = chunks[0][1]
text = textwrap.dedent("\n".join(map(itemgetter(2), chunks)))
yield filename, first_line, text
def find_copied_lines(frst):
"""Find lines of text that are meant to be faithful copies.
`frst` is an iterable of strings, the .rst text.
Yields tuples ((chunk_num, file_name), line_num, line).
`chunk_num` is an integer that is different for each distinct (blank
line separated) chunk of text, but has no meaning other than that.
`file_name` is the file the chunk should be copied from. `line_num`
is the line number in the .rst file, and `line` is the text of the line.
"""
in_section = False
source_file = None
chunk_num = 0
for line_num, line in enumerate(frst, start=1):
line = line.rstrip()
if in_section:
m = re.search(r"^.. end_copied_from", line)
if m:
in_section = False
else:
if re.search(r"^\s+\S", line):
# Indented line
yield (chunk_num, source_file), line_num, line
elif not line.strip():
# Blank line
chunk_num += 1
else:
m = re.search(r"^.. copied_from: (.*)", line)
if m:
in_section = True
source_file = m.group(1)
def main(args):
"""Check all the files in `args`, return count of bad copies."""
bad_copies = 0
for arg in args:
for fname in glob.glob(arg):
bad_copies += check_copied_from(fname)
return bad_copies
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
hugovk/coveragepy
|
doc/check_copied_from.py
|
Python
|
apache-2.0
| 3,400
|
# -*- coding: utf-8 -*-
'''
integration.cli_test
~~~~~~~~~~~~~~~~~~~~
CLI related unit testing
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
'''
# Import Python libs
from __future__ import absolute_import, print_function
# Import salt testing libs
from salttesting.unit import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
# Import salt libs
import integration # pylint: disable=import-error
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext.six.moves import range # pylint: disable=redefined-builtin
try:
import libcloud # pylint: disable=unused-import
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# pylint: enable=import-error
@skipIf(HAS_LIBCLOUD is False, 'salt-cloud requires >= libcloud 0.11.4')
class SaltCloudCliTest(integration.ShellCase,
integration.ShellCaseCommonTestsMixIn):
_call_binary_ = 'salt-cloud'
def test_function_arguments(self):
self.assertIn(
'salt-cloud: error: --function expects two arguments: '
'<function-name> <provider>',
self.run_cloud('--function show_image -h', catch_stderr=True)[1]
)
def test_list_providers_accepts_no_arguments(self):
self.assertIn(
'salt-cloud: error: \'--list-providers\' does not accept any '
'arguments',
self.run_cloud('--list-providers ec2', catch_stderr=True)[1]
)
def test_mutually_exclusive_query_options(self):
test_options = [
'--query', '--full-query', '--select-query', '--list-providers'
]
while True:
for idx in range(1, len(test_options)):
self.assertIn(
'salt-cloud: error: The options {0}/{1} are mutually '
'exclusive. Please only choose one of them'.format(
test_options[0], test_options[idx]
),
self.run_cloud(
'{0} {1}'.format(test_options[0], test_options[idx]),
catch_stderr=True)[1]
)
# Remove the first option from the list
test_options.pop(0)
if len(test_options) <= 1:
# Only one left? Stop iterating
break
def test_mutually_exclusive_list_options(self):
test_options = ['--list-locations', '--list-images', '--list-sizes']
while True:
for idx in range(1, len(test_options)):
output = self.run_cloud(
'{0} ec2 {1} ec2'.format(
test_options[0], test_options[idx]
), catch_stderr=True
)
try:
self.assertIn(
'salt-cloud: error: The options {0}/{1} are mutually '
'exclusive. Please only choose one of them'.format(
test_options[0], test_options[idx]
),
output[1]
)
except AssertionError:
print(output)
raise
# Remove the first option from the list
test_options.pop(0)
if len(test_options) <= 1:
# Only one left? Stop iterating
break
if __name__ == '__main__':
from integration import run_tests # pylint: disable=import-error
run_tests(SaltCloudCliTest)
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.3/tests/integration/shell/cloud.py
|
Python
|
apache-2.0
| 3,544
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
import os
import weakref
from traits.api import HasTraits, Str, Any, List, Property
from traitsui.tabular_adapter import TabularAdapter
# ============= local library imports ==========================
from pychron.core.helpers.filetools import created_datetime, modified_datetime
from pychron.paths import paths
class FilePathAdapter(TabularAdapter):
columns = [
("Name", "name"),
("Create", "create_date"),
("Modified", "modified_date"),
]
name_text = Property
font = "10"
def _get_name_text(self):
return os.path.relpath(self.item.path, paths.labbook_dir)
class FilePath(HasTraits):
name = Str
root = Any
root_path = Str
@property
def path(self):
"""
recursively assemble the path to this resource
"""
if self.root:
return os.path.join(self.root.path, self.name)
elif self.root_path:
return self.root_path
# return '{}/{}'.format(self.root.path, self.name)
else:
return self.name
class Hierarchy(FilePath):
children = List
chronology = Property(List) # Property(depends_on='refresh_needed, children')
# refresh_needed=Event
# def reset_chronology(self):
# self.refresh_needed=True
def _get_chronology(self):
files = self._flatten()
return sorted(files, key=lambda x: x.create_date, reverse=True)
def _flatten(self):
for ci in self.children:
if isinstance(ci, Hierarchy):
for x in ci._flatten():
yield x
else:
yield ci
def _children_changed(self):
for ci in self.children:
ci.root = weakref.ref(self)()
ci.create_date = created_datetime(ci.path)
ci.modified_date = modified_datetime(ci.path)
def pwalk(self):
for ci in self.children:
print(self.name, ci.path, ci.__class__.__name__)
if isinstance(ci, Hierarchy):
ci.pwalk()
# @property
# def high_post(self):
# c=self._get_chronology()
# return c[0]
#
# @property
# def low_post(self):
# c=self._get_chronology()
# return c[-1]
# ============= EOF =============================================
|
USGSDenverPychron/pychron
|
pychron/core/hierarchy.py
|
Python
|
apache-2.0
| 3,182
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import exceptions as lib_exc
from tempest import test
from neutron.tests.api import base_security_groups as base
from neutron.tests.tempest import config
CONF = config.CONF
class NegativeSecGroupTest(base.BaseSecGroupTest):
_tenant_network_cidr = CONF.network.tenant_network_cidr
@classmethod
@test.requires_ext(extension="security-group", service="network")
def resource_setup(cls):
super(NegativeSecGroupTest, cls).resource_setup()
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d')
def test_create_security_group_rule_with_invalid_ports(self):
group_create_body, _ = self._create_security_group()
# Create rule for tcp protocol with invalid ports
states = [(-16, 80, 'Invalid value for port -16'),
(80, 79, 'port_range_min must be <= port_range_max'),
(80, 65536, 'Invalid value for port 65536'),
(None, 6, 'port_range_min must be <= port_range_max'),
(-16, 65536, 'Invalid value for port')]
for pmin, pmax, msg in states:
ex = self.assertRaises(
lib_exc.BadRequest, self.client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='tcp', port_range_min=pmin, port_range_max=pmax,
direction='ingress', ethertype=self.ethertype)
self.assertIn(msg, str(ex))
# Create rule for icmp protocol with invalid ports
states = [(1, 256, 'Invalid value for ICMP code'),
(-1, 25, 'Invalid value'),
(None, 6, 'ICMP type (port-range-min) is missing'),
(300, 1, 'Invalid value for ICMP type')]
for pmin, pmax, msg in states:
ex = self.assertRaises(
lib_exc.BadRequest, self.client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='icmp', port_range_min=pmin, port_range_max=pmax,
direction='ingress', ethertype=self.ethertype)
self.assertIn(msg, str(ex))
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('55100aa8-b24f-333c-0bef-64eefd85f15c')
def test_update_default_security_group_name(self):
sg_list = self.client.list_security_groups(name='default')
sg = sg_list['security_groups'][0]
self.assertRaises(lib_exc.Conflict, self.client.update_security_group,
sg['id'], name='test')
class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
_ip_version = 6
_tenant_network_cidr = CONF.network.tenant_network_v6_cidr
|
MaximNevrov/neutron
|
neutron/tests/api/test_security_groups_negative.py
|
Python
|
apache-2.0
| 3,379
|
# Copyright (c) 2014 Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from barbican.common import utils as common_utils
from barbican.plugin.interface import secret_store as str
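# Note: 'str' is an alias for the secret_store module here and shadows the
# builtin str within this test module.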
from barbican.tests import utils
class TestSecretStore(str.SecretStoreBase):
"""Secret store plugin for testing support."""
def __init__(self, supported_alg_list):
super(TestSecretStore, self).__init__()
self.alg_list = supported_alg_list
def generate_symmetric_key(self, key_spec):
raise NotImplementedError # pragma: no cover
def generate_asymmetric_key(self, key_spec):
raise NotImplementedError # pragma: no cover
def store_secret(self, secret_dto):
raise NotImplementedError # pragma: no cover
def get_secret(self, secret_metadata):
raise NotImplementedError # pragma: no cover
def generate_supports(self, key_spec):
return key_spec.alg in self.alg_list
def delete_secret(self, secret_metadata):
raise NotImplementedError # pragma: no cover
def store_secret_supports(self, key_spec):
return key_spec.alg in self.alg_list
class TestSecretStoreWithTransportKey(str.SecretStoreBase):
"""Secret store plugin for testing support.
This plugin will override the relevant methods for key wrapping.
"""
def __init__(self, supported_alg_list):
super(TestSecretStoreWithTransportKey, self).__init__()
self.alg_list = supported_alg_list
def generate_symmetric_key(self, key_spec):
raise NotImplementedError # pragma: no cover
def generate_asymmetric_key(self, key_spec):
raise NotImplementedError # pragma: no cover
def store_secret(self, secret_dto):
raise NotImplementedError # pragma: no cover
def get_secret(self, secret_metadata):
raise NotImplementedError # pragma: no cover
def generate_supports(self, key_spec):
return key_spec.alg in self.alg_list
def delete_secret(self, secret_metadata):
raise NotImplementedError # pragma: no cover
def store_secret_supports(self, key_spec):
return key_spec.alg in self.alg_list
def get_transport_key(self):
return "transport key"
def is_transport_key_current(self, transport_key):
return True
class WhenTestingSecretStorePluginManager(utils.BaseTestCase):
def setUp(self):
super(WhenTestingSecretStorePluginManager, self).setUp()
self.manager = str.SecretStorePluginManager()
def test_get_store_supported_plugin_no_plugin_name(self):
plugin = TestSecretStore([str.KeyAlgorithm.AES])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertEqual(plugin,
self.manager.get_plugin_store(keySpec))
def test_get_store_supported_plugin_with_plugin_name(self):
plugin = TestSecretStore([str.KeyAlgorithm.AES])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
plugin_found = self.manager.get_plugin_store(
None, plugin_name=common_utils.generate_fullname_for(plugin))
self.assertEqual(plugin, plugin_found)
def test_get_generate_supported_plugin(self):
plugin = TestSecretStore([str.KeyAlgorithm.AES])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertEqual(plugin,
self.manager.get_plugin_generate(keySpec))
def test_get_store_no_plugin_found(self):
self.manager.extensions = []
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertRaises(
str.SecretStorePluginsNotConfigured,
self.manager.get_plugin_store,
keySpec,
)
def test_get_store_no_plugin_found_by_name(self):
plugin = TestSecretStore([str.KeyAlgorithm.AES])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
plugin_name = 'plugin'
exception_result = self.assertRaises(
str.SecretStorePluginNotFound,
self.manager.get_plugin_store,
keySpec,
plugin_name=plugin_name
)
self.assertEqual(
'Secret store plugin "{name}" not found.'.format(name=plugin_name),
exception_result.message)
def test_get_generate_no_plugin_found(self):
self.manager.extensions = []
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertRaises(
str.SecretStorePluginsNotConfigured,
self.manager.get_plugin_generate,
keySpec,
)
def test_get_store_no_supported_plugin(self):
plugin = TestSecretStore([])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertRaises(
str.SecretStoreSupportedPluginNotFound,
self.manager.get_plugin_store,
keySpec,
)
def test_get_generate_no_supported_plugin(self):
plugin = TestSecretStore([])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertRaises(
str.SecretStoreSupportedPluginNotFound,
self.manager.get_plugin_generate,
keySpec,
)
def test_get_store_no_plugin_with_tkey_and_no_supports_storage(self):
plugin = TestSecretStore([])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertRaises(
str.SecretStoreSupportedPluginNotFound,
self.manager.get_plugin_store,
key_spec=keySpec,
transport_key_needed=True,
)
def test_get_store_plugin_with_tkey_and_no_supports_storage(self):
plugin = TestSecretStoreWithTransportKey([])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertRaises(
str.SecretStoreSupportedPluginNotFound,
self.manager.get_plugin_store,
key_spec=keySpec,
transport_key_needed=True,
)
def test_get_store_plugin_with_no_tkey_and_supports_storage(self):
plugin = TestSecretStore([str.KeyAlgorithm.AES])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertRaises(
str.SecretStoreSupportedPluginNotFound,
self.manager.get_plugin_store,
key_spec=keySpec,
transport_key_needed=True,
)
@mock.patch('barbican.common.utils.generate_fullname_for')
def test_get_retrieve_plugin_raises_when_not_available(
self, generate_full_name_for):
plugin = TestSecretStore([str.KeyAlgorithm.AES])
plugin_mock = mock.MagicMock(obj=plugin)
self.manager.extensions = [plugin_mock]
generate_full_name_for.return_value = "another plugin name"
plugin_name = 'plugin name searched for'
exception_result = self.assertRaises(
str.StorePluginNotAvailableOrMisconfigured,
self.manager.get_plugin_retrieve_delete,
plugin_name=plugin_name,
)
self.assertIn(plugin_name, exception_result.message)
def test_get_store_plugin_with_tkey_and_supports_storage(self):
plugin1 = TestSecretStore([str.KeyAlgorithm.AES])
plugin1_mock = mock.MagicMock(obj=plugin1)
plugin2 = TestSecretStoreWithTransportKey([str.KeyAlgorithm.AES])
plugin2_mock = mock.MagicMock(obj=plugin2)
self.manager.extensions = [plugin1_mock, plugin2_mock]
keySpec = str.KeySpec(str.KeyAlgorithm.AES, 128)
self.assertEqual(plugin2,
self.manager.get_plugin_store(
key_spec=keySpec,
transport_key_needed=True))
|
cneill/barbican
|
barbican/tests/plugin/interface/test_secret_store.py
|
Python
|
apache-2.0
| 8,971
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResourceGroup Resource formatter."""
from operator import attrgetter
from aquilon.worker.formats.formatters import ObjectFormatter
from aquilon.worker.formats.resource import ResourceFormatter
from aquilon.aqdb.model import ResourceGroup
class ResourceGroupFormatter(ResourceFormatter):
protocol = "aqdsystems_pb2"
def extra_details(self, rg, indent=""):
details = []
if rg.required_type:
details.append(indent + " Type: %s" % rg.required_type)
if rg.resholder:
for resource in sorted(rg.resholder.resources,
key=attrgetter('resource_type', 'name')):
details.append(self.redirect_raw(resource, indent + " "))
return details
def format_proto(self, rg, skeleton=None):
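        # Fill in (or create) a protobuf ResourceList entry for this
        # resourcegroup, add its nested resources, then let the parent
        # formatter populate the common resource fields.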
container = skeleton
if not container:
container = self.loaded_protocols[self.protocol].ResourceList()
skeleton = container.resources.add()
if rg.required_type:
skeleton.resourcegroup.required_type = rg.required_type
if rg.resholder and rg.resholder.resources:
for resource in rg.resholder.resources:
r = skeleton.resourcegroup.resources.add()
self.redirect_proto(resource, r)
return super(ResourceGroupFormatter, self).format_proto(rg, skeleton)
ObjectFormatter.handlers[ResourceGroup] = ResourceGroupFormatter()
|
jrha/aquilon
|
lib/python2.6/aquilon/worker/formats/resourcegroup.py
|
Python
|
apache-2.0
| 2,143
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text category_encoding preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers.preprocessing import category_encoding
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoryEncodingInputTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def test_dense_input_sparse_output(self):
input_array = constant_op.constant([[1, 2, 3], [3, 3, 0]])
# The expected output should be (X for missing value):
# [[X, 1, 1, 1, X, X]
# [1, X, X, 2, X, X]]
expected_indices = [[0, 1], [0, 2], [0, 3], [1, 0], [1, 3]]
expected_values = [1, 1, 1, 1, 2]
num_tokens = 6
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
sp_output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
    # Assert the sparse output is the same as the dense output.
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens,
output_mode=category_encoding.COUNT,
sparse=False)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
output_dataset)
def test_sparse_input(self):
input_array = np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64)
sparse_tensor_data = sparse_ops.from_dense(input_array)
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 0]]
# pyformat: enable
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.BINARY)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(sparse_tensor_data, steps=1)
self.assertAllEqual(expected_output, output_dataset)
def test_sparse_input_with_weights(self):
input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 4]], dtype=np.int64)
weights_array = np.array([[.1, .2, .3, .4], [.2, .1, .4, .3]])
sparse_tensor_data = sparse_ops.from_dense(input_array)
sparse_weight_data = sparse_ops.from_dense(weights_array)
# pyformat: disable
expected_output = [[0, .1, .2, .3, .4, 0],
[0, .4, 0, .1, .5, 0]]
# pyformat: enable
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
weight_data = keras.Input(shape=(None,), dtype=dtypes.float32, sparse=True)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT)
int_data = layer(input_data, count_weights=weight_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
output_dataset = model.predict([sparse_tensor_data, sparse_weight_data],
steps=1)
self.assertAllClose(expected_output, output_dataset)
def test_sparse_input_sparse_output(self):
sp_inp = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]],
values=[0, 2, 1, 1, 0],
dense_shape=[4, 2])
input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
# The expected output should be (X for missing value):
# [[1, X, X, X]
# [X, X, 1, X]
# [X, 2, X, X]
# [1, X, X, X]]
expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
expected_values = [1, 1, 2, 1]
num_tokens = 6
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
sp_output_dataset = model.predict(sp_inp, steps=1)
self.assertAllEqual(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
    # Assert the sparse output is the same as the dense output.
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens,
output_mode=category_encoding.COUNT,
sparse=False)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(sp_inp, steps=1)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
output_dataset)
def test_sparse_input_sparse_output_with_weights(self):
indices = [[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]]
sp_inp = sparse_tensor.SparseTensor(
indices=indices, values=[0, 2, 1, 1, 0], dense_shape=[4, 2])
input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
sp_weight = sparse_tensor.SparseTensor(
indices=indices, values=[.1, .2, .4, .3, .2], dense_shape=[4, 2])
weight_data = keras.Input(shape=(None,), dtype=dtypes.float32, sparse=True)
# The expected output should be (X for missing value):
# [[1, X, X, X]
# [X, X, 1, X]
# [X, 2, X, X]
# [1, X, X, X]]
expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
expected_values = [.1, .2, .7, .2]
num_tokens = 6
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data, count_weights=weight_data)
model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
sp_output_dataset = model.predict([sp_inp, sp_weight], steps=1)
self.assertAllClose(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
def test_ragged_input(self):
input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 1]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 0]]
# pyformat: enable
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.BINARY)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_output, output_dataset)
def test_ragged_input_sparse_output(self):
input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 3]])
# The expected output should be (X for missing value):
# [[X, 1, 1, 1]
# [X, X, X, 2]]
expected_indices = [[0, 1], [0, 2], [0, 3], [1, 3]]
expected_values = [1, 1, 1, 2]
num_tokens = 6
input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
sp_output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
    # Assert the sparse output is the same as the dense output.
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens,
output_mode=category_encoding.COUNT,
sparse=False)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
output_dataset)
def test_sparse_output_and_dense_layer(self):
input_array = constant_op.constant([[1, 2, 3], [3, 3, 0]])
num_tokens = 4
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
encoding_layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = encoding_layer(input_data)
dense_layer = keras.layers.Dense(units=1)
output_data = dense_layer(int_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array, steps=1)
def test_dense_oov_input(self):
input_array = constant_op.constant([[0, 1, 2], [2, 3, 1]])
num_tokens = 3
expected_output_shape = [None, num_tokens]
encoder_layer = category_encoding.CategoryEncoding(num_tokens)
input_data = keras.Input(shape=(3,), dtype=dtypes.int32)
int_data = encoder_layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
".*must be in the range 0 <= values < num_tokens.*"):
_ = model.predict(input_array, steps=1)
def test_dense_negative(self):
input_array = constant_op.constant([[1, 2, 0], [2, 2, -1]])
num_tokens = 3
expected_output_shape = [None, num_tokens]
encoder_layer = category_encoding.CategoryEncoding(num_tokens)
input_data = keras.Input(shape=(3,), dtype=dtypes.int32)
int_data = encoder_layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
".*must be in the range 0 <= values < num_tokens.*"):
_ = model.predict(input_array, steps=1)
def test_legacy_max_tokens_arg(self):
input_array = np.array([[1, 2, 3, 1]])
expected_output = [[0, 1, 1, 1, 0, 0]]
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = category_encoding.CategoryEncoding(
max_tokens=num_tokens, output_mode=category_encoding.BINARY)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes
class CategoryEncodingOutputTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def test_binary_output(self):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[1, 1, 0, 1, 0, 0]]
# pyformat: enable
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=category_encoding.BINARY)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_count_output(self):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0, 0],
[2, 1, 0, 1, 0, 0]]
# pyformat: enable
num_tokens = 6
expected_output_shape = [None, num_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = category_encoding.CategoryEncoding(
num_tokens=6, output_mode=category_encoding.COUNT)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
class CategoryEncodingModelBuildingTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
@parameterized.named_parameters(
{
"testcase_name": "count_output",
"num_tokens": 5,
"output_mode": category_encoding.COUNT
}, {
"testcase_name": "binary_output",
"num_tokens": 5,
"output_mode": category_encoding.BINARY
})
def test_end_to_end_bagged_modeling(self, output_mode, num_tokens):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = category_encoding.CategoryEncoding(
num_tokens=num_tokens, output_mode=output_mode)
weights = []
if num_tokens is None:
layer.set_num_elements(5)
layer.set_weights(weights)
int_data = layer(input_data)
float_data = backend.cast(int_data, dtype="float32")
output_data = core.Dense(64)(float_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array)
if __name__ == "__main__":
test.main()
|
annarev/tensorflow
|
tensorflow/python/keras/layers/preprocessing/category_encoding_test.py
|
Python
|
apache-2.0
| 15,282
|
#!/usr/bin/env python
#
# revert_tests.py: testing 'svn revert'.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import re, os, stat, shutil
# Our testing module
import svntest
from svntest import wc, main, actions
from svntest.actions import run_and_verify_svn
from svntest.main import file_append, file_write, run_svn
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
######################################################################
# Helpers
def revert_replacement_with_props(sbox, wc_copy):
"""Helper implementing the core of
revert_{repos,wc}_to_wc_replace_with_props().
Uses a working copy (when wc_copy == True) or a URL (when wc_copy ==
False) source to copy from."""
sbox.build()
wc_dir = sbox.wc_dir
# Use a temp file to set properties with wildcards in their values
# otherwise Win32/VS2005 will expand them
prop_path = os.path.join(wc_dir, 'proptmp')
svntest.main.file_append(prop_path, '*')
# Set props on file which is copy-source later on
pi_path = os.path.join(wc_dir, 'A', 'D', 'G', 'pi')
rho_path = os.path.join(wc_dir, 'A', 'D', 'G', 'rho')
svntest.actions.run_and_verify_svn(None, None, [],
'ps', 'phony-prop', '-F', prop_path,
pi_path)
os.remove(prop_path)
svntest.actions.run_and_verify_svn(None, None, [],
'ps', 'svn:eol-style', 'LF', rho_path)
# Verify props having been set
expected_disk = svntest.main.greek_state.copy()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_disk.tweak('A/D/G/pi',
props={ 'phony-prop': '*' })
expected_disk.tweak('A/D/G/rho',
props={ 'svn:eol-style': 'LF' })
svntest.actions.verify_disk(wc_dir, expected_disk, True)
# Commit props
expected_output = svntest.wc.State(wc_dir, {
'A/D/G/pi': Item(verb='Sending'),
'A/D/G/rho': Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', wc_rev='2')
expected_status.tweak('A/D/G/rho', wc_rev='2')
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None, wc_dir)
# Bring wc into sync
svntest.actions.run_and_verify_svn(None, None, [], 'up', wc_dir)
# File scheduled for deletion
svntest.actions.run_and_verify_svn(None, None, [], 'rm', rho_path)
# Status before attempting copies
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('A/D/G/rho', status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# The copy shouldn't fail
if wc_copy:
pi_src = os.path.join(wc_dir, 'A', 'D', 'G', 'pi')
else:
pi_src = sbox.repo_url + '/A/D/G/pi'
svntest.actions.run_and_verify_svn(None, None, [],
'cp', pi_src, rho_path)
# Verify both content and props have been copied
if wc_copy:
props = { 'phony-prop' : '*' }
else:
props = { 'phony-prop' : '*' }
expected_disk.tweak('A/D/G/rho',
contents="This is the file 'pi'.\n",
props=props)
svntest.actions.verify_disk(wc_dir, expected_disk.old_tree(), True)
# Now revert
expected_status.tweak('A/D/G/rho', status='R ', copied='+', wc_rev='-')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_status.tweak('A/D/G/rho', status=' ', copied=None, wc_rev='2')
expected_output = ["Reverted '" + rho_path + "'\n"]
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', '-R', wc_dir)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Check disk status
expected_disk = svntest.main.greek_state.copy()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_disk.tweak('A/D/G/pi',
props={ 'phony-prop': '*' })
expected_disk.tweak('A/D/G/rho',
props={ 'svn:eol-style': 'LF' })
svntest.actions.verify_disk(wc_dir, expected_disk.old_tree(), True)
######################################################################
# Tests
#
# Each test must return on success or raise on failure.
#----------------------------------------------------------------------
def revert_from_wc_root(sbox):
"revert relative to wc root"
sbox.build(read_only = True)
wc_dir = sbox.wc_dir
os.chdir(wc_dir)
# Mostly taken from basic_revert
# Modify some files and props.
beta_path = os.path.join('A', 'B', 'E', 'beta')
gamma_path = os.path.join('A', 'D', 'gamma')
iota_path = 'iota'
rho_path = os.path.join('A', 'D', 'G', 'rho')
zeta_path = os.path.join('A', 'D', 'H', 'zeta')
svntest.main.file_append(beta_path, "Added some text to 'beta'.\n")
svntest.main.file_append(iota_path, "Added some text to 'iota'.\n")
svntest.main.file_append(rho_path, "Added some text to 'rho'.\n")
svntest.main.file_append(zeta_path, "Added some text to 'zeta'.\n")
svntest.actions.run_and_verify_svn("Add command", None, [],
'add', zeta_path)
svntest.actions.run_and_verify_svn("Add prop command", None, [],
'ps', 'random-prop', 'propvalue',
gamma_path)
svntest.actions.run_and_verify_svn("Add prop command", None, [],
'ps', 'random-prop', 'propvalue',
iota_path)
svntest.actions.run_and_verify_svn("Add prop command", None, [],
'ps', 'random-prop', 'propvalue',
'.')
svntest.actions.run_and_verify_svn("Add prop command", None, [],
'ps', 'random-prop', 'propvalue',
'A')
# Verify modified status.
expected_output = svntest.actions.get_virginal_state('', 1)
expected_output.tweak('A/B/E/beta', 'A/D/G/rho', status='M ')
expected_output.tweak('iota', status='MM')
expected_output.tweak('', 'A/D/gamma', 'A', status=' M')
expected_output.add({
'A/D/H/zeta' : Item(status='A ', wc_rev=0),
})
svntest.actions.run_and_verify_status('', expected_output)
# Run revert
svntest.actions.run_and_verify_svn("Revert command", None, [],
'revert', beta_path)
svntest.actions.run_and_verify_svn("Revert command", None, [],
'revert', gamma_path)
svntest.actions.run_and_verify_svn("Revert command", None, [],
'revert', iota_path)
svntest.actions.run_and_verify_svn("Revert command", None, [],
'revert', rho_path)
svntest.actions.run_and_verify_svn("Revert command", None, [],
'revert', zeta_path)
svntest.actions.run_and_verify_svn("Revert command", None, [],
'revert', '.')
svntest.actions.run_and_verify_svn("Revert command", None, [],
'revert', 'A')
# Verify unmodified status.
expected_output = svntest.actions.get_virginal_state('', 1)
svntest.actions.run_and_verify_status('', expected_output)
@Issue(1663)
def revert_reexpand_keyword(sbox):
"revert reexpands manually contracted keyword"
# This is for issue #1663. The bug is that if the only difference
# between a locally modified working file and the base version of
# same was that the former had a contracted keyword that would be
# expanded in the latter, then 'svn revert' wouldn't notice the
# difference, and therefore wouldn't revert. And why wouldn't it
# notice? Since text bases are always stored with keywords
# contracted, and working files are contracted before comparison
# with text base, there would appear to be no difference when the
# contraction is the only difference. For most commands, this is
# correct -- but revert's job is to restore the working file, not
# the text base.
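  # For example, the working file may contain the contracted form "$Rev$"
  # while the checked-out, expanded form is "$Rev: 3 $": identical after
  # contraction, but different on disk.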
sbox.build()
wc_dir = sbox.wc_dir
newfile_path = os.path.join(wc_dir, "newfile")
unexpanded_contents = "This is newfile: $Rev$.\n"
# Put an unexpanded keyword into iota.
svntest.main.file_write(newfile_path, unexpanded_contents)
# Commit, without svn:keywords property set.
svntest.main.run_svn(None, 'add', newfile_path)
svntest.main.run_svn(None,
'commit', '-m', 'r2', newfile_path)
# Set the property and commit. This should expand the keyword.
svntest.main.run_svn(None, 'propset', 'svn:keywords', 'rev', newfile_path)
svntest.main.run_svn(None,
'commit', '-m', 'r3', newfile_path)
# Verify that the keyword got expanded.
def check_expanded(path):
fp = open(path, 'r')
lines = fp.readlines()
fp.close()
if lines[0] != "This is newfile: $Rev: 3 $.\n":
raise svntest.Failure
check_expanded(newfile_path)
# Now un-expand the keyword again.
svntest.main.file_write(newfile_path, unexpanded_contents)
# Revert the file. The keyword should reexpand.
svntest.main.run_svn(None, 'revert', newfile_path)
# Verify that the keyword got re-expanded.
check_expanded(newfile_path)
# Ok, the first part of this test was written in 2004. We are now in 2011
# and note that there is more to test:
# If the recorded timestamp and size match the file then revert won't
# reinstall the file as the file was not modified when last compared in
# the repository normal form.
#
# The easiest way to get the information recorded would be calling cleanup,
# because that 'repairs' the recorded information. But some developers
# (including me) would call that cheating, so I just use a failed commit.
# Un-expand the keyword again.
svntest.main.file_write(newfile_path, unexpanded_contents)
  # And now we trick svn into ignoring the file at newfile_path
newfile2_path = newfile_path + '2'
svntest.main.file_write(newfile2_path, 'This is file 2')
svntest.main.run_svn(None, 'add', newfile2_path)
os.remove(newfile2_path)
# This commit fails because newfile2_path is missing, but only after
# we call svn_wc__internal_file_modified_p() on new_file.
svntest.actions.run_and_verify_commit(wc_dir, None, None, "2' is scheduled"+
" for addition, but is missing",
newfile_path, newfile2_path,
'-m', "Shouldn't be committed")
# Revert the file. The file is not reverted!
svntest.actions.run_and_verify_svn(None, [], [], 'revert', newfile_path)
#----------------------------------------------------------------------
# Regression test for issue #1775:
# Should be able to revert a file with no properties i.e. no prop-base
@Issue(1775)
def revert_replaced_file_without_props(sbox):
"revert a replaced file with no properties"
sbox.build()
wc_dir = sbox.wc_dir
file1_path = os.path.join(wc_dir, 'file1')
# Add a new file, file1, that has no prop-base
svntest.main.file_append(file1_path, "This is the file 'file1' revision 2.")
svntest.actions.run_and_verify_svn(None, None, [], 'add', file1_path)
# commit file1
expected_output = svntest.wc.State(wc_dir, {
'file1' : Item(verb='Adding')
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'file1' : Item(status=' ', wc_rev=2),
})
svntest.actions.run_and_verify_commit(wc_dir, expected_output,
expected_status, None, wc_dir)
# delete file1
svntest.actions.run_and_verify_svn(None, None, [], 'rm', file1_path)
# test that file1 is scheduled for deletion.
expected_status.tweak('file1', status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# recreate and add file1
svntest.main.file_append(file1_path, "This is the file 'file1' revision 3.")
svntest.actions.run_and_verify_svn(None, None, [], 'add', file1_path)
  # Test to see if file1 is scheduled for replacement
expected_status.tweak('file1', status='R ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# revert file1
svntest.actions.run_and_verify_svn(None, ["Reverted '" + file1_path + "'\n"],
[], 'revert', file1_path)
# test that file1 really was reverted
expected_status.tweak('file1', status=' ', wc_rev=2)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Note that issue #876 has been rejected. This now basically tests that
# reverting the delete side of a move does *not* also revert the copy side.
@Issue(876)
def revert_moved_file(sbox):
"revert a moved file"
# svntest.factory.make(sbox, """svn mv iota iota_moved
# svn st
# svn revert iota
# svn st
# """)
sbox.build()
wc_dir = sbox.wc_dir
iota = os.path.join(wc_dir, 'iota')
iota_moved = os.path.join(wc_dir, 'iota_moved')
# svn mv iota iota_moved
expected_stdout = svntest.verify.UnorderedOutput([
'A ' + iota_moved + '\n',
'D ' + iota + '\n',
])
actions.run_and_verify_svn2('OUTPUT', expected_stdout, [], 0, 'mv', iota,
iota_moved)
# svn st
expected_status = actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'iota_moved' : Item(status='A ', copied='+', wc_rev='-',
moved_from='iota'),
})
expected_status.tweak('iota', status='D ', moved_to='iota_moved')
actions.run_and_verify_unquiet_status(wc_dir, expected_status)
# svn revert iota
expected_stdout = ["Reverted '" + iota + "'\n"]
actions.run_and_verify_svn2('OUTPUT', expected_stdout, [], 0, 'revert',
iota)
# svn st
expected_status.tweak('iota', status=' ', moved_to=None)
expected_status.tweak('iota_moved', moved_from=None)
actions.run_and_verify_unquiet_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Test for issue 2135
#
# It is like merge_file_replace (in merge_tests.py), but reverts the file
# instead of committing.
@Issue(2135)
def revert_file_merge_replace_with_history(sbox):
"revert a merge replacement of file with history"
sbox.build()
wc_dir = sbox.wc_dir
# File scheduled for deletion
rho_path = os.path.join(wc_dir, 'A', 'D', 'G', 'rho')
svntest.actions.run_and_verify_svn(None, None, [], 'rm', rho_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/rho', status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = svntest.wc.State(wc_dir, {
'A/D/G/rho': Item(verb='Deleting'),
})
expected_status.remove('A/D/G/rho')
# Commit rev 2
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None, wc_dir)
# create new rho file
svntest.main.file_write(rho_path, "new rho\n")
# Add the new file
svntest.actions.run_and_verify_svn(None, None, [], 'add', rho_path)
  # Commit revision 3
expected_status.add({
'A/D/G/rho' : Item(status='A ', wc_rev='0')
})
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = svntest.wc.State(wc_dir, {
'A/D/G/rho': Item(verb='Adding'),
})
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
None, None, wc_dir)
# Update working copy
expected_output = svntest.wc.State(wc_dir, {})
expected_disk = svntest.main.greek_state.copy()
expected_disk.tweak('A/D/G/rho', contents='new rho\n' )
expected_status.tweak(wc_rev='3')
expected_status.tweak('A/D/G/rho', status=' ')
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status)
# merge changes from r3:1
expected_output = svntest.wc.State(wc_dir, {
'A/D/G/rho': Item(status='R ')
})
expected_mergeinfo_output = svntest.wc.State(wc_dir, {
'' : Item(status=' U')
})
expected_elision_output = svntest.wc.State(wc_dir, {
'' : Item(status=' U')
})
expected_status.tweak('A/D/G/rho', status='R ', copied='+', wc_rev='-')
expected_skip = wc.State(wc_dir, { })
expected_disk.tweak('A/D/G/rho', contents="This is the file 'rho'.\n")
svntest.actions.run_and_verify_merge(wc_dir, '3', '1',
sbox.repo_url, None,
expected_output,
expected_mergeinfo_output,
expected_elision_output,
expected_disk,
expected_status,
expected_skip)
# Now revert
svntest.actions.run_and_verify_svn(None,
None,
[], 'revert', rho_path)
# test that rho really was reverted
expected_status.tweak('A/D/G/rho', copied=None, status=' ', wc_rev=3)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_disk.tweak('A/D/G/rho', contents="new rho\n")
svntest.actions.verify_disk(wc_dir, expected_disk.old_tree(), True)
  # Make sure the revert removed the copied-from information.
expected_infos = [
{ 'Copied' : None }
]
svntest.actions.run_and_verify_info(expected_infos, rho_path)
def revert_wc_to_wc_replace_with_props(sbox):
"revert svn cp PATH PATH replace file with props"
revert_replacement_with_props(sbox, 1)
def revert_repos_to_wc_replace_with_props(sbox):
"revert svn cp URL PATH replace file with props"
revert_replacement_with_props(sbox, 0)
def revert_after_second_replace(sbox):
"revert file after second replace"
sbox.build(read_only = True)
wc_dir = sbox.wc_dir
# File scheduled for deletion
rho_path = os.path.join(wc_dir, 'A', 'D', 'G', 'rho')
svntest.actions.run_and_verify_svn(None, None, [], 'rm', rho_path)
# Status before attempting copy
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/rho', status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Replace file for the first time
pi_src = os.path.join(wc_dir, 'A', 'D', 'G', 'pi')
svntest.actions.run_and_verify_svn(None, None, [],
'cp', pi_src, rho_path)
expected_status.tweak('A/D/G/rho', status='R ', copied='+', wc_rev='-')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Now delete replaced file.
svntest.actions.run_and_verify_svn(None, None, [], 'rm', '--force', rho_path)
# Status should be same as after first delete
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/rho', status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Replace file for the second time
pi_src = os.path.join(wc_dir, 'A', 'D', 'G', 'pi')
svntest.actions.run_and_verify_svn(None, None, [], 'cp', pi_src, rho_path)
expected_status.tweak('A/D/G/rho', status='R ', copied='+', wc_rev='-')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Now revert
svntest.actions.run_and_verify_svn(None, None, [],
'revert', '-R', wc_dir)
# Check disk status
expected_disk = svntest.main.greek_state.copy()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
svntest.actions.verify_disk(wc_dir, expected_disk.old_tree(), True)
#----------------------------------------------------------------------
# Tests for issue #2517.
#
# Manual conflict resolution leads to spurious revert report.
@Issue(2517)
def revert_after_manual_conflict_resolution__text(sbox):
"revert after manual text-conflict resolution"
# Make two working copies
sbox.build()
wc_dir_1 = sbox.wc_dir
wc_dir_2 = sbox.add_wc_path('other')
svntest.actions.duplicate_dir(wc_dir_1, wc_dir_2)
# Cause a (text) conflict
iota_path_1 = os.path.join(wc_dir_1, 'iota')
iota_path_2 = os.path.join(wc_dir_2, 'iota')
svntest.main.file_write(iota_path_1, 'Modified iota text')
svntest.main.file_write(iota_path_2, 'Conflicting iota text')
svntest.main.run_svn(None,
'commit', '-m', 'r2', wc_dir_1)
svntest.main.run_svn(None,
'update', wc_dir_2)
# Resolve the conflict "manually"
svntest.main.file_write(iota_path_2, 'Modified iota text')
os.remove(iota_path_2 + '.mine')
os.remove(iota_path_2 + '.r1')
os.remove(iota_path_2 + '.r2')
# Verify no output from status, diff, or revert
svntest.actions.run_and_verify_svn(None, [], [], "status", wc_dir_2)
svntest.actions.run_and_verify_svn(None, [], [], "diff", wc_dir_2)
svntest.actions.run_and_verify_svn(None, [], [], "revert", "-R", wc_dir_2)
def revert_after_manual_conflict_resolution__prop(sbox):
"revert after manual property-conflict resolution"
# Make two working copies
sbox.build()
wc_dir_1 = sbox.wc_dir
wc_dir_2 = sbox.add_wc_path('other')
svntest.actions.duplicate_dir(wc_dir_1, wc_dir_2)
# Cause a (property) conflict
iota_path_1 = os.path.join(wc_dir_1, 'iota')
iota_path_2 = os.path.join(wc_dir_2, 'iota')
svntest.main.run_svn(None, 'propset', 'foo', '1', iota_path_1)
svntest.main.run_svn(None, 'propset', 'foo', '2', iota_path_2)
svntest.main.run_svn(None,
'commit', '-m', 'r2', wc_dir_1)
svntest.main.run_svn(None,
'update', wc_dir_2)
# Resolve the conflict "manually"
svntest.main.run_svn(None, 'propset', 'foo', '1', iota_path_2)
os.remove(iota_path_2 + '.prej')
# Verify no output from status, diff, or revert
svntest.actions.run_and_verify_svn(None, [], [], "status", wc_dir_2)
svntest.actions.run_and_verify_svn(None, [], [], "diff", wc_dir_2)
svntest.actions.run_and_verify_svn(None, [], [], "revert", "-R", wc_dir_2)
def revert_propset__dir(sbox):
"revert a simple propset on a dir"
sbox.build(read_only = True)
wc_dir = sbox.wc_dir
a_path = os.path.join(wc_dir, 'A')
svntest.main.run_svn(None, 'propset', 'foo', 'x', a_path)
expected_output = re.escape("Reverted '" + a_path + "'")
svntest.actions.run_and_verify_svn(None, expected_output, [], "revert",
a_path)
def revert_propset__file(sbox):
"revert a simple propset on a file"
sbox.build(read_only = True)
wc_dir = sbox.wc_dir
iota_path = os.path.join(wc_dir, 'iota')
svntest.main.run_svn(None, 'propset', 'foo', 'x', iota_path)
expected_output = re.escape("Reverted '" + iota_path + "'")
svntest.actions.run_and_verify_svn(None, expected_output, [], "revert",
iota_path)
def revert_propdel__dir(sbox):
"revert a simple propdel on a dir"
sbox.build()
wc_dir = sbox.wc_dir
a_path = os.path.join(wc_dir, 'A')
svntest.main.run_svn(None, 'propset', 'foo', 'x', a_path)
svntest.main.run_svn(None,
'commit', '-m', 'ps', a_path)
svntest.main.run_svn(None, 'propdel', 'foo', a_path)
expected_output = re.escape("Reverted '" + a_path + "'")
svntest.actions.run_and_verify_svn(None, expected_output, [], "revert",
a_path)
def revert_propdel__file(sbox):
"revert a simple propdel on a file"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = os.path.join(wc_dir, 'iota')
svntest.main.run_svn(None, 'propset', 'foo', 'x', iota_path)
svntest.main.run_svn(None,
'commit', '-m', 'ps', iota_path)
svntest.main.run_svn(None, 'propdel', 'foo', iota_path)
expected_output = re.escape("Reverted '" + iota_path + "'")
svntest.actions.run_and_verify_svn(None, expected_output, [], "revert",
iota_path)
def revert_replaced_with_history_file_1(sbox):
"revert a committed replace-with-history == no-op"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = os.path.join(wc_dir, 'iota')
mu_path = os.path.join(wc_dir, 'A', 'mu')
# Remember the original text of 'mu'
exit_code, text_r1, err = svntest.actions.run_and_verify_svn(None, None, [],
'cat', mu_path)
# delete mu and replace it with a copy of iota
svntest.main.run_svn(None, 'rm', mu_path)
svntest.main.run_svn(None, 'mv', iota_path, mu_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', status=' ', wc_rev=2)
expected_status.remove('iota')
expected_output = svntest.wc.State(wc_dir, {
'iota': Item(verb='Deleting'),
'A/mu': Item(verb='Replacing'),
})
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None, wc_dir)
# update the working copy
svntest.main.run_svn(None, 'up', wc_dir)
# now revert back to the state in r1
expected_output = svntest.wc.State(wc_dir, {
'A/mu': Item(status='R '),
'iota': Item(status='A ')
})
expected_mergeinfo_output = svntest.wc.State(wc_dir, {
'': Item(status=' U'),
})
expected_elision_output = svntest.wc.State(wc_dir, {
'': Item(status=' U'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('A/mu', status='R ', copied='+', wc_rev='-')
expected_status.tweak('iota', status='A ', copied='+', wc_rev='-')
expected_skip = wc.State(wc_dir, { })
expected_disk = svntest.main.greek_state.copy()
svntest.actions.run_and_verify_merge(wc_dir, '2', '1',
sbox.repo_url, None,
expected_output,
expected_mergeinfo_output,
expected_elision_output,
expected_disk,
expected_status,
expected_skip)
# and commit in r3
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('A/mu', status=' ', wc_rev=3)
expected_status.tweak('iota', status=' ', wc_rev=3)
expected_output = svntest.wc.State(wc_dir, {
'iota': Item(verb='Adding'),
'A/mu': Item(verb='Replacing'),
})
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None, wc_dir)
# Verify the content of 'mu'
svntest.actions.run_and_verify_svn(None, text_r1, [], 'cat', mu_path)
# situation: no local modifications, mu has its original content again.
# revert 'mu' locally, shouldn't change a thing.
svntest.actions.run_and_verify_svn(None, [], [], "revert",
mu_path)
# Verify the content of 'mu'
svntest.actions.run_and_verify_svn(None, text_r1, [], 'cat', mu_path)
#----------------------------------------------------------------------
# Test for issue #2804.
@Issue(2804)
def status_of_missing_dir_after_revert(sbox):
"status after schedule-delete, revert, and local rm"
sbox.build(read_only = True)
wc_dir = sbox.wc_dir
A_D_G_path = os.path.join(wc_dir, "A", "D", "G")
svntest.actions.run_and_verify_svn(None, None, [], "rm", A_D_G_path)
expected_output = re.escape("Reverted '" + A_D_G_path + "'")
svntest.actions.run_and_verify_svn(None, expected_output, [], "revert",
A_D_G_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/rho', 'A/D/G/pi', 'A/D/G/tau',
status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.main.safe_rmtree(A_D_G_path)
expected_status.tweak('A/D/G', status='! ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# When using single-db, we can get back to the virginal state.
svntest.actions.run_and_verify_svn(None, None, [], "revert",
"-R", A_D_G_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Test for issue #2804 with replaced directory
@Issue(2804)
def status_of_missing_dir_after_revert_replaced_with_history_dir(sbox):
"status after replace+, revert, and local rm"
sbox.build()
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
# delete A/D/G and commit
G_path = os.path.join(wc_dir, "A", "D", "G")
svntest.actions.run_and_verify_svn(None, None, [], "rm", G_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.remove('A/D/G', 'A/D/G/rho', 'A/D/G/pi', 'A/D/G/tau')
expected_output = svntest.wc.State(wc_dir, {
'A/D/G': Item(verb='Deleting'),
})
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None, wc_dir)
# copy A/D/G from A/B/E and commit
E_path = os.path.join(wc_dir, "A", "B", "E")
svntest.actions.run_and_verify_svn(None, None, [], "cp", E_path, G_path)
expected_status.add({
'A/D/G' : Item(status=' ', wc_rev='3'),
'A/D/G/alpha' : Item(status=' ', wc_rev='3'),
'A/D/G/beta' : Item(status=' ', wc_rev='3')
})
expected_output = svntest.wc.State(wc_dir, {
'A/D/G': Item(verb='Adding'),
})
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None, wc_dir)
# update the working copy
svntest.main.run_svn(None, 'up', wc_dir)
# now rollback to r1, thereby reinstating the old 'G'
expected_output = svntest.wc.State(wc_dir, {
'A/D/G': Item(status='R '),
'A/D/G/rho': Item(status='A '),
'A/D/G/pi': Item(status='A '),
'A/D/G/tau': Item(status='A '),
})
expected_mergeinfo_output = svntest.wc.State(wc_dir, {
'': Item(status=' U'),
})
expected_elision_output = svntest.wc.State(wc_dir, {
'': Item(status=' U'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
expected_status.tweak('A/D/G', status='R ', copied='+', wc_rev='-')
expected_status.tweak('A/D/G/rho',
'A/D/G/pi',
'A/D/G/tau',
copied='+', wc_rev='-')
expected_status.add({
'A/D/G/alpha' : Item(status='D ', wc_rev='3'),
'A/D/G/beta' : Item(status='D ', wc_rev='3'),
})
expected_skip = wc.State(wc_dir, { })
expected_disk = svntest.main.greek_state.copy()
svntest.actions.run_and_verify_merge(wc_dir, '3', '1',
sbox.repo_url, None,
expected_output,
expected_mergeinfo_output,
expected_elision_output,
expected_disk,
expected_status,
expected_skip,
dry_run = 0)
# now test if the revert works ok
revert_paths = [G_path] + [os.path.join(G_path, child)
for child in ['alpha', 'beta', 'pi', 'rho', 'tau']]
expected_output = svntest.verify.UnorderedOutput([
"Reverted '%s'\n" % path for path in revert_paths])
svntest.actions.run_and_verify_svn(None, expected_output, [], "revert", "-R",
G_path)
svntest.actions.run_and_verify_svn(None, [], [],
"status", wc_dir)
svntest.main.safe_rmtree(G_path)
expected_output = svntest.verify.UnorderedOutput(
["! " + G_path + "\n",
"! " + os.path.join(G_path, "alpha") + "\n",
"! " + os.path.join(G_path, "beta") + "\n"])
svntest.actions.run_and_verify_svn(None, expected_output, [], "status",
wc_dir)
# Test for issue #2928.
@Issue(2928)
def revert_replaced_with_history_file_2(sbox):
"reverted replace with history restores checksum"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = os.path.join(wc_dir, 'iota')
mu_path = os.path.join(wc_dir, 'A', 'mu')
# Delete mu and replace it with a copy of iota
svntest.main.run_svn(None, 'rm', mu_path)
svntest.main.run_svn(None, 'cp', iota_path, mu_path)
# Revert mu.
svntest.main.run_svn(None, 'revert', mu_path)
# If we make local mods to the reverted mu the commit will
# fail if the checksum is incorrect.
svntest.main.file_write(mu_path, "new text")
expected_output = svntest.wc.State(wc_dir, {
'A/mu': Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', status=' ', wc_rev=2)
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None, wc_dir)
#----------------------------------------------------------------------
def revert_tree_conflicts_in_updated_files(sbox):
"revert tree conflicts in updated files"
# See use cases 1-3 in notes/tree-conflicts/use-cases.txt for background.
svntest.actions.build_greek_tree_conflicts(sbox)
wc_dir = sbox.wc_dir
G = os.path.join(wc_dir, 'A', 'D', 'G')
G_pi = os.path.join(G, 'pi')
G_rho = os.path.join(G, 'rho')
G_tau = os.path.join(G, 'tau')
# Duplicate wc for tests
wc_dir_2 = sbox.add_wc_path('2')
svntest.actions.duplicate_dir(wc_dir, wc_dir_2)
G2 = os.path.join(wc_dir_2, 'A', 'D', 'G')
G2_pi = os.path.join(G2, 'pi')
G2_rho = os.path.join(G2, 'rho')
G2_tau = os.path.join(G2, 'tau')
# Expectations
expected_output = svntest.verify.UnorderedOutput(
["Reverted '%s'\n" % G_pi,
"Reverted '%s'\n" % G_rho,
"Reverted '%s'\n" % G_tau,
])
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('A/D/G/pi', status=' ')
expected_status.remove('A/D/G/rho')
expected_status.remove('A/D/G/tau')
expected_disk = svntest.main.greek_state.copy()
expected_disk.remove('A/D/G/rho')
expected_disk.tweak('A/D/G/pi',
contents="This is the file 'pi'.\nIncoming edit.\n")
expected_disk.remove('A/D/G/tau')
# Revert individually in wc
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', G_pi, G_rho, G_tau)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.verify_disk(wc_dir, expected_disk)
# Expectations
expected_output = svntest.verify.UnorderedOutput(
["Reverted '%s'\n" % G2_pi,
"Reverted '%s'\n" % G2_rho,
"Reverted '%s'\n" % G2_tau,
])
expected_status.wc_dir = wc_dir_2
# Revert recursively in wc 2
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', '-R', G2)
svntest.actions.run_and_verify_status(wc_dir_2, expected_status)
svntest.actions.verify_disk(wc_dir_2, expected_disk)
def revert_add_over_not_present_dir(sbox):
"reverting an add over not present directory"
sbox.build()
wc_dir = sbox.wc_dir
main.run_svn(None, 'rm', os.path.join(wc_dir, 'A/C'))
main.run_svn(None, 'ci', wc_dir, '-m', 'Deleted dir')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.remove('A/C')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
main.run_svn(None, 'mkdir', os.path.join(wc_dir, 'A/C'))
# This failed in some WC-NG intermediate format (r927318-r958992).
main.run_svn(None, 'revert', os.path.join(wc_dir, 'A/C'))
svntest.actions.run_and_verify_status(wc_dir, expected_status)
def revert_added_tree(sbox):
"revert an added tree fails"
sbox.build()
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'mkdir', sbox.ospath('X'), sbox.ospath('X/Y'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'X' : Item(status='A ', wc_rev=0),
'X/Y' : Item(status='A ', wc_rev=0),
})
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Revert is non-recursive and fails, status is unchanged
expected_error = '.*Try \'svn revert --depth infinity\'.*'
svntest.actions.run_and_verify_svn(None, None, expected_error,
'revert', sbox.ospath('X'))
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3834)
def revert_child_of_copy(sbox):
"revert a child of a copied directory"
sbox.build()
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'cp',
sbox.ospath('A/B/E'),
sbox.ospath('A/B/E2'))
svntest.main.file_append(sbox.ospath('A/B/E2/beta'), 'extra text\n')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'A/B/E2' : Item(status='A ', copied='+', wc_rev='-'),
'A/B/E2/alpha' : Item(status=' ', copied='+', wc_rev='-'),
'A/B/E2/beta' : Item(status='M ', copied='+', wc_rev='-'),
})
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# First revert removes text change, child is still copied
expected_output = ["Reverted '%s'\n" % sbox.ospath('A/B/E2/beta')]
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', sbox.ospath('A/B/E2/beta'))
expected_status.tweak('A/B/E2/beta', status=' ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Second revert of child does nothing, child is still copied
svntest.actions.run_and_verify_svn(None, None, [],
'revert', sbox.ospath('A/B/E2/beta'))
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3783)
def revert_non_recusive_after_delete(sbox):
"non-recursive revert after delete"
sbox.build(read_only=True)
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [], 'rm', sbox.ospath('A/B'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B', 'A/B/E', 'A/B/E/alpha', 'A/B/E/beta', 'A/B/F',
'A/B/lambda', status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# This appears to work but gets the op-depth wrong
expected_output = ["Reverted '%s'\n" % sbox.ospath('A/B')]
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', sbox.ospath('A/B'))
expected_status.tweak('A/B', status=' ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, None, [],
'mkdir', sbox.ospath('A/B/E'))
expected_status.tweak('A/B/E', status='R ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # Since the op-depth was wrong, A/B/E erroneously remains deleted
expected_output = ["Reverted '%s'\n" % sbox.ospath('A/B/E')]
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', sbox.ospath('A/B/E'))
expected_status.tweak('A/B/E', status=' ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
def revert_permissions_only(sbox):
"permission-only reverts"
sbox.build()
wc_dir = sbox.wc_dir
# Helpers pinched/adapted from lock_tests.py. Put them somewhere common?
def check_writability(path, writable):
bits = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
mode = os.stat(path)[0]
if bool(mode & bits) != writable:
raise svntest.Failure("path '%s' is unexpectedly %s (mode %o)"
% (path, ["writable", "read-only"][writable], mode))
def is_writable(path):
"Raise if PATH is not writable."
check_writability(path, True)
def is_readonly(path):
"Raise if PATH is not readonly."
check_writability(path, False)
def check_executability(path, executable):
bits = stat.S_IXGRP | stat.S_IXOTH | stat.S_IEXEC
mode = os.stat(path)[0]
if bool(mode & bits) != executable:
raise svntest.Failure("path '%s' is unexpectedly %s (mode %o)"
% (path,
["executable", "non-executable"][executable],
mode))
def is_executable(path):
"Raise if PATH is not executable."
check_executability(path, True)
def is_non_executable(path):
"Raise if PATH is executable."
check_executability(path, False)
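  # For example (illustrative): mode 0444 (r--r--r--) sets none of the write
  # bits, so is_readonly() passes; mode 0666 (rw-rw-rw-) sets S_IWRITE,
  # S_IWGRP and S_IWOTH, so is_writable() passes.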
os.chmod(sbox.ospath('A/B/E/alpha'), 0444) # read-only
is_readonly(sbox.ospath('A/B/E/alpha'))
expected_output = ["Reverted '%s'\n" % sbox.ospath('A/B/E/alpha')]
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', sbox.ospath('A/B/E/alpha'))
is_writable(sbox.ospath('A/B/E/alpha'))
if svntest.main.is_posix_os():
os.chmod(sbox.ospath('A/B/E/beta'), 0777) # executable
is_executable(sbox.ospath('A/B/E/beta'))
expected_output = ["Reverted '%s'\n" % sbox.ospath('A/B/E/beta')]
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', sbox.ospath('A/B/E/beta'))
is_non_executable(sbox.ospath('A/B/E/beta'))
svntest.actions.run_and_verify_svn(None, None, [],
'propset', 'svn:needs-lock', '1',
sbox.ospath('A/B/E/alpha'))
svntest.actions.run_and_verify_svn(None, None, [],
'propset', 'svn:executable', '1',
sbox.ospath('A/B/E/beta'))
expected_output = svntest.wc.State(wc_dir, {
'A/B/E/alpha': Item(verb='Sending'),
'A/B/E/beta': Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B/E/alpha', wc_rev='2')
expected_status.tweak('A/B/E/beta', wc_rev='2')
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None, wc_dir)
os.chmod(sbox.ospath('A/B/E/alpha'), 0666) # not read-only
is_writable(sbox.ospath('A/B/E/alpha'))
expected_output = ["Reverted '%s'\n" % sbox.ospath('A/B/E/alpha')]
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', sbox.ospath('A/B/E/alpha'))
is_readonly(sbox.ospath('A/B/E/alpha'))
if svntest.main.is_posix_os():
os.chmod(sbox.ospath('A/B/E/beta'), 0666) # not executable
is_non_executable(sbox.ospath('A/B/E/beta'))
expected_output = ["Reverted '%s'\n" % sbox.ospath('A/B/E/beta')]
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', sbox.ospath('A/B/E/beta'))
is_executable(sbox.ospath('A/B/E/beta'))
# copied file is always writeable
sbox.simple_update()
expected_output = ["A %s\n" % sbox.ospath('A/B/E2')]
svntest.actions.run_and_verify_svn(None, expected_output, [], 'copy',
sbox.ospath('A/B/E'),
sbox.ospath('A/B/E2'))
is_writable(sbox.ospath('A/B/E2/alpha'))
svntest.actions.run_and_verify_svn(None, [], [],
'revert', sbox.ospath('A/B/E2/alpha'))
is_writable(sbox.ospath('A/B/E2/alpha'))
@XFail()
@Issue(3851)
def revert_copy_depth_files(sbox):
"revert a copy with depth=files"
sbox.build(read_only=True)
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'copy',
sbox.ospath('A/B/E'),
sbox.ospath('A/B/E2'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'A/B/E2' : Item(status='A ', copied='+', wc_rev='-'),
'A/B/E2/alpha' : Item(status=' ', copied='+', wc_rev='-'),
'A/B/E2/beta' : Item(status=' ', copied='+', wc_rev='-'),
})
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = svntest.verify.UnorderedOutput([
"Reverted '%s'\n" % sbox.ospath(path) for path in ['A/B/E2',
'A/B/E2/alpha',
'A/B/E2/beta']])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', '--depth', 'files',
sbox.ospath('A/B/E2'))
expected_status.remove('A/B/E2', 'A/B/E2/alpha', 'A/B/E2/beta')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@XFail()
@Issue(3851)
def revert_nested_add_depth_immediates(sbox):
"revert a nested add with depth=immediates"
sbox.build(read_only=True)
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'mkdir', '--parents', sbox.ospath('A/X/Y'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'A/X' : Item(status='A ', wc_rev='0'),
'A/X/Y' : Item(status='A ', wc_rev='0'),
})
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = svntest.verify.UnorderedOutput([
"Reverted '%s'\n" % sbox.ospath(path) for path in ['A/X', 'A/X/Y']])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', '--depth', 'immediates',
sbox.ospath('A/X'))
expected_status.remove('A/X', 'A/X/Y')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
def create_superflous_actual_node(sbox):
"create a superfluous actual node"
sbox.build()
wc_dir = sbox.wc_dir
svntest.main.file_append(sbox.ospath('A/B/E/alpha'), 'their text\n')
sbox.simple_commit()
sbox.simple_update()
# Create a NODES row with op-depth>0
svntest.actions.run_and_verify_svn(None, None, [],
'copy', '-r', '1',
sbox.repo_url + '/A/B/E/alpha',
sbox.ospath('alpha'))
# Merge to create an ACTUAL with a conflict
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.add({
'alpha' : Item(status='A ', copied='+', wc_rev='-'),
})
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.main.file_append(sbox.ospath('alpha'), 'my text\n')
svntest.actions.run_and_verify_svn(None, None, [],
'merge', '--accept', 'postpone',
'^/A/B/E/alpha', sbox.ospath('alpha'))
expected_status.tweak('alpha', status='CM', entry_status='A ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Clear merge property and remove conflict files
sbox.simple_propdel('svn:mergeinfo', 'alpha')
os.remove(sbox.ospath('alpha.merge-left.r1'))
os.remove(sbox.ospath('alpha.merge-right.r2'))
os.remove(sbox.ospath('alpha.working'))
expected_status.tweak('alpha', status='A ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3859)
def revert_empty_actual(sbox):
"revert with superfluous actual node"
create_superflous_actual_node(sbox)
wc_dir = sbox.wc_dir
# Non-recursive code path works
svntest.actions.run_and_verify_svn(None,
["Reverted '%s'\n" % sbox.ospath('alpha')],
[],
'revert', sbox.ospath('alpha'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3859)
def revert_empty_actual_recursive(sbox):
"recusive revert with superfluous actual node"
create_superflous_actual_node(sbox)
wc_dir = sbox.wc_dir
# Recursive code path fails, the superfluous actual node suppresses the
# notification
svntest.actions.run_and_verify_svn(None,
["Reverted '%s'\n" % sbox.ospath('alpha')],
[],
'revert', '-R', sbox.ospath('alpha'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3879)
def revert_tree_conflicts_with_replacements(sbox):
"revert tree conflicts with replacements"
sbox.build()
wc_dir = sbox.wc_dir
wc = sbox.ospath
# Use case 1: local replace, incoming replace
# A/mu
# A/D/H --> A/D/H/chi, A/D/H/{loc,inc}_psi
# Use case 2: local edit, incoming replace
# A/D/gamma
# A/D/G --> A/D/G/pi, A/D/G/inc_rho
# Use case 3: local replace, incoming edit
# A/B/lambda
# A/B/E --> A/B/E/alpha, A/B/E/loc_beta
# Case 1: incoming replacements
sbox.simple_rm('A/mu', 'A/D/H')
file_write(wc('A/mu'), "A fresh file.\n")
os.mkdir(wc('A/D/H'))
file_write(wc('A/D/H/chi'), "A fresh file.\n")
file_write(wc('A/D/H/inc_psi'), "A fresh file.\n")
sbox.simple_add('A/mu', 'A/D/H')
# Case 2: incoming replacements
sbox.simple_rm('A/D/gamma', 'A/D/G')
file_write(wc('A/D/gamma'), "A fresh file.\n")
os.mkdir(wc('A/D/G'))
file_write(wc('A/D/G/pi'), "A fresh file.\n")
file_write(wc('A/D/G/inc_rho'), "A fresh file.\n")
sbox.simple_add('A/D/gamma','A/D/G')
# Case 3: incoming edits
file_append(wc('A/B/lambda'), "Incoming!\n")
file_write(wc('A/B/E/alpha'), "Incoming!.\n")
# Commit and roll back to r1.
sbox.simple_commit()
run_svn(None, 'up', wc_dir, '-r1', '-q')
# Case 1: local replacements
sbox.simple_rm('A/mu', 'A/D/H')
file_write(wc('A/mu'), "A fresh file.\n")
os.mkdir(wc('A/D/H'))
file_write(wc('A/D/H/chi'), "A fresh local file.\n")
file_write(wc('A/D/H/loc_psi'), "A fresh local file.\n")
sbox.simple_add('A/mu', 'A/D/H')
# Case 2: local edits
file_append(wc('A/D/gamma'), "Local change.\n")
file_append(wc('A/D/G/pi'), "Local change.\n")
# Case 3: local replacements
sbox.simple_rm('A/B/lambda', 'A/B/E')
file_write(wc('A/B/lambda'), "A fresh local file.\n")
os.mkdir(wc('A/B/E'))
file_write(wc('A/B/E/alpha'), "A fresh local file.\n")
file_write(wc('A/B/E/loc_beta'), "A fresh local file.\n")
sbox.simple_add('A/B/lambda', 'A/B/E')
# Update and check tree conflict status.
run_svn(None, 'up', wc_dir)
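  # In the status tree below, 'R ' marks locally replaced nodes, 'A '/'D '
  # added/deleted ones, and treeconflict='C' flags the victims of the tree
  # conflicts raised by the update above.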
expected_status = svntest.wc.State(wc_dir, {
'' : Item(status=' ', wc_rev=2),
'A' : Item(status=' ', wc_rev=2),
'A/B' : Item(status=' ', wc_rev=2),
'A/B/E' : Item(status='R ', wc_rev=2, treeconflict='C'),
'A/B/E/alpha' : Item(status='A ', wc_rev='-'),
'A/B/E/beta' : Item(status='D ', wc_rev=2),
'A/B/E/loc_beta' : Item(status='A ', wc_rev='-'),
'A/B/F' : Item(status=' ', wc_rev=2),
'A/B/lambda' : Item(status='R ', wc_rev=2, treeconflict='C'),
'A/C' : Item(status=' ', wc_rev=2),
'A/D' : Item(status=' ', wc_rev=2),
'A/D/G' : Item(status='R ', wc_rev='-', copied='+',
treeconflict='C'),
'A/D/G/inc_rho' : Item(status='D ', wc_rev=2),
'A/D/G/pi' : Item(status='M ', wc_rev='-', copied='+'),
'A/D/G/rho' : Item(status=' ', wc_rev='-', copied='+'),
'A/D/G/tau' : Item(status=' ', wc_rev='-', copied='+'),
'A/D/H' : Item(status='R ', wc_rev=2, treeconflict='C'),
'A/D/H/chi' : Item(status='A ', wc_rev='-'),
'A/D/H/inc_psi' : Item(status='D ', wc_rev=2),
'A/D/H/loc_psi' : Item(status='A ', wc_rev='-'),
'A/D/gamma' : Item(status='R ', wc_rev='-', copied='+',
treeconflict='C'),
'A/mu' : Item(status='R ', wc_rev=2, treeconflict='C'),
'iota' : Item(status=' ', wc_rev=2),
})
svntest.actions.run_and_verify_unquiet_status(wc_dir, expected_status)
def cd_and_status_u(dir_target):
was_cwd = os.getcwd()
os.chdir(os.path.abspath(wc(dir_target)))
run_svn(None, 'status', '-u')
os.chdir(was_cwd)
cd_and_status_u('A')
cd_and_status_u('A/D')
# Until r1102143, the following 'status -u' commands failed with "svn:
# E165004: Two top-level reports with no target".
cd_and_status_u('A/D/G')
cd_and_status_u('A/D/H')
# Revert everything (i.e., accept "theirs-full").
svntest.actions.run_and_verify_revert([
wc('A/B/E'),
wc('A/B/E/alpha'), # incoming & local
wc('A/B/E/beta'),
wc('A/B/E/loc_beta'),
wc('A/B/lambda'),
wc('A/D/G'),
wc('A/D/G/pi'),
wc('A/D/G/inc_rho'), # incoming
wc('A/D/G/rho'),
wc('A/D/G/tau'),
wc('A/D/H'),
wc('A/D/H/chi'),
wc('A/D/H/inc_psi'), # incoming
wc('A/D/H/loc_psi'),
wc('A/D/gamma'),
wc('A/mu'),
], '-R', wc_dir)
# Remove a few unversioned files that revert left behind.
os.remove(wc('A/B/E/loc_beta'))
os.remove(wc('A/D/H/loc_psi'))
# The update operation should have put all incoming items in place.
expected_status = svntest.wc.State(wc_dir, {
'' : Item(status=' ', wc_rev=2),
'A' : Item(status=' ', wc_rev=2),
'A/B' : Item(status=' ', wc_rev=2),
'A/B/E' : Item(status=' ', wc_rev=2),
'A/B/E/alpha' : Item(status=' ', wc_rev=2),
'A/B/E/beta' : Item(status=' ', wc_rev=2),
'A/B/F' : Item(status=' ', wc_rev=2),
'A/B/lambda' : Item(status=' ', wc_rev=2),
'A/C' : Item(status=' ', wc_rev=2),
'A/D' : Item(status=' ', wc_rev=2),
'A/D/G' : Item(status=' ', wc_rev=2),
'A/D/G/inc_rho' : Item(status=' ', wc_rev=2),
'A/D/G/pi' : Item(status=' ', wc_rev=2),
'A/D/H' : Item(status=' ', wc_rev=2),
'A/D/H/chi' : Item(status=' ', wc_rev=2),
'A/D/H/inc_psi' : Item(status=' ', wc_rev=2),
'A/D/gamma' : Item(status=' ', wc_rev=2),
'A/mu' : Item(status=' ', wc_rev=2),
'iota' : Item(status=' ', wc_rev=2),
})
svntest.actions.run_and_verify_unquiet_status(wc_dir, expected_status)
def create_no_text_change_conflict(sbox):
"create conflict with no text change"
sbox.build()
wc_dir = sbox.wc_dir
shutil.copyfile(sbox.ospath('A/B/E/alpha'), sbox.ospath('A/B/E/alpha-copy'))
svntest.main.file_append(sbox.ospath('A/B/E/alpha'), 'their text\n')
sbox.simple_commit()
sbox.simple_update()
# Update to create a conflict
svntest.main.file_append(sbox.ospath('A/B/E/alpha'), 'my text\n')
svntest.actions.run_and_verify_svn(None, None, [],
'up', '-r1', '--accept', 'postpone',
wc_dir)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B/E/alpha', status='C ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Reset the text with the file still marked as a conflict
os.remove(sbox.ospath('A/B/E/alpha'))
shutil.move(sbox.ospath('A/B/E/alpha-copy'), sbox.ospath('A/B/E/alpha'))
@Issue(3859)
def revert_no_text_change_conflict(sbox):
"revert conflict with no text change"
create_no_text_change_conflict(sbox)
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None,
["Reverted '%s'\n"
% sbox.ospath('A/B/E/alpha')],
[],
'revert', sbox.ospath('A/B/E/alpha'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3859)
def revert_no_text_change_conflict_recursive(sbox):
"revert -R conflict with no text change"
create_no_text_change_conflict(sbox)
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None,
["Reverted '%s'\n"
% sbox.ospath('A/B/E/alpha')],
[],
'revert', '-R', wc_dir)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3938)
def revert_with_unversioned_targets(sbox):
"revert with unversioned targets"
sbox.build()
wc_dir = sbox.wc_dir
chi_path = sbox.ospath('A/D/H/chi')
delta_path = sbox.ospath('A/D/H/delta')
psi_path = sbox.ospath('A/D/H/psi')
chi_contents = "modified chi\n"
delta_contents = "This is the unversioned file 'delta'.\n"
psi_contents = "modified psi\n"
# touch delta
open(delta_path, 'w').write(delta_contents)
# modify chi psi
open(chi_path, 'w').write(chi_contents)
open(psi_path, 'w').write(psi_contents)
# revert
expected_output = svntest.verify.UnorderedOutput([
"Reverted '%s'\n" % sbox.ospath('A/D/H/chi'),
"Skipped '%s'\n" % sbox.ospath('A/D/H/delta'),
"Reverted '%s'\n" % sbox.ospath('A/D/H/psi'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'revert', chi_path, delta_path, psi_path)
# verify status
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'A/D/H/delta': Item(status='? '),
})
svntest.actions.run_and_verify_unquiet_status(wc_dir, expected_status)
# verify disk
expected_disk = svntest.main.greek_state.copy()
expected_disk.add({
'A/D/H/delta': Item(delta_contents),
})
svntest.actions.verify_disk(wc_dir, expected_disk.old_tree(), True)
def revert_nonexistent(sbox):
'svn revert -R nonexistent'
sbox.build(read_only=True)
svntest.actions.run_and_verify_svn(None, 'Skipped.*nonexistent', [],
'revert', '-R', sbox.ospath('nonexistent'))
@Issue(4168)
def revert_obstructing_wc(sbox):
"revert with an obstructing working copy"
sbox.build(create_wc=False, read_only=True)
wc_dir = sbox.wc_dir
expected_output = svntest.wc.State(wc_dir, {})
expected_disk = svntest.wc.State(wc_dir, {})
# Checkout wc as depth empty
svntest.actions.run_and_verify_checkout(sbox.repo_url, wc_dir,
expected_output, expected_disk,
None, None, None, None,
'--depth', 'empty')
# And create an obstructing working copy as A
svntest.actions.run_and_verify_checkout(sbox.repo_url, wc_dir + '/A',
expected_output, expected_disk,
None, None, None, None,
'--depth', 'empty')
# Now try to fetch the entire wc, which will find an obstruction
expected_output = svntest.wc.State(wc_dir, {
'A' : Item(verb='Skipped'),
'iota' : Item(status='A '),
})
expected_status = svntest.wc.State(wc_dir, {
'' : Item(status=' ', wc_rev='1'),
'iota' : Item(status=' ', wc_rev='1'),
# A is not versioned but exists
})
  # Use expected_status.old_tree() to avoid doing an entries comparison
svntest.actions.run_and_verify_update(wc_dir,
expected_output, None,
expected_status.old_tree(),
None, None, None,
None, None, None,
wc_dir, '--set-depth', 'infinity')
# Revert should do nothing (no local changes), and report the obstruction
  # (reporting the obstruction is nice for debugging, but not really required
# in this specific case, as the node was not modified)
svntest.actions.run_and_verify_svn(None, "Skipped '.*A' -- .*obstruct.*", [],
'revert', '-R', wc_dir)
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
revert_from_wc_root,
revert_reexpand_keyword,
revert_replaced_file_without_props,
revert_moved_file,
revert_wc_to_wc_replace_with_props,
revert_file_merge_replace_with_history,
revert_repos_to_wc_replace_with_props,
revert_after_second_replace,
revert_after_manual_conflict_resolution__text,
revert_after_manual_conflict_resolution__prop,
revert_propset__dir,
revert_propset__file,
revert_propdel__dir,
revert_propdel__file,
revert_replaced_with_history_file_1,
status_of_missing_dir_after_revert,
status_of_missing_dir_after_revert_replaced_with_history_dir,
revert_replaced_with_history_file_2,
revert_tree_conflicts_in_updated_files,
revert_add_over_not_present_dir,
revert_added_tree,
revert_child_of_copy,
revert_non_recusive_after_delete,
revert_permissions_only,
revert_copy_depth_files,
revert_nested_add_depth_immediates,
revert_empty_actual,
revert_tree_conflicts_with_replacements,
revert_empty_actual_recursive,
revert_no_text_change_conflict,
revert_no_text_change_conflict_recursive,
revert_with_unversioned_targets,
revert_nonexistent,
revert_obstructing_wc,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
|
centic9/subversion-ppa
|
subversion/tests/cmdline/revert_tests.py
|
Python
|
apache-2.0
| 64,202
|
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import ast
import base64
import contextlib
import copy
import functools
import os
import re
import uuid
import mock
from mox3 import mox
from oslo_concurrency import lockutils
from oslo_config import fixture as config_fixture
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import testtools
from nova.compute import api as compute_api
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.db import fakes as db_fakes
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_processutils
import nova.tests.unit.image.fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_aggregate
from nova.tests.unit import utils as test_utils
from nova.tests.unit.virt.xenapi import stubs
from nova.tests import uuidsentinel as uuids
from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
IMAGE_MACHINE = uuids.image_ref
IMAGE_KERNEL = uuids.image_kernel_id
IMAGE_RAMDISK = uuids.image_ramdisk_id
IMAGE_RAW = uuids.image_raw
IMAGE_VHD = uuids.image_vhd
IMAGE_ISO = uuids.image_iso
IMAGE_IPXE_ISO = uuids.image_ipxe_iso
IMAGE_FROM_VOLUME = uuids.image_from_volume
IMAGE_FIXTURES = {
IMAGE_MACHINE: {
'image_meta': {'name': 'fakemachine', 'size': 0,
'disk_format': 'ami',
'container_format': 'ami',
'id': 'fake-image'},
},
IMAGE_KERNEL: {
'image_meta': {'name': 'fakekernel', 'size': 0,
'disk_format': 'aki',
'container_format': 'aki',
'id': 'fake-kernel'},
},
IMAGE_RAMDISK: {
'image_meta': {'name': 'fakeramdisk', 'size': 0,
'disk_format': 'ari',
'container_format': 'ari',
'id': 'fake-ramdisk'},
},
IMAGE_RAW: {
'image_meta': {'name': 'fakeraw', 'size': 0,
'disk_format': 'raw',
'container_format': 'bare',
'id': 'fake-image-raw'},
},
IMAGE_VHD: {
'image_meta': {'name': 'fakevhd', 'size': 0,
'disk_format': 'vhd',
'container_format': 'ovf',
'id': 'fake-image-vhd'},
},
IMAGE_ISO: {
'image_meta': {'name': 'fakeiso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare',
'id': 'fake-image-iso'},
},
IMAGE_IPXE_ISO: {
'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare',
'id': 'fake-image-pxe',
'properties': {'ipxe_boot': 'true'}},
},
IMAGE_FROM_VOLUME: {
'image_meta': {'name': 'fake_ipxe_iso',
'id': 'fake-image-volume',
'properties': {'foo': 'bar'}},
},
}
def get_session():
return xenapi_session.XenAPISession('test_url', 'root', 'test_pass')
def set_image_fixtures():
image_service = fake_image.FakeImageService()
image_service.images.clear()
for image_id, image_meta in IMAGE_FIXTURES.items():
image_meta = image_meta['image_meta']
image_meta['id'] = image_id
image_service.create(None, image_meta)
def get_fake_device_info():
    # FIXME: 'sr_uuid', 'introduce_sr_keys', 'sr_type' and 'vdi_uuid'
# can be removed from the dict when LP bug #1087308 is fixed
fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
fake = {'block_device_mapping':
[{'connection_info': {'driver_volume_type': 'iscsi',
'data': {'sr_uuid': 'falseSR',
'introduce_sr_keys': ['sr_type'],
'sr_type': 'iscsi',
'vdi_uuid': fake_vdi_uuid,
'target_discovered': False,
'target_iqn': 'foo_iqn:foo_volid',
'target_portal': 'localhost:3260',
'volume_id': 'foo_volid',
'target_lun': 1,
'auth_password': 'my-p@55w0rd',
'auth_username': 'johndoe',
'auth_method': u'CHAP'}, },
'mount_device': 'vda',
'delete_on_termination': False}, ],
'root_device_name': '/dev/sda',
'ephemerals': [],
'swap': None, }
return fake
def stub_vm_utils_with_vdi_attached(function):
"""vm_utils.with_vdi_attached needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
def fake_vdi_attached(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_image_download(*args, **kwargs):
pass
orig_vdi_attached = vm_utils.vdi_attached
orig_image_download = fake_image._FakeImageService.download
try:
vm_utils.vdi_attached = fake_vdi_attached
fake_image._FakeImageService.download = fake_image_download
return function(self, *args, **kwargs)
finally:
fake_image._FakeImageService.download = orig_image_download
vm_utils.vdi_attached = orig_vdi_attached
return decorated_function
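# Illustrative usage of the decorator above (the test name is hypothetical):
# while the decorated test method runs, vm_utils.vdi_attached is swapped for a
# context manager that yields the string 'fakedev' and the fake image
# service's download becomes a no-op; both originals are restored afterwards.
#
#     @stub_vm_utils_with_vdi_attached
#     def test_that_attaches_a_vdi(self):
#         ...  # any vm_utils.vdi_attached(...) block here yields 'fakedev'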
def create_instance_with_system_metadata(context, instance_values):
inst = objects.Instance(context=context,
system_metadata={})
for k, v in instance_values.items():
setattr(inst, k, v)
inst.flavor = objects.Flavor.get_by_id(context,
instance_values['instance_type_id'])
inst.old_flavor = None
inst.new_flavor = None
inst.create()
inst.pci_devices = objects.PciDeviceList(objects=[])
return inst
class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.instance = fake_instance.fake_db_instance(name='foo')
@classmethod
def _make_connection_info(cls):
target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
return {'driver_volume_type': 'iscsi',
'data': {'volume_id': 1,
'target_iqn': target_iqn,
'target_portal': '127.0.0.1:3260,fake',
'target_lun': None,
'auth_method': 'CHAP',
'auth_username': 'username',
'auth_password': 'password'}}
def test_attach_volume(self):
# This shows how to test Ops classes' methods.
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
conn_info = self._make_connection_info()
self.assertIsNone(
conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
# This shows how to test when exceptions are raised.
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(self.instance['name'], 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
None, {'driver_volume_type': 'nonexist'},
self.instance, '/dev/sdc')
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.network = importutils.import_object(CONF.network_manager)
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
db_fakes.stub_out_db_instance_api(self)
xenapi_fake.create_network('fake', 'fake_br1')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stub_out_vm_methods(self.stubs)
fake_processutils.stub_out_processutils_execute(self.stubs)
self.user_id = 'fake'
self.project_id = fakes.FAKE_PROJECT_ID
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn._session.is_local_connection = False
fake_image.stub_out_image_service(self)
set_image_fixtures()
stubs.stubout_image_service_download(self.stubs)
stubs.stubout_stream_disk(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
name_label = "fakenamelabel"
disk_type = "fakedisktype"
virtual_size = 777
return vm_utils.create_vdi(
session, sr_ref, instance, name_label, disk_type,
virtual_size)
self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
def fake_unpause_and_wait(self, vm_ref, instance, power_on):
self._update_last_dom_id(vm_ref)
self.stubs.Set(vmops.VMOps, '_unpause_and_wait',
fake_unpause_and_wait)
def tearDown(self):
fake_image.FakeImageService_reset()
super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
session = get_session()
vm = vm_utils._get_this_vm_ref(session)
# Local root disk
vdi0 = xenapi_fake.create_vdi('compute', None)
vbd0 = xenapi_fake.create_vbd(vm, vdi0)
# Instance VDI
vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
other_config={'nova_instance_uuid': 'aaaa'})
xenapi_fake.create_vbd(vm, vdi1)
# Only looks like instance VDI
vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
vbd2 = xenapi_fake.create_vbd(vm, vdi2)
self.conn.init_host(None)
self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
@mock.patch.object(vm_utils, 'lookup', return_value=True)
def test_instance_exists(self, mock_lookup):
self.stubs.Set(objects.Instance, 'name', 'foo')
instance = objects.Instance(uuid=uuids.instance)
self.assertTrue(self.conn.instance_exists(instance))
mock_lookup.assert_called_once_with(mock.ANY, 'foo')
@mock.patch.object(vm_utils, 'lookup', return_value=None)
def test_instance_not_exists(self, mock_lookup):
self.stubs.Set(objects.Instance, 'name', 'bar')
instance = objects.Instance(uuid=uuids.instance)
self.assertFalse(self.conn.instance_exists(instance))
mock_lookup.assert_called_once_with(mock.ANY, 'bar')
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEqual(instances, [])
def test_list_instance_uuids_0(self):
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(instance_uuids, [])
def test_list_instance_uuids(self):
uuids = []
for x in range(1, 4):
instance = self._create_instance()
uuids.append(instance['uuid'])
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(len(uuids), len(instance_uuids))
self.assertEqual(set(uuids), set(instance_uuids))
def test_get_rrd_server(self):
self.flags(connection_url='myscheme://myaddress/',
group='xenserver')
server_info = vm_utils._get_rrd_server()
self.assertEqual(server_info[0], 'myscheme')
self.assertEqual(server_info[1], 'myaddress')
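    # Reference values: what test_get_diagnostics below expects the driver to
    # parse out of the vm_rrd.xml sample fixture (supplied via a stubbed
    # vm_utils._get_rrd).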
expected_raw_diagnostics = {
'vbd_xvdb_write': '0.0',
'memory_target': '4294967296.0000',
'memory_internal_free': '1415564.0000',
'memory': '4294967296.0000',
'vbd_xvda_write': '0.0',
'cpu0': '0.0042',
'vif_0_tx': '287.4134',
'vbd_xvda_read': '0.0',
'vif_0_rx': '1816.0144',
'vif_2_rx': '0.0',
'vif_2_tx': '0.0',
'vbd_xvdb_read': '0.0',
'last_update': '1328795567',
}
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, 'vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
expected = self.expected_raw_diagnostics
instance = self._create_instance()
actual = self.conn.get_diagnostics(instance)
self.assertThat(actual, matchers.DictMatches(expected))
def test_get_instance_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, 'vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
expected = {
'config_drive': False,
'state': 'running',
'driver': 'xenapi',
'version': '1.0',
'uptime': 0,
'hypervisor_os': None,
'cpu_details': [{'time': 0}, {'time': 0},
{'time': 0}, {'time': 0}],
'nic_details': [{'mac_address': '00:00:00:00:00:00',
'rx_drop': 0,
'rx_errors': 0,
'rx_octets': 0,
'rx_packets': 0,
'tx_drop': 0,
'tx_errors': 0,
'tx_octets': 0,
'tx_packets': 0}],
'disk_details': [{'errors_count': 0,
'id': '',
'read_bytes': 0,
'read_requests': 0,
'write_bytes': 0,
'write_requests': 0}],
'memory_details': {'maximum': 8192, 'used': 0}}
instance = self._create_instance(obj=True)
actual = self.conn.get_instance_diagnostics(instance)
self.assertEqual(expected, actual.serialize())
def test_get_vnc_console(self):
instance = self._create_instance(obj=True)
session = get_session()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = vm_utils.lookup(session, instance['name'])
console = conn.get_vnc_console(self.context, instance)
        # Note(sulo): We don't care about the session id in tests;
        # it will always differ, so strip it out
actual_path = console.internal_access_path.split('&')[0]
expected_path = "/console?ref=%s" % str(vm_ref)
self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_for_rescue(self):
instance = self._create_instance(obj=True)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
'Running')
# Set instance state to rescued
instance['vm_state'] = 'rescued'
console = conn.get_vnc_console(self.context, instance)
        # Note(sulo): We don't care about the session id in tests;
        # it will always differ, so strip it out
actual_path = console.internal_access_path.split('&')[0]
expected_path = "/console?ref=%s" % str(rescue_vm)
self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_instance_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'building'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotFound,
conn.get_vnc_console, self.context, instance)
def test_get_vnc_console_rescue_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'rescued'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotReady,
conn.get_vnc_console, self.context, instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=False,
osvol=False):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
        # Stub out the firewall driver, as the stubs installed above alter
        # XML-RPC result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
image_id = "my_snapshot_id"
self.assertRaises(exception.NovaException, self.conn.snapshot,
self.context, instance, image_id,
lambda *args, **kwargs: None)
def test_instance_snapshot(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
image_id = "my_snapshot_id"
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
        # Stub out the firewall driver, as the stubs installed above alter
        # XML-RPC result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
self.fake_upload_called = False
def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids):
self.fake_upload_called = True
self.assertEqual(ctx, self.context)
self.assertEqual(inst, instance)
self.assertIsInstance(vdi_uuids, list)
self.assertEqual(img_id, image_id)
self.stubs.Set(glance.GlanceStore, 'upload_image',
fake_image_upload)
self.conn.snapshot(self.context, instance, image_id,
func_call_matcher.call)
# Ensure VM was torn down
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEqual(vm_labels, [instance['name']])
# Ensure VBDs were torn down
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEqual(vbd_labels, [instance['name']])
# Ensure task states changed in correct order
self.assertIsNone(func_call_matcher.match())
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assertFalse(name_label.endswith('snapshot'))
self.assertTrue(self.fake_upload_called)
def create_vm_record(self, conn, os_type, name):
instances = conn.list_instances()
self.assertEqual(instances, [name])
# Get Nova record for VM
vm_info = conn.get_info({'name': name})
# Get XenAPI record for VM
vms = [rec for ref, rec
in six.iteritems(xenapi_fake.get_all_records('VM'))
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn, instance_type_id, check_injection):
flavor = objects.Flavor.get_by_id(self.context, instance_type_id)
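        # << 10 multiplies by 1024: memory_mb -> KiB, then KiB -> bytes
        # (e.g. 2048 MB -> 2097152 KiB -> 2147483648 bytes).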
mem_kib = int(flavor['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = flavor['vcpus']
vcpu_weight = flavor['vcpu_weight']
self.assertEqual(self.vm_info.max_mem_kb, mem_kib)
self.assertEqual(self.vm_info.mem_kb, mem_kib)
self.assertEqual(self.vm['memory_static_max'], mem_bytes)
self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
if vcpu_weight is None:
self.assertEqual(self.vm['VCPUs_params'], {})
else:
self.assertEqual(self.vm['VCPUs_params'],
{'weight': str(vcpu_weight), 'cap': '0'})
# Check that the VM is running according to Nova
self.assertEqual(self.vm_info.state, power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEqual(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
self.assertNotIn('vm-data/hostname', xenstore_data)
key = 'vm-data/networking/DEADBEEF0001'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertJsonEqual({'broadcast': '192.168.1.255',
'dns': ['192.168.1.4', '192.168.1.3'],
'gateway': '192.168.1.1',
'gateway_v6': '2001:db8:0:1::1',
'ip6s': [{'enabled': '1',
'ip': '2001:db8:0:1:dcad:beff:feef:1',
'netmask': 64,
'gateway': '2001:db8:0:1::1'}],
'ips': [{'enabled': '1',
'ip': '192.168.1.100',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'},
{'enabled': '1',
'ip': '192.168.1.101',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'}],
'label': 'test1',
'mac': 'DE:AD:BE:EF:00:01'}, tcpip_data)
def check_vm_params_for_windows(self):
self.assertEqual(self.vm['platform']['nx'], 'true')
self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], '')
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEqual(self.vm['PV_kernel'], '')
self.assertNotEqual(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
session = get_session()
return session.call_xenapi('VDI.get_all')
def _list_vms(self):
session = get_session()
return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if vdi_ref not in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if 'image-id' not in vdi_rec['other_config']:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
injected_files=None, check_injection=False,
create_record=True, empty_dns=False,
block_device_info=None,
key_data=None):
if injected_files is None:
injected_files = []
# Fake out inject_instance_metadata
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
if create_record:
flavor = objects.Flavor.get_by_id(self.context,
instance_type_id)
instance = objects.Instance(context=self.context)
instance.project_id = self.project_id
instance.user_id = self.user_id
instance.image_ref = image_ref
instance.kernel_id = kernel_id
instance.ramdisk_id = ramdisk_id
instance.root_gb = flavor.root_gb
instance.ephemeral_gb = flavor.ephemeral_gb
instance.instance_type_id = instance_type_id
instance.os_type = os_type
instance.hostname = hostname
instance.key_data = key_data
instance.architecture = architecture
instance.system_metadata = {}
instance.flavor = flavor
instance.create()
else:
instance = objects.Instance.get_by_id(self.context, instance_id,
expected_attrs=['flavor'])
network_info = fake_network.fake_get_instance_nw_info(self)
if empty_dns:
# NOTE(tr3buchet): this is a terrible way to do this...
network_info[0]['network']['subnets'][0]['dns'] = []
image_meta = objects.ImageMeta.from_dict(
IMAGE_FIXTURES[image_ref]["image_meta"])
self.conn.spawn(self.context, instance, image_meta, injected_files,
'herp', network_info, block_device_info)
self.create_vm_record(self.conn, os_type, instance['name'])
self.check_vm_record(self.conn, instance_type_id, check_injection)
self.assertEqual(instance['os_type'], os_type)
self.assertEqual(instance['architecture'], architecture)
def test_spawn_ipxe_iso_success(self):
self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
self.flags(ipxe_network_name='test1',
ipxe_boot_menu_url='http://boot.example.com',
ipxe_mkisofs_cmd='/root/mkisofs',
group='xenserver')
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.conn._session.call_plugin_serialized(
'ipxe.py', 'inject', '/sr/path', mox.IgnoreArg(),
'http://boot.example.com', '192.168.1.100', '255.255.255.0',
'192.168.1.1', '192.168.1.3', '/root/mkisofs')
self.conn._session.call_plugin_serialized('partition_utils.py',
'make_partition',
'fakedev', '2048', '-')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_no_network_name(self):
self.flags(ipxe_network_name=None,
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
# ipxe inject shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.conn._session.call_plugin_serialized('partition_utils.py',
'make_partition',
'fakedev', '2048', '-')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_no_boot_menu_url(self):
self.flags(ipxe_network_name='test1',
ipxe_boot_menu_url=None,
group='xenserver')
# ipxe inject shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.conn._session.call_plugin_serialized('partition_utils.py',
'make_partition',
'fakedev', '2048', '-')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_unknown_network_name(self):
self.flags(ipxe_network_name='test2',
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
# ipxe inject shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.conn._session.call_plugin_serialized('partition_utils.py',
'make_partition',
'fakedev', '2048', '-')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_empty_dns(self):
# Test spawning with an empty dns list.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory, self._test_spawn,
IMAGE_MACHINE, IMAGE_KERNEL,
IMAGE_RAMDISK, "4") # m1.xlarge
def test_spawn_fail_cleanup_1(self):
"""Simulates an error while downloading an image.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
self.assertRaises(xenapi_fake.Failure, self._test_spawn,
IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_2(self):
"""Simulates an error while creating VM record.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure, self._test_spawn,
IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_3(self):
"""Simulates an error while attaching disks.
Verifies that the VM and VDIs created are properly cleaned up.
"""
stubs.stubout_attach_disks(self.stubs)
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
self.assertRaises(xenapi_fake.Failure, self._test_spawn,
IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_raw_glance(self):
self._test_spawn(IMAGE_RAW, None, None, os_type=None)
self.check_vm_params_for_windows()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_windows(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="windows", architecture="i386",
instance_type_id=5)
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
def fake_fetch_disk_image(context, session, instance, name_label,
image_id, image_type):
sr_ref = vm_utils.safe_find_sr(session)
image_type_str = vm_utils.ImageType.to_string(image_type)
vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
name_label, image_type_str, "20")
vdi_role = vm_utils.ImageType.get_role(image_type)
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
self.stubs.Set(vm_utils, '_fetch_disk_image',
fake_fetch_disk_image)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_boot_from_volume_no_glance_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(IMAGE_FROM_VOLUME, None, None,
block_device_info=dev_info)
def test_spawn_boot_from_volume_with_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(IMAGE_VHD, None, None,
block_device_info=dev_info)
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
actual = kwargs.get('process_input', None)
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether DE:AD:BE:EF:00:01
address 192.168.1.100
netmask 255.255.255.0
broadcast 192.168.1.255
gateway 192.168.1.1
dns-nameservers 192.168.1.3 192.168.1.4
iface eth0 inet6 static
hwaddress ether DE:AD:BE:EF:00:01
address 2001:db8:0:1:dcad:beff:feef:1
netmask 64
gateway 2001:db8:0:1::1
"""
self.assertEqual(expected, actual)
self._tee_executed = True
return '', ''
def _readlink_handler(cmd_parts, **kwargs):
return os.path.realpath(cmd_parts[2]), ''
fake_processutils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
(r'readlink -nm.*', _readlink_handler),
])
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_spawn_netinject_xenstore(self):
db_fakes.stub_out_db_instance_api(self, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
# When mounting, create real files under the mountpoint to simulate
# files in the mounted filesystem
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug('Creating files in %s to simulate guest agent',
self._tmpdir)
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
# Umount would normally make files in the mounted filesystem
# disappear, so do that here
LOG.debug('Removing simulated guest agent files in %s',
self._tmpdir)
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_processutils.fake_execute_set_repliers([
(r'mount', _mount_handler),
(r'umount', _umount_handler),
(r'tee.*interfaces', _tee_handler)])
self._test_spawn(IMAGE_MACHINE, IMAGE_KERNEL,
IMAGE_RAMDISK, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
def test_spawn_injects_auto_disk_config_to_xenstore(self):
instance = self._create_instance(spawn=False, obj=True)
self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
self.mox.ReplayAll()
image_meta = objects.ImageMeta.from_dict(
IMAGE_FIXTURES[IMAGE_MACHINE]["image_meta"])
self.conn.spawn(self.context, instance, image_meta, [], 'herp', '')
def test_spawn_vlanmanager(self):
self.flags(network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
inst2 = self._create_instance(False, obj=True)
networks = self.network.db.network_get_all(ctxt)
with mock.patch('nova.objects.network.Network._from_db_object'):
for network in networks:
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt,
instance_id=inst2.id,
instance_uuid=inst2.uuid,
host=CONF.host,
vpn=None,
rxtx_factor=3,
project_id=self.project_id,
macs=None)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
instance_id=inst2.id,
create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
# consistent with bridge specified in nova db
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 10 * 1024))
def test_spawn_ssh_key_injection(self):
# Test spawning with key_data on an instance. Should use
# agent file injection.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
def fake_encrypt_text(sshkey, new_pass):
self.assertEqual("ssh-rsa fake_keydata", sshkey)
return "fake"
self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nssh-rsa fake_keydata\n')
injected_files = [('/root/.ssh/authorized_keys', expected_data)]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
key_data='ssh-rsa fake_keydata')
self.assertEqual(actual_injected_files, injected_files)
def test_spawn_ssh_key_injection_non_rsa(self):
# Test spawning with key_data on an instance. Should use
# agent file injection.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
def fake_encrypt_text(sshkey, new_pass):
raise NotImplementedError("Should not be called")
self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nssh-dsa fake_keydata\n')
injected_files = [('/root/.ssh/authorized_keys', expected_data)]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
key_data='ssh-dsa fake_keydata')
self.assertEqual(actual_injected_files, injected_files)
def test_spawn_injected_files(self):
# Test spawning with injected_files.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
injected_files = [('/tmp/foo', 'foobar')]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
injected_files=injected_files)
self.check_vm_params_for_linux()
self.assertEqual(actual_injected_files, injected_files)
@mock.patch('nova.db.agent_build_get_by_triple')
def test_spawn_agent_upgrade(self, mock_get):
self.flags(use_agent_default=True,
group='xenserver')
mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf",
'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': False,
'id': 1}
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
@mock.patch('nova.db.agent_build_get_by_triple')
def test_spawn_agent_upgrade_fails_silently(self, mock_get):
mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf",
'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': False,
'id': 1}
self._test_spawn_fails_silently_with(exception.AgentError,
method="_plugin_agent_agentupdate", failure="fake_error")
def test_spawn_with_resetnetwork_alternative_returncode(self):
self.flags(use_agent_default=True,
group='xenserver')
def fake_resetnetwork(self, method, args):
fake_resetnetwork.called = True
# NOTE(johngarbutt): as returned by FreeBSD and Gentoo
return jsonutils.dumps({'returncode': '500',
'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_resetnetwork', fake_resetnetwork)
fake_resetnetwork.called = False
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.assertTrue(fake_resetnetwork.called)
def _test_spawn_fails_silently_with(self, expected_exception_cls,
method="_plugin_agent_version",
failure=None, value=None):
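        # Stub the given agent plugin call to fail (or return a bad value),
        # spawn anyway, and assert that the resulting agent exception is
        # recorded via add_instance_fault_from_exc instead of aborting spawn.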
self.flags(use_agent_default=True,
agent_version_timeout=0,
group='xenserver')
def fake_agent_call(self, method, args):
if failure:
raise xenapi_fake.Failure([failure])
else:
return value
self.stubs.Set(stubs.FakeSessionForVMTests,
method, fake_agent_call)
called = {}
def fake_add_instance_fault(*args, **kwargs):
called["fake_add_instance_fault"] = args[2]
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
fake_add_instance_fault)
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
actual_exception = called["fake_add_instance_fault"]
self.assertIsInstance(actual_exception, expected_exception_cls)
def test_spawn_fails_silently_with_agent_timeout(self):
self._test_spawn_fails_silently_with(exception.AgentTimeout,
failure="TIMEOUT:fake")
def test_spawn_fails_silently_with_agent_not_implemented(self):
self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
failure="NOT IMPLEMENTED:fake")
def test_spawn_fails_silently_with_agent_error(self):
self._test_spawn_fails_silently_with(exception.AgentError,
failure="fake_error")
def test_spawn_fails_silently_with_agent_bad_return(self):
error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
self._test_spawn_fails_silently_with(exception.AgentError,
value=error)
def test_spawn_sets_last_dom_id(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.assertEqual(self.vm['domid'],
self.vm['other_config']['last_dom_id'])
def test_rescue(self):
instance = self._create_instance(spawn=False, obj=True)
xenapi_fake.create_vm(instance['name'], 'Running')
session = get_session()
vm_ref = vm_utils.lookup(session, instance['name'])
swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
root_vdi_ref = xenapi_fake.create_vdi('root', None)
eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
vol_vdi_ref = xenapi_fake.create_vdi('volume', None)
xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
other_config={'osvol': True})
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict(
{'id': IMAGE_VHD,
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}})
conn.rescue(self.context, instance, [], image_meta, '')
vm = xenapi_fake.get_record('VM', vm_ref)
rescue_name = "%s-rescue" % vm["name_label"]
rescue_ref = vm_utils.lookup(session, rescue_name)
rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
vdi_refs = {}
for vbd_ref in rescue_vm['VBDs']:
vbd = xenapi_fake.get_record('VBD', vbd_ref)
vdi_refs[vbd['VDI']] = vbd['userdevice']
self.assertEqual('1', vdi_refs[root_vdi_ref])
self.assertEqual('2', vdi_refs[swap_vdi_ref])
self.assertEqual('4', vdi_refs[eph1_vdi_ref])
self.assertEqual('5', vdi_refs[eph2_vdi_ref])
self.assertNotIn(vol_vdi_ref, vdi_refs)
def test_rescue_preserve_disk_on_failure(self):
# test that the original disk is preserved if rescue setup fails
# bug #1227898
instance = self._create_instance(obj=True)
session = get_session()
image_meta = objects.ImageMeta.from_dict(
{'id': IMAGE_VHD,
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}})
vm_ref = vm_utils.lookup(session, instance['name'])
vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
# raise an error in the spawn setup process and trigger the
# undo manager logic:
def fake_start(*args, **kwargs):
raise test.TestingException('Start Error')
self.stubs.Set(self.conn._vmops, '_start', fake_start)
self.assertRaises(test.TestingException, self.conn.rescue,
self.context, instance, [], image_meta, '')
# confirm original disk still exists:
vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
self.assertEqual(vdi_ref, vdi_ref2)
self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
def test_unrescue(self):
instance = self._create_instance(obj=True)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
conn.unrescue(instance, None)
def test_unrescue_not_in_rescue(self):
instance = self._create_instance(obj=True)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock(object):
def __init__(self):
self.finish_revert_migration_called = False
def finish_revert_migration(self, context, instance, block_info,
power_on):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(self.context, instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "HARD")
def test_poll_rebooting_instances(self):
self.mox.StubOutWithMock(compute_api.API, 'reboot')
compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
instance = self._create_instance()
instances = [instance]
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.poll_rebooting_instances(60, instances)
def test_reboot_soft(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "SOFT")
def test_reboot_halted(self):
session = get_session()
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Halted')
conn.reboot(self.context, instance, None, "SOFT")
vm_ref = vm_utils.lookup(session, instance['name'])
vm = xenapi_fake.get_record('VM', vm_ref)
self.assertEqual(vm['power_state'], 'Running')
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
instance, None, "SOFT")
def test_reboot_rescued(self):
instance = self._create_instance()
instance['vm_state'] = vm_states.RESCUED
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
real_result = vm_utils.lookup(conn._session, instance['name'])
with mock.patch.object(vm_utils, 'lookup',
return_value=real_result) as mock_lookup:
conn.reboot(self.context, instance, None, "SOFT")
mock_lookup.assert_called_once_with(conn._session,
instance['name'], True)
def test_get_console_output_succeeds(self):
def fake_get_console_output(instance):
self.assertEqual("instance", instance)
return "console_log"
self.stubs.Set(self.conn._vmops, 'get_console_output',
fake_get_console_output)
self.assertEqual(self.conn.get_console_output('context', "instance"),
"console_log")
def _test_maintenance_mode(self, find_host, find_aggregate):
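        # Helper: stub out the xenapi host/aggregate lookups, put the host
        # into maintenance mode and verify a live VM.pool_migrate was issued
        # and the instance ends up ACTIVE with a MIGRATING task state.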
real_call_xenapi = self.conn._session.call_xenapi
instance = self._create_instance(spawn=True)
api_calls = {}
# Record all the xenapi calls, and return a fake list of hosts
# for the host.get_all call
def fake_call_xenapi(method, *args):
api_calls[method] = args
if method == 'host.get_all':
return ['foo', 'bar', 'baz']
return real_call_xenapi(method, *args)
self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
def fake_aggregate_get(context, host, key):
if find_aggregate:
return [test_aggregate.fake_aggregate]
else:
return []
self.stub_out('nova.db.aggregate_get_by_host',
fake_aggregate_get)
def fake_host_find(context, session, src, dst):
if find_host:
return 'bar'
else:
raise exception.NoValidHost("I saw this one coming...")
self.stubs.Set(host, '_host_find', fake_host_find)
result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
self.assertEqual(result, 'on_maintenance')
# We expect the VM.pool_migrate call to have been called to
# migrate our instance to the 'bar' host
vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
host_ref = "foo"
expected = (vm_ref, host_ref, {"live": "true"})
self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], task_states.MIGRATING)
def test_maintenance_mode(self):
self._test_maintenance_mode(True, True)
def test_maintenance_mode_no_host(self):
self.assertRaises(exception.NoValidHost,
self._test_maintenance_mode, False, True)
def test_maintenance_mode_no_aggregate(self):
self.assertRaises(exception.NotFound,
self._test_maintenance_mode, True, False)
def test_uuid_find(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
fake_inst = fake_instance.fake_db_instance(id=123)
fake_inst2 = fake_instance.fake_db_instance(id=456)
db.instance_get_all_by_host(self.context, fake_inst['host'],
columns_to_join=None
).AndReturn([fake_inst, fake_inst2])
self.mox.ReplayAll()
expected_name = CONF.instance_name_template % fake_inst['id']
inst_uuid = host._uuid_find(self.context, fake_inst['host'],
expected_name)
self.assertEqual(inst_uuid, fake_inst['uuid'])
def test_session_virtapi(self):
was = {'called': False}
def fake_aggregate_get_by_host(self, *args, **kwargs):
was['called'] = True
raise test.TestingException()
self.stub_out("nova.db.aggregate_get_by_host",
fake_aggregate_get_by_host)
self.stubs.Set(self.conn._session, "is_slave", True)
self.assertRaises(test.TestingException,
self.conn._session._get_host_uuid)
self.assertTrue(was['called'])
def test_session_handles_aggregate_metadata(self):
def fake_aggregate_get(context, host, key):
agg = copy.copy(test_aggregate.fake_aggregate)
agg['metadetails'][CONF.host] = 'this_should_be_metadata'
return [agg]
self.stub_out('nova.db.aggregate_get_by_host',
fake_aggregate_get)
self.stubs.Set(self.conn._session, "is_slave", True)
self.assertEqual('this_should_be_metadata',
self.conn._session._get_host_uuid())
def test_per_instance_usage_running(self):
instance = self._create_instance(spawn=True)
flavor = objects.Flavor.get_by_id(self.context, 3)
expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
'uuid': instance['uuid']}}
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
# Paused instances still consume resources:
self.conn.pause(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
def test_per_instance_usage_suspended(self):
# Suspended instances do not consume memory:
instance = self._create_instance(spawn=True)
self.conn.suspend(self.context, instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def test_per_instance_usage_halted(self):
instance = self._create_instance(spawn=True, obj=True)
self.conn.power_off(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def _create_instance(self, spawn=True, obj=False, **attrs):
"""Creates and spawns a test instance."""
instance_values = {
'uuid': str(uuid.uuid4()),
'display_name': 'host-',
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': IMAGE_MACHINE,
'kernel_id': IMAGE_KERNEL,
'ramdisk_id': IMAGE_RAMDISK,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'vm_mode': 'hvm',
'architecture': 'x86-64'}
instance_values.update(attrs)
instance = create_instance_with_system_metadata(self.context,
instance_values)
network_info = fake_network.fake_get_instance_nw_info(self)
image_meta = objects.ImageMeta.from_dict(
{'id': uuids.image_id,
'disk_format': 'vhd'})
if spawn:
self.conn.spawn(self.context, instance, image_meta, [], 'herp',
network_info)
if obj:
return instance
return base.obj_to_primitive(instance)
def test_destroy_clean_up_kernel_and_ramdisk(self):
def fake_lookup_kernel_ramdisk(session, vm_ref):
return "kernel", "ramdisk"
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
fake_destroy_kernel_ramdisk.called = True
self.assertEqual("kernel", kernel)
self.assertEqual("ramdisk", ramdisk)
fake_destroy_kernel_ramdisk.called = False
self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
fake_destroy_kernel_ramdisk)
instance = self._create_instance(spawn=True, obj=True)
network_info = fake_network.fake_get_instance_nw_info(self)
self.conn.destroy(self.context, instance, network_info)
vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
self.assertIsNone(vm_ref)
self.assertTrue(fake_destroy_kernel_ramdisk.called)
class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = agent.SimpleDH()
self.bob = agent.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEqual(alice_shared, bob_shared)
def _test_encryption(self, message):
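        # Round-trip helper: Alice encrypts (result must not end with a
        # newline) and Bob decrypts back to the original message.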
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEqual(dec, message)
def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')
def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')
def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')
def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')
def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in range(1024)]))
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIMigrateInstance(stubs.XenAPITestBase):
"""Unit test for verifying migration-related actions."""
REQUIRES_LOCKING = True
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self)
xenapi_fake.create_network('fake', 'fake_br1')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance_values = {
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': IMAGE_MACHINE,
'kernel_id': None,
'ramdisk_id': None,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
migration_values = {
'source_compute': 'nova-compute',
'dest_compute': 'nova-compute',
'dest_host': '10.127.5.114',
'status': 'post-migrating',
'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
'old_instance_type_id': 5,
'new_instance_type_id': 1
}
self.migration = db.migration_create(
context.get_admin_context(), migration_values)
fake_processutils.stub_out_processutils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
def fake_unpause_and_wait(self, vm_ref, instance, power_on):
pass
self.stubs.Set(vmops.VMOps, '_unpause_and_wait',
fake_unpause_and_wait)
def _create_instance(self, **kw):
values = self.instance_values.copy()
values.update(kw)
instance = objects.Instance(context=self.context, **values)
instance.flavor = objects.Flavor(root_gb=80,
ephemeral_gb=0)
instance.create()
return instance
def test_migrate_disk_and_power_off(self):
instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
ephemeral_gb=0)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = vm_utils.lookup(conn._session, instance['name'])
self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
volume_utils.is_booted_from_volume(conn._session, vm_ref)
self.mox.ReplayAll()
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
ephemeral_gb=0)
def fake_raise(*args, **kwargs):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
instance = self._create_instance()
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
ephemeral_gb=0)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ResizeError,
conn.migrate_disk_and_power_off,
self.context, instance,
'fake_dest', flavor, None)
def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
ephemeral_gb=0)
instance = self._create_instance(root_gb=0, ephemeral_gb=0)
instance.flavor.root_gb = 0
instance.flavor.ephemeral_gb = 0
xenapi_fake.create_vm(instance['name'], 'Running')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = vm_utils.lookup(conn._session, instance['name'])
self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
volume_utils.is_booted_from_volume(conn._session, vm_ref)
self.mox.ReplayAll()
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', flavor, None)
def _test_revert_migrate(self, power_on):
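        # Run finish_migration with a base/cow VDI pair and resize_instance
        # set, checking the online VDI resize happens and the VM is started
        # only when power_on is requested; then revert and check the
        # finish_revert_migration stub was invoked.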
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
context = 'fake_context'
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_finish_revert_migration(*args, **kwargs):
self.fake_finish_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
fake_finish_revert_migration)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
image_meta = objects.ImageMeta.from_dict(
{'id': instance['image_ref'], 'disk_format': 'vhd'})
base = xenapi_fake.create_vdi('hurr', 'fake')
base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
cow = xenapi_fake.create_vdi('durr', 'fake')
cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy=base_uuid, cow=cow_uuid),
network_info, image_meta, resize_instance=True,
block_device_info=None, power_on=power_on)
self.assertTrue(self.called)
self.assertEqual(self.fake_vm_start_called, power_on)
conn.finish_revert_migration(context, instance, network_info)
self.assertTrue(self.fake_finish_revert_migration_called)
def test_revert_migrate_power_on(self):
self._test_revert_migrate(True)
def test_revert_migrate_power_off(self):
self._test_revert_migrate(False)
def _test_finish_migrate(self, power_on):
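        # Same as the revert helper above, but only exercises
        # finish_migration itself.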
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
image_meta = objects.ImageMeta.from_dict(
{'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True,
block_device_info=None, power_on=power_on)
self.assertTrue(self.called)
self.assertEqual(self.fake_vm_start_called, power_on)
def test_finish_migrate_power_on(self):
self._test_finish_migrate(True)
def test_finish_migrate_power_off(self):
self._test_finish_migrate(False)
def test_finish_migrate_no_local_storage(self):
values = copy.copy(self.instance_values)
values["root_gb"] = 0
values["ephemeral_gb"] = 0
instance = create_instance_with_system_metadata(self.context, values)
instance.flavor.root_gb = 0
instance.flavor.ephemeral_gb = 0
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
image_meta = objects.ImageMeta.from_dict(
{'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
        # Whether to resize the instance is decided by the compute layer;
        # here the test passes resize_instance=False directly.
image_meta = objects.ImageMeta.from_dict(
{'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
@stub_vm_utils_with_vdi_attached
def test_migrate_too_many_partitions_no_resize_down(self):
instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_partitions(partition):
return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
@stub_vm_utils_with_vdi_attached
def test_migrate_bad_fs_type_no_resize_down(self):
instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_partitions(partition):
return [(1, 2, 3, "ext2", "", "boot")]
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_rollback_when_resize_down_fs_fails(self):
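        # If migrate_vhd fails part way through a resize-down, the freshly
        # resized VDI must be destroyed and the original VM restored; the
        # failure is surfaced as InstanceFaultRollback.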
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
self.mox.StubOutWithMock(vm_utils, 'resize_disk')
self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
instance = objects.Instance(context=self.context,
auto_disk_config=True,
uuid=uuids.instance)
instance.obj_reset_changes()
vm_ref = "vm_ref"
dest = "dest"
flavor = "type"
sr_path = "sr_path"
vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
vmops._apply_orig_vm_name_label(instance, vm_ref)
old_vdi_ref = "old_ref"
vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
(old_vdi_ref, None))
new_vdi_ref = "new_ref"
new_vdi_uuid = "new_uuid"
vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
sr_path, 0).AndRaise(
exception.ResizeError(reason="asdf"))
vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
vmops._restore_orig_vm_and_cleanup_orphan(instance)
self.mox.ReplayAll()
with mock.patch.object(instance, 'save') as mock_save:
self.assertRaises(exception.InstanceFaultRollback,
vmops._migrate_disk_resizing_down, self.context,
instance, dest, flavor, vm_ref, sr_path)
self.assertEqual(3, mock_save.call_count)
self.assertEqual(60.0, instance.progress)
def test_resize_ensure_vm_is_shutdown_cleanly(self):
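        # A successful clean shutdown means hard_shutdown_vm is never tried.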
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_forced(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_fails(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
class XenAPIImageTypeTestCase(test.NoDBTestCase):
"""Test ImageType class."""
def test_to_string(self):
# Can convert from type id to type string.
self.assertEqual(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def _assert_role(self, expected_role, image_type_id):
self.assertEqual(
expected_role,
vm_utils.ImageType.get_role(image_type_id))
def test_get_image_role_kernel(self):
self._assert_role('kernel', vm_utils.ImageType.KERNEL)
def test_get_image_role_ramdisk(self):
self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)
def test_get_image_role_disk(self):
self._assert_role('root', vm_utils.ImageType.DISK)
def test_get_image_role_disk_raw(self):
self._assert_role('root', vm_utils.ImageType.DISK_RAW)
def test_get_image_role_disk_vhd(self):
self._assert_role('root', vm_utils.ImageType.DISK_VHD)
class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
"""Unit tests for code that detects the ImageType."""
def assert_disk_type(self, image_meta, expected_disk_type):
actual = vm_utils.determine_disk_image_type(image_meta)
self.assertEqual(expected_disk_type, actual)
def test_machine(self):
image_meta = objects.ImageMeta.from_dict(
{'disk_format': 'ami'})
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
def test_raw(self):
image_meta = objects.ImageMeta.from_dict(
{'disk_format': 'raw'})
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
def test_vhd(self):
image_meta = objects.ImageMeta.from_dict(
{'disk_format': 'vhd'})
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIHostTestCase(stubs.XenAPITestBase):
"""Tests HostState, which holds metrics from XenServer that get
reported back to the Schedulers.
"""
def setUp(self):
super(XenAPIHostTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.instance = fake_instance.fake_db_instance(name='foo')
def test_host_state(self):
stats = self.conn.host_state.get_host_stats(False)
# Values from fake.create_local_srs (ext SR)
self.assertEqual(stats['disk_total'], 40000)
self.assertEqual(stats['disk_used'], 20000)
# Values from fake._plugin_xenhost_host_data
self.assertEqual(stats['host_memory_total'], 10)
self.assertEqual(stats['host_memory_overhead'], 20)
self.assertEqual(stats['host_memory_free'], 30)
self.assertEqual(stats['host_memory_free_computed'], 40)
self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
self.assertEqual(stats['host_cpu_info']['cpu_count'], 4)
self.assertThat({
'vendor': 'GenuineIntel',
'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
'topology': {
'sockets': 1,
'cores': 4,
'threads': 1,
},
'features': [
'fpu', 'de', 'tsc', 'msr', 'pae', 'mce',
'cx8', 'apic', 'sep', 'mtrr', 'mca',
'cmov', 'pat', 'clflush', 'acpi', 'mmx',
'fxsr', 'sse', 'sse2', 'ss', 'ht',
'nx', 'constant_tsc', 'nonstop_tsc',
'aperfmperf', 'pni', 'vmx', 'est', 'ssse3',
'sse4_1', 'sse4_2', 'popcnt', 'hypervisor',
'ida', 'tpr_shadow', 'vnmi', 'flexpriority',
'ept', 'vpid',
]},
matchers.DictMatches(stats['cpu_model']))
# No VMs running
self.assertEqual(stats['vcpus_used'], 0)
def test_host_state_vcpus_used(self):
stats = self.conn.host_state.get_host_stats(True)
self.assertEqual(stats['vcpus_used'], 0)
xenapi_fake.create_vm(self.instance['name'], 'Running')
stats = self.conn.host_state.get_host_stats(True)
self.assertEqual(stats['vcpus_used'], 4)
def test_pci_passthrough_devices(self):
stats = self.conn.host_state.get_host_stats(False)
self.assertEqual(len(stats['pci_passthrough_devices']), 2)
def test_host_state_missing_sr(self):
# Must trigger construction of 'host_state' property
# before introducing the stub which raises the error
hs = self.conn.host_state
def fake_safe_find_sr(session):
raise exception.StorageRepositoryNotFound('not there')
self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
self.assertRaises(exception.StorageRepositoryNotFound,
hs.get_host_stats,
refresh=True)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
if not expected:
expected = action
self.assertEqual(result, expected)
def _test_host_action_no_param(self, method, action, expected=None):
result = method(action)
if not expected:
expected = action
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action_no_param(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action_no_param(self.conn.host_power_action,
'shutdown')
def test_host_startup(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode,
True, 'on_maintenance')
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode,
False, 'off_maintenance')
def test_set_enable_host_enable(self):
_create_service_entries(self.context, values={'nova': ['fake-mini']})
self._test_host_action_no_param(self.conn.set_host_enabled,
True, 'enabled')
service = db.service_get_by_host_and_binary(self.context, 'fake-mini',
'nova-compute')
self.assertFalse(service.disabled)
def test_set_enable_host_disable(self):
_create_service_entries(self.context, values={'nova': ['fake-mini']})
self._test_host_action_no_param(self.conn.set_host_enabled,
False, 'disabled')
service = db.service_get_by_host_and_binary(self.context, 'fake-mini',
'nova-compute')
self.assertTrue(service.disabled)
def test_get_host_uptime(self):
result = self.conn.get_host_uptime()
self.assertEqual(result, 'fake uptime')
def test_supported_instances_is_included_in_host_state(self):
stats = self.conn.host_state.get_host_stats(False)
self.assertIn('supported_instances', stats)
def test_supported_instances_is_calculated_by_to_supported_instances(self):
def to_supported_instances(somedata):
return "SOMERETURNVALUE"
self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
stats = self.conn.host_state.get_host_stats(False)
self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
def test_update_stats_caches_hostname(self):
self.mox.StubOutWithMock(host, 'call_xenhost')
self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
self.mox.StubOutWithMock(vm_utils, 'list_vms')
self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
data = {'disk_total': 0,
'disk_used': 0,
'disk_available': 0,
'supported_instances': 0,
'host_capabilities': [],
'host_hostname': 'foo',
'vcpus_used': 0,
}
sr_rec = {
'physical_size': 0,
'physical_utilisation': 0,
'virtual_allocation': 0,
}
for i in range(3):
host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
vm_utils.list_vms(self.conn._session).AndReturn([])
self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
sr_rec)
if i == 2:
# On the third call (the second below) change the hostname
data = dict(data, host_hostname='bar')
self.mox.ReplayAll()
stats = self.conn.host_state.get_host_stats(refresh=True)
self.assertEqual('foo', stats['hypervisor_hostname'])
stats = self.conn.host_state.get_host_stats(refresh=True)
self.assertEqual('foo', stats['hypervisor_hostname'])
class ToSupportedInstancesTestCase(test.NoDBTestCase):
def test_default_return_value(self):
self.assertEqual([],
host.to_supported_instances(None))
def test_return_value(self):
self.assertEqual([(arch.X86_64, hv_type.XEN, 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64']))
def test_invalid_values_do_not_break(self):
self.assertEqual([(arch.X86_64, hv_type.XEN, 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))
def test_multiple_values(self):
self.assertEqual(
[
(arch.X86_64, hv_type.XEN, 'xen'),
(arch.I686, hv_type.XEN, 'hvm')
],
host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
def setUp(self):
super(XenAPIAutoDiskConfigTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': IMAGE_MACHINE,
'kernel_id': IMAGE_KERNEL,
'ramdisk_id': IMAGE_RAMDISK,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False):
pass
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertIsPartitionCalled(self, called):
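        # Attach disks with _resize_part_and_fs stubbed out and assert
        # whether a root partition resize was attempted.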
marker = {"partition_called": False}
def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
flags):
marker["partition_called"] = True
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
context.RequestContext(self.user_id, self.project_id)
session = get_session()
disk_image_type = vm_utils.ImageType.DISK_VHD
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
image_meta = objects.ImageMeta.from_dict(
{'id': uuids.image_id,
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}})
self.mox.ReplayAll()
self.conn._vmops._attach_disks(self.context, instance, image_meta,
vm_ref, instance['name'], vdis, disk_image_type,
"fake_nw_inf")
self.assertEqual(marker["partition_called"], called)
def test_instance_not_auto_disk_config(self):
"""Should not partition unless instance is marked as
auto_disk_config.
"""
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached
def test_instance_auto_disk_config_fails_safe_two_partitions(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4', "", ""),
                    (2, 100, 200, 'ext4', "", "")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached
def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(2, 100, 200, 'ext4', "", "")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached
def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 100, 200, 'asdf', "", "")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.
"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4', "", "boot")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(True)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIGenerateLocal(stubs.XenAPITestBase):
"""Test generating of local disks, like swap and ephemeral."""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': IMAGE_MACHINE,
'kernel_id': IMAGE_KERNEL,
'ramdisk_id': IMAGE_RAMDISK,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False, empty=False, unpluggable=True):
return session.call_xenapi('VBD.create', {'VM': vm_ref,
'VDI': vdi_ref})
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertCalled(self, instance,
disk_image_type=vm_utils.ImageType.DISK_VHD):
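        # Attach disks and assert that the generate_* stub installed by the
        # calling test (swap, ephemeral or blank ISO root) was invoked.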
context.RequestContext(self.user_id, self.project_id)
session = get_session()
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdi_key = 'root'
if disk_image_type == vm_utils.ImageType.DISK_ISO:
vdi_key = 'iso'
vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.called = False
image_meta = objects.ImageMeta.from_dict(
{'id': uuids.image_id,
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}})
self.conn._vmops._attach_disks(self.context, instance, image_meta,
vm_ref, instance['name'], vdis, disk_image_type,
"fake_nw_inf")
self.assertTrue(self.called)
def test_generate_swap(self):
# Test swap disk generation.
instance_values = dict(self.instance_values, instance_type_id=5)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_swap(*args, **kwargs):
self.called = True
self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
self.assertCalled(instance)
def test_generate_ephemeral(self):
# Test ephemeral disk generation.
instance_values = dict(self.instance_values, instance_type_id=4)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
self.assertCalled(instance)
def test_generate_iso_blank_root_disk(self):
instance_values = dict(self.instance_values, instance_type_id=4)
instance_values.pop('kernel_id')
instance_values.pop('ramdisk_id')
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
pass
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
def fake_generate_iso(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
fake_generate_iso)
self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
FAKE_VMS = {'test1:ref': dict(name_label='test1',
other_config=dict(nova_uuid='hash'),
domid='12',
_vifmap={'0': "a:b:c:d...",
'1': "e:f:12:q..."}),
'test2:ref': dict(name_label='test2',
other_config=dict(nova_uuid='hash'),
domid='42',
_vifmap={'0': "a:3:c:d...",
'1': "e:f:42:q..."}),
}
def setUp(self):
super(XenAPIBWCountersTestCase, self).setUp()
self.stubs.Set(vm_utils, 'list_vms',
XenAPIBWCountersTestCase._fake_list_vms)
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def _fake_get_vif_device_map(vm_rec):
return vm_rec['_vifmap']
self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
_fake_get_vif_device_map)
@classmethod
def _fake_list_vms(cls, session):
return six.iteritems(cls.FAKE_VMS)
@staticmethod
def _fake_fetch_bandwidth_mt(session):
return {}
@staticmethod
def _fake_fetch_bandwidth(session):
return {'42':
{'0': {'bw_in': 21024, 'bw_out': 22048},
'1': {'bw_in': 231337, 'bw_out': 221212121}},
'12':
{'0': {'bw_in': 1024, 'bw_out': 2048},
'1': {'bw_in': 31337, 'bw_out': 21212121}},
}
def test_get_all_bw_counters(self):
instances = [dict(name='test1', uuid='1-2-3'),
dict(name='test2', uuid='4-5-6')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
self._fake_fetch_bandwidth)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(len(result), 4)
self.assertIn(dict(uuid='1-2-3',
mac_address="a:b:c:d...",
bw_in=1024,
bw_out=2048), result)
self.assertIn(dict(uuid='1-2-3',
mac_address="e:f:12:q...",
bw_in=31337,
bw_out=21212121), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="a:3:c:d...",
bw_in=21024,
bw_out=22048), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="e:f:42:q...",
bw_in=231337,
bw_out=221212121), result)
def test_get_all_bw_counters_in_failure_case(self):
"""Test that get_all_bw_conters returns an empty list when
no data returned from Xenserver. c.f. bug #910045.
"""
instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
self._fake_fetch_bandwidth_mt)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.unit.virt.test_libvirt.IPTablesFirewallDriverTestCase
# share a lot of code. Consider abstracting common code in a base
# class for firewall driver testing.
#
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
REQUIRES_LOCKING = True
_in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*mangle',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
_in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def setUp(self):
super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.user_id = 'mappin'
self.project_id = 'fake'
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(CONF.network_manager)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
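        # Check the saved iptables output: pre-existing non-nova rules are
        # preserved, per-instance and per-security-group chains were added,
        # and the ICMP / ICMP echo-request / TCP 80-81 accept rules from the
        # test security group are present.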
in_rules = [l for l in self._in_rules if not l.startswith('#')]
for rule in in_rules:
if 'nova' not in rule:
self.assertIn(rule, self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
' -s 192.168.11.0/24')
self.assertGreater(len(filter(regex.match, self._out_rules)), 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
' --icmp-type 8 -s 192.168.11.0/24')
self.assertGreater(len(filter(regex.match, self._out_rules)), 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertGreater(len(filter(regex.match, self._out_rules)), 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_model = fake_network.fake_get_instance_nw_info(self, 1)
from nova.compute import utils as compute_utils # noqa
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
self.fw.prepare_instance_filter(instance_ref, network_model)
self.fw.apply_instance_filter(instance_ref, network_model)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
self.assertGreater(len(filter(regex.match, self._out_rules)), 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 0)
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
_get_instance_nw_info = fake_network.fake_get_instance_nw_info
network_info = _get_instance_nw_info(self,
networks_count,
ipv4_addr_per_network)
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
self.assertEqual(ipv4_network_rules, rules)
self.assertEqual(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instance_info[instance_ref['id']] = (instance_ref,
network_info)
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
# validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertGreater(len(filter(regex.match, self._out_rules)), 0,
"Rules were not updated properly. "
"The rule for UDP acceptance is missing")
class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for testing we find the right SR."""
def test_safe_find_sr_raise_exception(self):
        # Ensure StorageRepositoryNotFound is raised when the filter is wrong.
self.flags(sr_matching_filter='yadayadayada', group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
self.assertRaises(exception.StorageRepositoryNotFound,
vm_utils.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
# Ensure the default local-storage is found.
self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
# This test is only guaranteed if there is one host in the pool
self.assertEqual(len(xenapi_fake.get_all('host')), 1)
host_ref = xenapi_fake.get_all('host')[0]
pbd_refs = xenapi_fake.get_all('PBD')
for pbd_ref in pbd_refs:
pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
if pbd_rec['host'] != host_ref:
continue
sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
if sr_rec['other_config']['i18n-key'] == 'local-storage':
local_sr = pbd_rec['SR']
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
# Ensure the SR is found when using a different filter.
self.flags(sr_matching_filter='other-config:my_fake_sr=true',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
type='lvm',
other_config={'my_fake_sr': 'true'},
host_ref=host_ref)
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
# Ensure the default SR is found regardless of other-config.
self.flags(sr_matching_filter='default-sr:true',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
pool_ref = session.call_xenapi('pool.get_all')[0]
expected = vm_utils.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
expected)
def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
'fake_host2'],
'avail_zone2': ['fake_host3'], }):
for avail_zone, hosts in six.iteritems(values):
for service_host in hosts:
db.service_create(context,
{'host': service_host,
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0})
return values
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
"""Unit tests for aggregate operations."""
def setUp(self):
super(XenAPIAggregateTestCase, self).setUp()
self.flags(connection_url='http://test_url',
connection_username='test_user',
connection_password='test_pass',
group='xenserver')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host',
compute_driver='xenapi.XenAPIDriver',
default_availability_zone='avail_zone1')
self.flags(use_local=True, group='conductor')
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.compute = importutils.import_object(CONF.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
'metadata': {'availability_zone': 'test_zone',
pool_states.POOL_FLAG: 'XenAPI'}}
self.aggr = objects.Aggregate(context=self.context, id=1,
**values)
self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
'master_compute': 'host',
'availability_zone': 'fake_zone',
pool_states.KEY: pool_states.ACTIVE,
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
def test_pool_add_to_aggregate_called_by_driver(self):
calls = []
def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
self.assertEqual("SLAVEINFO", slave_info)
calls.append(pool_add_to_aggregate)
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
pool_add_to_aggregate)
self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertIn(pool_add_to_aggregate, calls)
def test_pool_remove_from_aggregate_called_by_driver(self):
calls = []
def pool_remove_from_aggregate(context, aggregate, host,
slave_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
self.assertEqual("SLAVEINFO", slave_info)
calls.append(pool_remove_from_aggregate)
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
pool_remove_from_aggregate)
self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertIn(pool_remove_from_aggregate, calls)
def test_add_to_aggregate_for_first_host_sets_metadata(self):
def fake_init_pool(id, name):
fake_init_pool.called = True
self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
result = objects.Aggregate.get_by_id(self.context, aggregate.id)
self.assertTrue(fake_init_pool.called)
self.assertThat(self.fake_metadata,
matchers.DictMatches(result.metadata))
def test_join_slave(self):
# Ensure join_slave gets called when the request gets to master.
def fake_join_slave(id, compute_uuid, host, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
dict(compute_uuid='fake_uuid',
url='fake_url',
user='fake_user',
passwd='fake_pass',
xenhost_uuid='fake_uuid'))
self.assertTrue(fake_join_slave.called)
def test_add_to_aggregate_first_host(self):
def fake_pool_set_name_label(self, session, pool_ref, name):
fake_pool_set_name_label.called = True
self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
fake_pool_set_name_label)
self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
metadata = {'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.CREATED}
aggregate = objects.Aggregate(context=self.context)
aggregate.name = 'fake_aggregate'
aggregate.metadata = dict(metadata)
aggregate.create()
aggregate.add_host('host')
self.assertEqual(["host"], aggregate.hosts)
self.assertEqual(metadata, aggregate.metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
self.assertTrue(fake_pool_set_name_label.called)
def test_remove_from_aggregate_called(self):
def fake_remove_from_aggregate(context, aggregate, host):
fake_remove_from_aggregate.called = True
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
fake_remove_from_aggregate)
self.conn.remove_from_aggregate(None, None, None)
self.assertTrue(fake_remove_from_aggregate.called)
def test_remove_from_empty_aggregate(self):
result = self._aggregate_setup()
self.assertRaises(exception.InvalidAggregateActionDelete,
self.conn._pool.remove_from_aggregate,
self.context, result, "test_host")
def test_remove_slave(self):
# Ensure eject slave gets called.
def fake_eject_slave(id, compute_uuid, host_uuid):
fake_eject_slave.called = True
self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
self.fake_metadata['host2'] = 'fake_host2_uuid'
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
self.assertTrue(fake_eject_slave.called)
def test_remove_master_solo(self):
# Ensure metadata are cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
aggregate = self._aggregate_setup(metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
result = objects.Aggregate.get_by_id(self.context, aggregate.id)
self.assertTrue(fake_clear_pool.called)
self.assertThat({'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: pool_states.ACTIVE},
matchers.DictMatches(result.metadata))
def test_remote_master_non_empty_pool(self):
        # Ensure InvalidAggregateActionDelete is raised if removing the master.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.assertRaises(exception.InvalidAggregateActionDelete,
self.conn._pool.remove_from_aggregate,
self.context, aggregate, "host")
def _aggregate_setup(self, aggr_name='fake_aggregate',
aggr_zone='fake_zone',
aggr_state=pool_states.CREATED,
hosts=['host'], metadata=None):
aggregate = objects.Aggregate(context=self.context)
aggregate.name = aggr_name
aggregate.metadata = {'availability_zone': aggr_zone,
pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: aggr_state,
}
if metadata:
aggregate.metadata.update(metadata)
aggregate.create()
for aggregate_host in hosts:
aggregate.add_host(aggregate_host)
return aggregate
def test_add_host_to_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateActionAdd is raised when adding host while
aggregate is not ready.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
ex = self.assertRaises(exception.InvalidAggregateActionAdd,
self.conn.add_to_aggregate, self.context,
aggregate, 'host')
self.assertIn('setup in progress', str(ex))
def test_add_host_to_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateActionAdd is raised when aggregate is
deleted.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
ex = self.assertRaises(exception.InvalidAggregateActionAdd,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
self.assertIn('aggregate deleted', str(ex))
def test_add_host_to_aggregate_invalid_error_status(self):
"""Ensure InvalidAggregateActionAdd is raised when aggregate is
in error.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
ex = self.assertRaises(exception.InvalidAggregateActionAdd,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
self.assertIn('aggregate in error', str(ex))
def test_remove_host_from_aggregate_error(self):
# Ensure we can remove a host from an aggregate even if in error.
values = _create_service_entries(self.context)
fake_zone = list(values.keys())[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
# let's mock the fact that the aggregate is ready!
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
self.api.update_aggregate_metadata(self.context,
aggr.id,
metadata)
for aggregate_host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr.id, aggregate_host)
# let's mock the fact that the aggregate is in error!
expected = self.api.remove_host_from_aggregate(self.context,
aggr.id,
values[fake_zone][0])
self.assertEqual(len(aggr.hosts) - 1, len(expected.hosts))
self.assertEqual(expected.metadata[pool_states.KEY],
pool_states.ACTIVE)
def test_remove_host_from_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateActionDelete is raised when aggregate is
deleted.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
self.assertRaises(exception.InvalidAggregateActionDelete,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_remove_host_from_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateActionDelete is raised when aggregate is
changing.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
self.assertRaises(exception.InvalidAggregateActionDelete,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_add_aggregate_host_raise_err(self):
# Ensure the undo operation works correctly on add.
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
raise exception.AggregateError(
aggregate_id='', action='', reason='')
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
self.aggr.metadata = metadata
self.aggr.hosts = ['fake_host']
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
self.context, host="fake_host",
aggregate=self.aggr,
slave_info=None)
self.assertEqual(self.aggr.metadata[pool_states.KEY],
pool_states.ERROR)
self.assertEqual(self.aggr.hosts, ['fake_host'])
class MockComputeAPI(object):
def __init__(self):
self._mock_calls = []
def add_aggregate_host(self, ctxt, aggregate,
host_param, host, slave_info):
self._mock_calls.append((
self.add_aggregate_host, ctxt, aggregate,
host_param, host, slave_info))
def remove_aggregate_host(self, ctxt, host, aggregate_id, host_param,
slave_info):
self._mock_calls.append((
self.remove_aggregate_host, ctxt, host, aggregate_id,
host_param, slave_info))
class StubDependencies(object):
"""Stub dependencies for ResourcePool."""
def __init__(self):
self.compute_rpcapi = MockComputeAPI()
def _is_hv_pool(self, *_ignore):
return True
def _get_metadata(self, *_ignore):
return {
pool_states.KEY: {},
'master_compute': 'master'
}
def _create_slave_info(self, *ignore):
return "SLAVE_INFO"
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
"""A ResourcePool, use stub dependencies."""
class HypervisorPoolTestCase(test.NoDBTestCase):
fake_aggregate = {
'id': 98,
'hosts': [],
'metadata': {
'master_compute': 'master',
pool_states.POOL_FLAG: '',
pool_states.KEY: ''
}
}
fake_aggregate = objects.Aggregate(**fake_aggregate)
def test_slave_asks_master_to_add_slave_to_pool(self):
slave = ResourcePoolWithStubs()
slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.add_aggregate_host,
"CONTEXT", "slave", jsonutils.to_primitive(self.fake_aggregate),
"master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
def test_slave_asks_master_to_remove_slave_from_pool(self):
slave = ResourcePoolWithStubs()
slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.remove_aggregate_host,
"CONTEXT", "slave", 98, "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.NoDBTestCase):
def test_swapping(self):
self.assertEqual(
"http://otherserver:8765/somepath",
pool.swap_xapi_host(
"http://someserver:8765/somepath", 'otherserver'))
def test_no_port(self):
self.assertEqual(
"http://otherserver/somepath",
pool.swap_xapi_host(
"http://someserver/somepath", 'otherserver'))
def test_no_path(self):
self.assertEqual(
"http://otherserver",
pool.swap_xapi_host(
"http://someserver", 'otherserver'))
class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for live_migration."""
def setUp(self):
super(XenAPILiveMigrateTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
db_fakes.stub_out_db_instance_api(self)
self.context = context.get_admin_context()
def test_live_migration_calls_vmops(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_live_migrate(context, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data):
fake_live_migrate.called = True
self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
self.conn.live_migration(None, None, None, None, None)
self.assertTrue(fake_live_migrate.called)
def test_pre_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(self.conn._vmops, "pre_live_migration") as pre:
pre.return_value = True
result = self.conn.pre_live_migration(
"ctx", "inst", "bdi", "nw", "di", "data")
self.assertTrue(result)
pre.assert_called_with("ctx", "inst", "bdi", "nw", "di", "data")
def test_post_live_migration_at_destination(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_instance = {"name": "name"}
fake_network_info = "network_info"
def fake_fw(instance, network_info):
self.assertEqual(instance, fake_instance)
self.assertEqual(network_info, fake_network_info)
fake_fw.call_count += 1
def fake_create_kernel_and_ramdisk(context, session, instance,
name_label):
return "fake-kernel-file", "fake-ramdisk-file"
fake_fw.call_count = 0
_vmops = self.conn._vmops
self.stubs.Set(_vmops.firewall_driver,
'setup_basic_filtering', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'prepare_instance_filter', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'apply_instance_filter', fake_fw)
self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
fake_create_kernel_and_ramdisk)
def fake_get_vm_opaque_ref(instance):
fake_get_vm_opaque_ref.called = True
self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
fake_get_vm_opaque_ref.called = False
def fake_strip_base_mirror_from_vdis(session, vm_ref):
fake_strip_base_mirror_from_vdis.called = True
self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
fake_strip_base_mirror_from_vdis)
fake_strip_base_mirror_from_vdis.called = False
self.conn.post_live_migration_at_destination(None, fake_instance,
fake_network_info, None)
self.assertEqual(fake_fw.call_count, 3)
self.assertTrue(fake_get_vm_opaque_ref.called)
self.assertTrue(fake_strip_base_mirror_from_vdis.called)
def test_check_can_live_migrate_destination_with_block_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
expected = {'block_migration': True,
'is_volume_backed': False,
'migrate_data': {
'migrate_send_data': {'value': 'fake_migrate_data'},
'destination_sr_ref': 'asdf'
}
}
result = self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'},
{}, {},
True, False)
result.is_volume_backed = False
self.assertEqual(expected, result.to_legacy_dict())
def test_check_live_migrate_destination_verifies_ip(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
for pif_ref in xenapi_fake.get_all('PIF'):
pif_rec = xenapi_fake.get_record('PIF', pif_ref)
pif_rec['IP'] = ''
pif_rec['IPv6'] = ''
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def test_check_can_live_migrate_destination_block_migration_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def _add_default_live_migrate_stubs(self, conn):
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return []
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
def fake_lookup_kernel_ramdisk(session, vm):
return ("fake_PV_kernel", "fake_PV_ramdisk")
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
self.stubs.Set(conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
def test_check_can_live_migrate_source_with_block_migrate(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = objects.XenapiLiveMigrateData(
block_migration=True, is_volume_backed=False,
destination_sr_ref=None, migrate_send_data={'key': 'value'})
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return "true"
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
dest_check_data = objects.XenapiLiveMigrateData(
block_migration=True,
is_volume_backed=True,
destination_sr_ref=None,
migrate_send_data={'key': 'value'})
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data.to_legacy_dict(),
result.to_legacy_dict())
def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return {'returncode': 'error', 'message': 'Plugin not found'}
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context, {'host': 'host'},
{})
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = objects.XenapiLiveMigrateData(
block_migration=True, is_volume_backed=True,
migrate_send_data={'key': 'value'}, destination_sr_ref=None)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context,
{'host': 'host'},
dest_check_data)
@mock.patch.object(objects.AggregateList, 'get_by_host')
def test_check_can_live_migrate_works(self, mock_get_by_host):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
metadata = {'host': 'test_host_uuid'}
aggregate = objects.Aggregate(metadata=metadata)
aggregate_list = objects.AggregateList(objects=[aggregate])
mock_get_by_host.return_value = aggregate_list
instance = objects.Instance(host='host')
self.conn.check_can_live_migrate_destination(
self.context, instance, None, None)
mock_get_by_host.assert_called_once_with(
self.context, CONF.host, key='hypervisor_pool')
@mock.patch.object(objects.AggregateList, 'get_by_host')
def test_check_can_live_migrate_fails(self, mock_get_by_host):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
metadata = {'dest_other': 'test_host_uuid'}
aggregate = objects.Aggregate(metadata=metadata)
aggregate_list = objects.AggregateList(objects=[aggregate])
mock_get_by_host.return_value = aggregate_list
instance = objects.Instance(host='host')
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, instance, None, None)
mock_get_by_host.assert_called_once_with(
self.context, CONF.host, key='hypervisor_pool')
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_lookup_kernel_ramdisk(session, vm_ref):
return "kernel", "ramdisk"
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
migrate_data = objects.XenapiLiveMigrateData(
destination_sr_ref="foo",
migrate_send_data={"bar": "baz"},
block_migration=False)
self.conn.live_migration(self.conn, None, None, post_method, None,
None, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_on_failure(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def fake_call_xenapi(*args):
raise NotImplementedError()
self.stubs.Set(self.conn._vmops._session, "call_xenapi",
fake_call_xenapi)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
migrate_data = objects.XenapiLiveMigrateData(
destination_sr_ref="foo",
migrate_send_data={"bar": "baz"},
block_migration=False)
self.assertRaises(NotImplementedError, self.conn.live_migration,
self.conn, None, None, None, recover_method,
None, migrate_data)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_calls_post_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
# pass block_migration = True and migrate data
migrate_data = objects.XenapiLiveMigrateData(
destination_sr_ref="foo",
migrate_send_data={"bar": "baz"},
block_migration=True)
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_block_cleans_srs(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(context, instance):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_forget_sr(context, instance):
fake_forget_sr.called = True
self.stubs.Set(volume_utils, "forget_sr",
fake_forget_sr)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
migrate_data = objects.XenapiLiveMigrateData(
destination_sr_ref="foo",
migrate_send_data={"bar": "baz"},
block_migration=True)
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
self.assertTrue(fake_forget_sr.called, "forget_sr.called")
def test_live_migration_with_block_migration_fails_migrate_send(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and migrate data
migrate_data = objects.XenapiLiveMigrateData(
destination_sr_ref='foo',
migrate_send_data={'bar': 'baz'},
block_migration=True)
self.assertRaises(exception.MigrationError,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, migrate_data)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migrate_block_migration_xapi_call_parameters(self):
fake_vdi_map = object()
class Session(xenapi_fake.SessionBase):
def VM_migrate_send(self_, session, vmref, migrate_data, islive,
vdi_map, vif_map, options):
self.assertEqual({'SOMEDATA': 'SOMEVAL'}, migrate_data)
self.assertEqual(fake_vdi_map, vdi_map)
stubs.stubout_session(self.stubs, Session)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(conn)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
return fake_vdi_map
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
def dummy_callback(*args, **kwargs):
pass
migrate_data = objects.XenapiLiveMigrateData(
migrate_send_data={'SOMEDATA': 'SOMEVAL'},
destination_sr_ref='TARGET_SR_OPAQUE_REF',
block_migration=True)
conn.live_migration(
self.context, instance=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration="SOMEDATA",
migrate_data=migrate_data)
def test_live_migrate_pool_migration_xapi_call_parameters(self):
class Session(xenapi_fake.SessionBase):
def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
self.assertEqual("fake_ref", host_ref)
self.assertEqual({"live": "true"}, options)
raise IOError()
stubs.stubout_session(self.stubs, Session)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(conn)
def fake_get_host_opaque_ref(context, destination):
return "fake_ref"
self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def dummy_callback(*args, **kwargs):
pass
migrate_data = objects.XenapiLiveMigrateData(
migrate_send_data={'foo': 'bar'},
destination_sr_ref='foo',
block_migration=False)
self.assertRaises(IOError, conn.live_migration,
self.context, instance=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration=False, migrate_data=migrate_data)
def test_generate_vdi_map(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = "fake_vm_ref"
def fake_find_sr(_session):
self.assertEqual(conn._session, _session)
return "source_sr_ref"
self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
self.assertEqual(conn._session, _session)
self.assertEqual(vm_ref, _vm_ref)
self.assertEqual("source_sr_ref", _sr_ref)
return ["vdi0", "vdi1"]
self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
fake_get_instance_vdis_for_sr)
result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
self.assertEqual({"vdi0": "dest_sr_ref",
"vdi1": "dest_sr_ref"}, result)
def test_rollback_live_migration_at_destination(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn, "destroy") as mock_destroy:
conn.rollback_live_migration_at_destination("context",
"instance", [], {'block_device_mapping': []})
self.assertFalse(mock_destroy.called)
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(XenAPIInjectMetadataTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.xenstore = dict(persist={}, ephem={})
self.called_fake_get_vm_opaque_ref = False
def fake_get_vm_opaque_ref(inst, instance):
self.called_fake_get_vm_opaque_ref = True
if instance["uuid"] == "not_found":
raise exception.NotFound
self.assertEqual(instance, {'uuid': 'fake'})
return 'vm_ref'
def fake_add_to_param_xenstore(inst, vm_ref, key, val):
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['persist'][key] = val
def fake_remove_from_param_xenstore(inst, vm_ref, key):
self.assertEqual(vm_ref, 'vm_ref')
if key in self.xenstore['persist']:
del self.xenstore['persist'][key]
def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['ephem'][path] = jsonutils.dumps(value)
def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
if path in self.xenstore['ephem']:
del self.xenstore['ephem'][path]
self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
fake_get_vm_opaque_ref)
self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
fake_add_to_param_xenstore)
self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
fake_remove_from_param_xenstore)
self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
fake_write_to_xenstore)
self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
fake_delete_from_xenstore)
def test_inject_instance_metadata(self):
# Add some system_metadata to ensure it doesn't get added
# to xenstore
instance = dict(metadata=[{'key': 'a', 'value': 1},
{'key': 'b', 'value': 2},
{'key': 'c', 'value': 3},
# Check xenstore key sanitizing
{'key': 'hi.there', 'value': 4},
{'key': 'hi!t.e/e', 'value': 5}],
# Check xenstore key sanitizing
system_metadata=[{'key': 'sys_a', 'value': 1},
{'key': 'sys_b', 'value': 2},
{'key': 'sys_c', 'value': 3}],
uuid='fake')
self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/hi_there': '4',
'vm-data/user-metadata/hi_t_e_e': '5',
},
'ephem': {},
})
def test_change_instance_metadata_add(self):
# Test XenStore key sanitizing here, too.
diff = {'test.key': ['+', 4]}
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
})
def test_change_instance_metadata_update(self):
diff = dict(b=['+', 4])
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
})
def test_change_instance_metadata_delete(self):
diff = dict(b=['-'])
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
})
def test_change_instance_metadata_not_found(self):
instance = {'uuid': 'not_found'}
self.conn._vmops.change_instance_metadata(instance, "fake_diff")
self.assertTrue(self.called_fake_get_vm_opaque_ref)
class XenAPISessionTestCase(test.NoDBTestCase):
def _get_mock_xapisession(self, software_version):
class MockXapiSession(xenapi_session.XenAPISession):
def __init__(_ignore):
"Skip the superclass's dirty init"
def _get_software_version(_ignore):
return software_version
return MockXapiSession()
def test_local_session(self):
session = self._get_mock_xapisession({})
session.is_local_connection = True
session.XenAPI = self.mox.CreateMockAnything()
session.XenAPI.xapi_local().AndReturn("local_connection")
self.mox.ReplayAll()
self.assertEqual("local_connection",
session._create_session("unix://local"))
def test_remote_session(self):
session = self._get_mock_xapisession({})
session.is_local_connection = False
session.XenAPI = self.mox.CreateMockAnything()
session.XenAPI.Session("url").AndReturn("remote_connection")
self.mox.ReplayAll()
self.assertEqual("remote_connection", session._create_session("url"))
def test_get_product_version_product_brand_does_not_fail(self):
session = self._get_mock_xapisession({
'build_number': '0',
'date': '2012-08-03',
'hostname': 'komainu',
'linux': '3.2.0-27-generic',
'network_backend': 'bridge',
'platform_name': 'XCP_Kronos',
'platform_version': '1.6.0',
'xapi': '1.3',
'xen': '4.1.2',
'xencenter_max': '1.10',
'xencenter_min': '1.10'
})
self.assertEqual(
((1, 6, 0), None),
session._get_product_version_and_brand()
)
def test_get_product_version_product_brand_xs_6(self):
session = self._get_mock_xapisession({
'product_brand': 'XenServer',
'product_version': '6.0.50',
'platform_version': '0.0.1'
})
self.assertEqual(
((6, 0, 50), 'XenServer'),
session._get_product_version_and_brand()
)
def test_verify_plugin_version_same(self):
session = self._get_mock_xapisession({})
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version.py', 'get_version',
).AndReturn("2.4")
self.mox.ReplayAll()
session._verify_plugin_version()
def test_verify_plugin_version_compatible(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version.py', 'get_version',
).AndReturn("2.5")
self.mox.ReplayAll()
session._verify_plugin_version()
def test_verify_plugin_version_python_extensions(self):
"""Validate that 2.0 is equivalent to 1.8."""
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.0'
with mock.patch.object(session, 'call_plugin_serialized',
return_value='1.8'):
session._verify_plugin_version()
def test_verify_plugin_version_bad_maj(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version.py', 'get_version',
).AndReturn("3.0")
self.mox.ReplayAll()
self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
def test_verify_plugin_version_bad_min(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version.py', 'get_version',
).AndReturn("2.3")
self.mox.ReplayAll()
self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
def test_verify_current_version_matches(self):
session = self._get_mock_xapisession({})
# Import the plugin to extract its version
path = os.path.dirname(__file__)
rel_path_elem = "../../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
"plugins/nova_plugin_version.py"
for elem in rel_path_elem.split('/'):
path = os.path.join(path, elem)
path = os.path.realpath(path)
plugin_version = None
with open(path) as plugin_file:
for line in plugin_file:
if "PLUGIN_VERSION = " in line:
plugin_version = line.strip()[17:].strip('"')
self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
plugin_version)
class XenAPIFakeTestCase(test.NoDBTestCase):
def test_query_matches(self):
record = {'a': '1', 'b': '2', 'c_d': '3'}
tests = {'field "a"="1"': True,
'field "b"="2"': True,
'field "b"="4"': False,
'not field "b"="4"': True,
'field "a"="1" and field "b"="4"': False,
'field "a"="1" or field "b"="4"': True,
'field "c__d"="3"': True,
'field \'b\'=\'2\'': True,
}
for query in tests.keys():
expected = tests[query]
fail_msg = "for test '%s'" % query
self.assertEqual(xenapi_fake._query_matches(record, query),
expected, fail_msg)
def test_query_bad_format(self):
record = {'a': '1', 'b': '2', 'c': '3'}
tests = ['"a"="1" or "b"="4"',
'a=1',
]
for query in tests:
fail_msg = "for test '%s'" % query
self.assertFalse(xenapi_fake._query_matches(record, query),
fail_msg)
|
cloudbase/nova
|
nova/tests/unit/virt/xenapi/test_xenapi.py
|
Python
|
apache-2.0
| 177,912
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
'''
Use dependency versions from a conda environment .yaml file to update
a recipe/meta.yaml file of a given package. Such an input file can
be created from the given environment with:
conda env export > myenv.yaml
'''
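# Minimal usage sketch (file and package names below are hypothetical):
#     conda env export > myenv.yaml
#     python update_versions.py myenv.yaml mypackage-feedstock
# This rewrites the dependency versions pinned in
# mypackage-feedstock/recipe/meta.yaml to match those listed in myenv.yaml.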
import sys, os, re
if len(sys.argv) < 3:
print("Usage: " + os.path.basename(sys.argv[0]) + " input.yaml mypackage-feedstock")
sys.exit(1)
inFile = sys.argv[1]
outDir = sys.argv[2]
outFile = outDir + "/recipe/meta.yaml"
if not os.path.exists(outFile):
print("Cannot open file: " + outFile)
sys.exit(1)
# parse the versions from the conda env
conda_env = {}
print("Reading: " + inFile)
inHandle = open(inFile, 'r')
lines = inHandle.readlines()
for line in lines:
# Wipe comments
m = re.match('^(.*?)\#', line)
if m:
line = m.group(1)
# Match the package
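    # e.g. (hypothetical values) a line "  - numpy=1.18.1=py37h8960a57_0" gives
    # group(1) == "numpy" and group(2) == "1.18.1"; the build string after the
    # second "=" is ignored.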
m = re.match('^\s*-\s*(.*?)\s*=+\s*(.*?)(=|\s|$)', line)
if not m:
continue
package = m.group(1)
version = m.group(2)
if re.match('^\s*$', package):
continue # ignore empty lines
conda_env[package] = version
#print("got ", package, version)
# Update the lines in the output file
outHandle = open(outFile, 'r')
lines = outHandle.readlines()
for it in range(len(lines)):
line = lines[it]
# Ignore comments
m = re.match('^\#', line)
if m:
continue
# Match the package
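    # e.g. (hypothetical recipe line) "    - numpy 1.18.1" gives pre == "    - ",
    # package == "numpy", spaces == " " and old_version == "1.18.1"; only the
    # version part of the line is rewritten below.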
m = re.match('^(\s+-[\t ]+)([^\s]+)(\s*)(.*?)$', line)
if not m:
continue
pre = m.group(1)
package = m.group(2)
spaces = m.group(3).rstrip("\n")
old_version = m.group(4).rstrip("\n")
if spaces == "":
# Ensure there's at least one space
spaces = " "
if old_version == "":
# If there was no version before, don't put one now
continue
    if package not in conda_env:
continue
version = conda_env[package]
if old_version != version:
if ('[linux]' in old_version) or ('[osx]' in old_version):
            # In this case the user had better take a closer look
print("For package " + package + ", not replacing " +
old_version + " with " + version + ", a closer look is suggested.")
else:
print("For package " + package + ", replacing version "
+ old_version + " with " + version)
            lines[it] = pre + package + spaces + version + "\n"
# Save the updated lines to disk
print("Updating: " + outFile)
outHandle = open(outFile, "w")
outHandle.writelines(lines)
outHandle.close()
|
oleg-alexandrov/StereoPipeline
|
conda/update_versions.py
|
Python
|
apache-2.0
| 3,360
|
import unittest
from streamlink.plugins.dogus import Dogus
class TestPluginDogus(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'http://www.ntvspor.net/canli-yayin',
'http://eurostartv.com.tr/canli-izle',
]
for url in should_match:
self.assertTrue(Dogus.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'https://example.com/index.html',
]
for url in should_not_match:
self.assertFalse(Dogus.can_handle_url(url))
|
back-to/streamlink
|
tests/plugins/test_dogus.py
|
Python
|
bsd-2-clause
| 586
|
__author__ = 'zhengwang'
import numpy as np
import cv2
import socket
class VideoStreamingTest(object):
def __init__(self):
self.server_socket = socket.socket()
self.server_socket.bind(('192.168.1.100', 8000))
self.server_socket.listen(0)
self.connection, self.client_address = self.server_socket.accept()
self.connection = self.connection.makefile('rb')
self.streaming()
def streaming(self):
try:
print "Connection from: ", self.client_address
print "Streaming..."
print "Press 'q' to exit"
stream_bytes = ' '
while True:
stream_bytes += self.connection.read(1024)
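                # '\xff\xd8' and '\xff\xd9' are the JPEG start-of-image (SOI) and
                # end-of-image (EOI) markers; each complete frame in the MJPEG
                # stream is the slice from one SOI through the following EOI.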
first = stream_bytes.find('\xff\xd8')
last = stream_bytes.find('\xff\xd9')
if first != -1 and last != -1:
jpg = stream_bytes[first:last + 2]
stream_bytes = stream_bytes[last + 2:]
#image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_GRAYSCALE)
image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)
cv2.imshow('image', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
finally:
self.connection.close()
self.server_socket.close()
if __name__ == '__main__':
VideoStreamingTest()
|
Quadrifrons/AutoRCCar
|
test/stream_server_test.py
|
Python
|
bsd-2-clause
| 1,474
|
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.external.echo import (CallbackProperty, SelectionCallbackProperty,
delay_callback, ListCallbackProperty)
from glue.core.state_objects import StateAttributeLimitsHelper
from glue.viewers.common.state import ViewerState
__all__ = ['Vispy3DViewerState']
class Vispy3DViewerState(ViewerState):
"""
A common state object for all vispy 3D viewers
"""
x_att = SelectionCallbackProperty()
x_min = CallbackProperty(0)
x_max = CallbackProperty(1)
x_stretch = CallbackProperty(1.)
y_att = SelectionCallbackProperty(default_index=1)
y_min = CallbackProperty(0)
y_max = CallbackProperty(1)
y_stretch = CallbackProperty(1.)
z_att = SelectionCallbackProperty(default_index=2)
z_min = CallbackProperty(0)
z_max = CallbackProperty(1)
z_stretch = CallbackProperty(1.)
visible_axes = CallbackProperty(True)
perspective_view = CallbackProperty(False)
clip_data = CallbackProperty(True)
native_aspect = CallbackProperty(False)
layers = ListCallbackProperty()
limits_cache = CallbackProperty()
def _update_priority(self, name):
if name == 'layers':
return 2
elif name.endswith(('_min', '_max')):
return 0
else:
return 1
def __init__(self, **kwargs):
super(Vispy3DViewerState, self).__init__(**kwargs)
if self.limits_cache is None:
self.limits_cache = {}
self.x_lim_helper = StateAttributeLimitsHelper(self, attribute='x_att',
lower='x_min', upper='x_max',
cache=self.limits_cache)
self.y_lim_helper = StateAttributeLimitsHelper(self, attribute='y_att',
lower='y_min', upper='y_max',
cache=self.limits_cache)
self.z_lim_helper = StateAttributeLimitsHelper(self, attribute='z_att',
lower='z_min', upper='z_max',
cache=self.limits_cache)
# TODO: if limits_cache is re-assigned to a different object, we need to
# update the attribute helpers. However if in future we make limits_cache
# into a smart dictionary that can call callbacks when elements are
# changed then we shouldn't always call this. It'd also be nice to
        # avoid this altogether and make it cleaner.
self.add_callback('limits_cache', self._update_limits_cache)
def reset_limits(self):
self.x_lim_helper.log = False
self.x_lim_helper.percentile = 100.
self.x_lim_helper.update_values(force=True)
self.y_lim_helper.log = False
self.y_lim_helper.percentile = 100.
self.y_lim_helper.update_values(force=True)
self.z_lim_helper.log = False
self.z_lim_helper.percentile = 100.
self.z_lim_helper.update_values(force=True)
def _update_limits_cache(self, *args):
self.x_lim_helper._cache = self.limits_cache
self.x_lim_helper._update_attribute()
self.y_lim_helper._cache = self.limits_cache
self.y_lim_helper._update_attribute()
self.z_lim_helper._cache = self.limits_cache
self.z_lim_helper._update_attribute()
@property
def aspect(self):
# TODO: this could be cached based on the limits, but is not urgent
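        # Illustrative example (hypothetical limits): with native_aspect enabled
        # and axis ranges of 1, 2 and 4 along x, y and z, this yields
        # aspect == [0.25, 0.5, 1.0] after normalising by the largest ratio.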
aspect = np.array([1, 1, 1], dtype=float)
if self.native_aspect:
aspect[0] = 1.
aspect[1] = (self.y_max - self.y_min) / (self.x_max - self.x_min)
aspect[2] = (self.z_max - self.z_min) / (self.x_max - self.x_min)
aspect /= aspect.max()
return aspect
def reset(self):
pass
def flip_x(self):
self.x_lim_helper.flip_limits()
def flip_y(self):
self.y_lim_helper.flip_limits()
def flip_z(self):
self.z_lim_helper.flip_limits()
@property
def clip_limits(self):
return (self.x_min, self.x_max,
self.y_min, self.y_max,
self.z_min, self.z_max)
def set_limits(self, x_min, x_max, y_min, y_max, z_min, z_max):
with delay_callback(self, 'x_min', 'x_max', 'y_min', 'y_max', 'z_min', 'z_max'):
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
self.z_min = z_min
self.z_max = z_max
|
astrofrog/glue-vispy-viewers
|
glue_vispy_viewers/common/viewer_state.py
|
Python
|
bsd-2-clause
| 4,667
|
from saml2test.check.ec_compare import Result
from saml2test.check.ec_compare import EntityCategoryTestResult
from saml2test.check.ec_compare import verify_rs_compliance
from saml2test.check.ec_compare import verify_coco_compliance
from saml2.entity_category import refeds
from saml2.entity_category import edugain
__author__ = 'roland'
def list_eq(l1, l2):
return set(l1) == set(l2)
def test_result():
res = Result('R&S')
res.missing.append('mail')
assert len(res) == 1
_str = '{}'.format(res)
assert _str == "R&S: missing=['mail']"
res.missing.append("cn")
assert len(res) == 2
_str = '{}'.format(res)
assert _str == "R&S: missing=['mail', 'cn']"
res.extra.append('ou')
assert len(res) == 3
_str = '{}'.format(res)
assert _str == "R&S: missing=['mail', 'cn'], extra=['ou']"
def test_entity_category_test_result():
res = Result('R&S')
res.missing.append('mail')
res.extra.append('ou')
tr = EntityCategoryTestResult('test_id', 2, 'name', specifics=[res])
tr.message = "Non conformant"
assert tr.status == 2
_str = '{}'.format(tr)
assert _str == "test_id: status=WARNING, message=Non conformant\nR&S: " \
"missing=['mail'], extra=['ou']"
def test_entity_category_test_result_comb():
ec_attr_rs = refeds.RELEASE[refeds.RESEARCH_AND_SCHOLARSHIP]
ec_attr_rs.extend(refeds.RELEASE[''])
ec_attr_coco = edugain.RELEASE[edugain.COCO]
ec_attr_coco.extend(edugain.RELEASE[''])
ava = {
'eduPersonPrincipalName': 'foo@example.com',
'eduPersonTargetedID': 'foovar',
'location': 'earth'
}
requested_attributes = ['eduPersonPrincipalName',
'eduPersonScopedAffiliation',
'mail']
res_rs = verify_rs_compliance('R&S', ava, requested_attributes, ec_attr_rs)
assert list_eq(res_rs.missing, ['mail', 'displayName', 'givenName', 'sn'])
assert list_eq(res_rs.expected,
['eduPersonPrincipalName', 'eduPersonTargetedID'])
assert res_rs.extra == ['location']
res_coco = verify_coco_compliance('CoCo', ava, requested_attributes,
ec_attr_coco)
assert list_eq(res_coco.missing, ['eduPersonScopedAffiliation', 'mail'])
assert list_eq(res_coco.expected, ['eduPersonPrincipalName',
'eduPersonTargetedID'])
assert res_coco.extra == ['location']
res = res_rs.union(res_coco)
assert list_eq(res.missing, ['displayName', 'givenName',
'eduPersonScopedAffiliation', 'sn', 'mail'])
assert list_eq(res.expected,
['eduPersonPrincipalName', 'eduPersonTargetedID'])
assert res.extra == ['location']
|
identinetics/saml2test2
|
test/test_ec_compare.py
|
Python
|
bsd-2-clause
| 2,785
|
import contextlib
import imp
import os
import shutil
import subprocess
import sys
import tempfile
from unittest import skip
from ctypes import *
import numpy as np
import llvmlite.binding as ll
from numba.core import utils
from numba.pycc import main
from numba.pycc.decorators import clear_export_registry
from numba.pycc.platform import find_shared_ending, find_pyext_ending
from numba.pycc.platform import _external_compiler_ok
from numba.tests.support import TestCase, tag, import_dynamic, temp_directory, has_blas
import unittest
try:
import setuptools
except ImportError:
setuptools = None
# if suitable compilers are not present then skip.
_skip_reason = 'AOT compatible compilers missing'
_skip_missing_compilers = unittest.skipIf(not _external_compiler_ok,
_skip_reason)
_skip_reason = 'windows only'
_windows_only = unittest.skipIf(not sys.platform.startswith('win'),
_skip_reason)
base_path = os.path.dirname(os.path.abspath(__file__))
def unset_macosx_deployment_target():
"""Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable
libraries
"""
if 'MACOSX_DEPLOYMENT_TARGET' in os.environ:
del os.environ['MACOSX_DEPLOYMENT_TARGET']
class TestCompilerChecks(TestCase):
# NOTE: THIS TEST MUST ALWAYS RUN ON WINDOWS, DO NOT SKIP
@_windows_only
def test_windows_compiler_validity(self):
# When inside conda-build VSINSTALLDIR should be set and windows should
        # have a valid compiler available; `_external_compiler_ok` should agree
# with this. If this is not the case then error out to alert devs.
is_running_conda_build = os.environ.get('CONDA_BUILD', None) is not None
if is_running_conda_build:
if os.environ.get('VSINSTALLDIR', None) is not None:
self.assertTrue(_external_compiler_ok)
class BasePYCCTest(TestCase):
def setUp(self):
unset_macosx_deployment_target()
self.tmpdir = temp_directory('test_pycc')
# Make sure temporary files and directories created by
# distutils don't clutter the top-level /tmp
tempfile.tempdir = self.tmpdir
def tearDown(self):
tempfile.tempdir = None
# Since we're executing the module-under-test several times
# from the same process, we must clear the exports registry
# between invocations.
clear_export_registry()
@contextlib.contextmanager
def check_c_ext(self, extdir, name):
sys.path.append(extdir)
try:
lib = import_dynamic(name)
yield lib
finally:
sys.path.remove(extdir)
sys.modules.pop(name, None)
@_skip_missing_compilers
class TestLegacyAPI(BasePYCCTest):
def test_pycc_ctypes_lib(self):
"""
Test creating a C shared library object using pycc.
"""
source = os.path.join(base_path, 'compile_with_pycc.py')
cdll_modulename = 'test_dll_legacy' + find_shared_ending()
cdll_path = os.path.join(self.tmpdir, cdll_modulename)
if os.path.exists(cdll_path):
os.unlink(cdll_path)
main(args=['--debug', '-o', cdll_path, source])
lib = CDLL(cdll_path)
lib.mult.argtypes = [POINTER(c_double), c_void_p,
c_double, c_double]
lib.mult.restype = c_int
lib.multf.argtypes = [POINTER(c_float), c_void_p,
c_float, c_float]
lib.multf.restype = c_int
res = c_double()
lib.mult(byref(res), None, 123, 321)
self.assertEqual(res.value, 123 * 321)
res = c_float()
lib.multf(byref(res), None, 987, 321)
self.assertEqual(res.value, 987 * 321)
def test_pycc_pymodule(self):
"""
Test creating a CPython extension module using pycc.
"""
self.skipTest("lack of environment can make the extension crash")
source = os.path.join(base_path, 'compile_with_pycc.py')
modulename = 'test_pyext_legacy'
out_modulename = os.path.join(self.tmpdir,
modulename + find_pyext_ending())
if os.path.exists(out_modulename):
os.unlink(out_modulename)
main(args=['--debug', '--python', '-o', out_modulename, source])
with self.check_c_ext(self.tmpdir, modulename) as lib:
res = lib.multi(123, 321)
self.assertPreciseEqual(res, 123 * 321)
res = lib.multf(987, 321)
self.assertPreciseEqual(res, 987.0 * 321.0)
def test_pycc_bitcode(self):
"""
Test creating a LLVM bitcode file using pycc.
"""
modulename = os.path.join(base_path, 'compile_with_pycc')
bitcode_modulename = os.path.join(self.tmpdir, 'test_bitcode_legacy.bc')
if os.path.exists(bitcode_modulename):
os.unlink(bitcode_modulename)
main(args=['--debug', '--llvm', '-o', bitcode_modulename,
modulename + '.py'])
# Sanity check bitcode file contents
with open(bitcode_modulename, "rb") as f:
bc = f.read()
bitcode_wrapper_magic = b'\xde\xc0\x17\x0b'
bitcode_magic = b'BC\xc0\xde'
self.assertTrue(bc.startswith((bitcode_magic, bitcode_wrapper_magic)), bc)
@_skip_missing_compilers
class TestCC(BasePYCCTest):
def setUp(self):
super(TestCC, self).setUp()
from numba.tests import compile_with_pycc
self._test_module = compile_with_pycc
imp.reload(self._test_module)
@contextlib.contextmanager
def check_cc_compiled(self, cc):
#cc.verbose = True
cc.output_dir = self.tmpdir
cc.compile()
with self.check_c_ext(self.tmpdir, cc.name) as lib:
yield lib
def check_cc_compiled_in_subprocess(self, lib, code):
prolog = """if 1:
import sys
import types
# to disable numba package
sys.modules['numba'] = types.ModuleType('numba')
try:
from numba import njit
except ImportError:
pass
else:
raise RuntimeError('cannot disable numba package')
sys.path.insert(0, %(path)r)
import %(name)s as lib
""" % {'name': lib.__name__,
'path': os.path.dirname(lib.__file__)}
code = prolog.strip(' ') + code
subprocess.check_call([sys.executable, '-c', code])
def test_cc_properties(self):
cc = self._test_module.cc
self.assertEqual(cc.name, 'pycc_test_simple')
# Inferred output directory
d = self._test_module.cc.output_dir
self.assertTrue(os.path.isdir(d), d)
# Inferred output filename
f = self._test_module.cc.output_file
self.assertFalse(os.path.exists(f), f)
self.assertTrue(os.path.basename(f).startswith('pycc_test_simple.'), f)
if sys.platform.startswith('linux'):
self.assertTrue(f.endswith('.so'), f)
self.assertIn('.cpython', f)
def test_compile(self):
with self.check_cc_compiled(self._test_module.cc) as lib:
res = lib.multi(123, 321)
self.assertPreciseEqual(res, 123 * 321)
res = lib.multf(987, 321)
self.assertPreciseEqual(res, 987.0 * 321.0)
res = lib.square(5)
self.assertPreciseEqual(res, 25)
self.assertIs(lib.get_none(), None)
with self.assertRaises(ZeroDivisionError):
lib.div(1, 0)
def check_compile_for_cpu(self, cpu_name):
cc = self._test_module.cc
cc.target_cpu = cpu_name
with self.check_cc_compiled(cc) as lib:
res = lib.multi(123, 321)
self.assertPreciseEqual(res, 123 * 321)
self.assertEqual(lib.multi.__module__, 'pycc_test_simple')
def test_compile_for_cpu(self):
# Compiling for the host CPU should always succeed
self.check_compile_for_cpu(ll.get_host_cpu_name())
def test_compile_for_cpu_host(self):
# Compiling for the host CPU should always succeed
self.check_compile_for_cpu("host")
@unittest.skipIf(sys.platform == 'darwin' and
utils.PYVERSION == (3, 8),
'distutils incorrectly using gcc on python 3.8 builds')
def test_compile_helperlib(self):
with self.check_cc_compiled(self._test_module.cc_helperlib) as lib:
res = lib.power(2, 7)
self.assertPreciseEqual(res, 128)
for val in (-1, -1 + 0j, np.complex128(-1)):
res = lib.sqrt(val)
self.assertPreciseEqual(res, 1j)
for val in (4, 4.0, np.float64(4)):
res = lib.np_sqrt(val)
self.assertPreciseEqual(res, 2.0)
res = lib.spacing(1.0)
self.assertPreciseEqual(res, 2**-52)
# Implicit seeding at startup should guarantee a non-pathological
# start state.
self.assertNotEqual(lib.random(-1), lib.random(-1))
res = lib.random(42)
expected = np.random.RandomState(42).random_sample()
self.assertPreciseEqual(res, expected)
res = lib.size(np.float64([0] * 3))
self.assertPreciseEqual(res, 3)
code = """if 1:
from numpy.testing import assert_equal, assert_allclose
res = lib.power(2, 7)
assert res == 128
res = lib.random(42)
assert_allclose(res, %(expected)s)
res = lib.spacing(1.0)
assert_allclose(res, 2**-52)
""" % {'expected': expected}
self.check_cc_compiled_in_subprocess(lib, code)
def test_compile_nrt(self):
with self.check_cc_compiled(self._test_module.cc_nrt) as lib:
# Sanity check
self.assertPreciseEqual(lib.zero_scalar(1), 0.0)
res = lib.zeros(3)
self.assertEqual(list(res), [0, 0, 0])
if has_blas:
res = lib.vector_dot(4)
self.assertPreciseEqual(res, 30.0)
# test argsort
val = np.float64([2., 5., 1., 3., 4.])
res = lib.np_argsort(val)
expected = np.argsort(val)
self.assertPreciseEqual(res, expected)
code = """if 1:
from numpy.testing import assert_equal
from numpy import float64, argsort
res = lib.zero_scalar(1)
assert res == 0.0
res = lib.zeros(3)
assert list(res) == [0, 0, 0]
if %(has_blas)s:
res = lib.vector_dot(4)
assert res == 30.0
val = float64([2., 5., 1., 3., 4.])
res = lib.np_argsort(val)
expected = argsort(val)
assert_equal(res, expected)
""" % dict(has_blas=has_blas)
self.check_cc_compiled_in_subprocess(lib, code)
def test_hashing(self):
with self.check_cc_compiled(self._test_module.cc_nrt) as lib:
res = lib.hash_literal_str_A()
self.assertPreciseEqual(res, hash("A"))
res = lib.hash_str("A")
self.assertPreciseEqual(res, hash("A"))
code = """if 1:
from numpy.testing import assert_equal
res = lib.hash_literal_str_A()
assert_equal(res, hash("A"))
res = lib.hash_str("A")
assert_equal(res, hash("A"))
"""
self.check_cc_compiled_in_subprocess(lib, code)
def test_c_extension_usecase(self):
# Test C-extensions
with self.check_cc_compiled(self._test_module.cc_nrt) as lib:
arr = np.arange(128, dtype=np.intp)
got = lib.dict_usecase(arr)
expect = arr * arr
self.assertPreciseEqual(got, expect)
@_skip_missing_compilers
class TestDistutilsSupport(TestCase):
def setUp(self):
unset_macosx_deployment_target()
# Copy the test project into a temp directory to avoid
# keeping any build leftovers in the source tree
self.tmpdir = temp_directory('test_pycc_distutils')
source_dir = os.path.join(base_path, 'pycc_distutils_usecase')
self.usecase_dir = os.path.join(self.tmpdir, 'work')
shutil.copytree(source_dir, self.usecase_dir)
def check_setup_py(self, setup_py_file):
# Compute PYTHONPATH to ensure the child processes see this Numba
import numba
numba_path = os.path.abspath(os.path.dirname(
os.path.dirname(numba.__file__)))
env = dict(os.environ)
if env.get('PYTHONPATH', ''):
env['PYTHONPATH'] = numba_path + os.pathsep + env['PYTHONPATH']
else:
env['PYTHONPATH'] = numba_path
def run_python(args):
p = subprocess.Popen([sys.executable] + args,
cwd=self.usecase_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
out, _ = p.communicate()
rc = p.wait()
if rc != 0:
self.fail("python failed with the following output:\n%s"
% out.decode('utf-8', 'ignore'))
run_python([setup_py_file, "build_ext", "--inplace"])
code = """if 1:
import pycc_compiled_module as lib
assert lib.get_const() == 42
res = lib.ones(3)
assert list(res) == [1.0, 1.0, 1.0]
"""
run_python(["-c", code])
def check_setup_nested_py(self, setup_py_file):
# Compute PYTHONPATH to ensure the child processes see this Numba
import numba
numba_path = os.path.abspath(os.path.dirname(
os.path.dirname(numba.__file__)))
env = dict(os.environ)
if env.get('PYTHONPATH', ''):
env['PYTHONPATH'] = numba_path + os.pathsep + env['PYTHONPATH']
else:
env['PYTHONPATH'] = numba_path
def run_python(args):
p = subprocess.Popen([sys.executable] + args,
cwd=self.usecase_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
out, _ = p.communicate()
rc = p.wait()
if rc != 0:
self.fail("python failed with the following output:\n%s"
% out.decode('utf-8', 'ignore'))
run_python([setup_py_file, "build_ext", "--inplace"])
code = """if 1:
import nested.pycc_compiled_module as lib
assert lib.get_const() == 42
res = lib.ones(3)
assert list(res) == [1.0, 1.0, 1.0]
"""
run_python(["-c", code])
def test_setup_py_distutils(self):
self.check_setup_py("setup_distutils.py")
def test_setup_py_distutils_nested(self):
self.check_setup_nested_py("setup_distutils_nested.py")
@unittest.skipIf(setuptools is None, "test needs setuptools")
def test_setup_py_setuptools(self):
self.check_setup_py("setup_setuptools.py")
@unittest.skipIf(setuptools is None, "test needs setuptools")
def test_setup_py_setuptools_nested(self):
self.check_setup_nested_py("setup_setuptools_nested.py")
if __name__ == "__main__":
unittest.main()
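# A minimal sketch of the numba.pycc ahead-of-time API exercised by the TestCC
# cases above.  The module name, output directory handling and exported
# signature here are illustrative assumptions, not values used by the tests.
def _example_build_aot_module(output_dir):
    from numba.pycc import CC
    cc = CC('example_aot_module')  # name of the extension module to build
    cc.output_dir = output_dir
    @cc.export('mult', 'f8(f8, f8)')
    def mult(a, b):
        return a * b
    # compile() writes the CPython extension into output_dir and, as in the
    # tests above, needs a working C compiler on the build machine.
    cc.compile()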
|
stonebig/numba
|
numba/tests/test_pycc.py
|
Python
|
bsd-2-clause
| 15,763
|
# -*- coding: utf-8 -*-
"""
==============================================================================
MWNT structure class (:mod:`sknano.structures._mwnt`)
==============================================================================
.. currentmodule:: sknano.structures._mwnt
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
__docformat__ = 'restructuredtext en'
import numpy as np
# from sknano.core.crystallography import Crystal3DLattice, UnitCell
from sknano.core.refdata import aCC, element_data
from ._base import NanoStructureBase, r_CC_vdw
from ._swnt import SWNT, compute_dt # , compute_T
from ._extras import generate_Ch_list
__all__ = ['MWNTMixin', 'MWNT']
class MWNTMixin:
"""Mixin class for MWNTs."""
@property
def Ch_list(self):
return self._Ch_list
@Ch_list.setter
def Ch_list(self, value):
if not isinstance(value, list):
raise TypeError('Expected a list')
self._Ch_list = value[:]
@property
def chiral_types(self):
"""List of chiral types for each `MWNT` wall."""
return [swnt.chiral_type for swnt in self.walls]
@chiral_types.setter
def chiral_types(self, value):
if not isinstance(value, list):
raise TypeError('Expected a list')
self.update_Ch_list(chiral_types=value)
@property
def chiral_set(self):
"""Set of all chiral types in `MWNT`."""
return set(self.chiral_types)
@property
    def dt(self):
        """`MWNT` diameter :math:`d_t=\\frac{|\\mathbf{C}_h|}{\\pi}` of the \
        outermost wall, in \u212b."""
return self.walls[-1].dt
@property
    def rt(self):
        """`MWNT` radius :math:`r_t=\\frac{|\\mathbf{C}_h|}{2\\pi}` of the \
        outermost wall, in \u212b."""
return self.walls[-1].rt
@property
    def Natoms(self):
        """Number of atoms in `MWNT`.

        **Returns total number of atoms in `MWNT`.**
        Use :attr:`~MWNT.Natoms_per_wall` to get a list of the number of
        atoms in each `MWNT` wall.

        .. math::

           N_{\\mathrm{atoms}} = \\sum_{\\mathrm{wall}=1}^{N_{\\mathrm{walls}}}
           N_{\\mathrm{atoms}}^{\\mathrm{wall}}

        """
return np.asarray(self.Natoms_per_wall).sum()
@property
    def Natoms_per_tube(self):
        """Alias for :attr:`MWNT.Natoms`."""
return self.Natoms
@property
def Ntubes(self):
"""Number of `MWNT`\ s."""
return 1
@property
def Nwalls(self):
"""Number of `MWNT` walls."""
return len(self.Ch_list)
@Nwalls.setter
def Nwalls(self, value):
self.update_Ch_list(Nwalls=value)
@property
def min_wall_diameter(self):
return self._min_wall_diameter
@min_wall_diameter.setter
def min_wall_diameter(self, value):
self._min_wall_diameter = value
self.update_Ch_list()
@property
def max_wall_diameter(self):
return self._max_wall_diameter
@max_wall_diameter.setter
def max_wall_diameter(self, value):
self._max_wall_diameter = value
self.update_Ch_list()
@property
def max_walls(self):
return self._max_walls
@max_walls.setter
def max_walls(self, value):
self._max_walls = value
@property
def wall_spacing(self):
return self._wall_spacing
@wall_spacing.setter
def wall_spacing(self, value):
self._wall_spacing = value
self.update_Ch_list()
@property
def tube_mass(self):
"""MWNT mass in **grams**."""
return np.asarray([swnt.tube_mass for swnt in self.walls]).sum()
# @property
# def Lz(self):
# return self._Lz
# @Lz.setter
# def Lz(self, value):
# self._Lz = value
@property
def Natoms_per_wall(self):
"""Alias for :attr:`MWNT.Natoms_list`"""
return self.Natoms_list
@property
    def Natoms_list(self):
        """List of the number of atoms :attr:`~SWNT.Natoms` in each \
        `MWNT` wall."""
return [swnt.Natoms for swnt in self.walls]
@property
    def nz_list(self):
        """List of the number of nanotube unit cells along the \
        :math:`z`-axis for each `MWNT` wall."""
return [swnt.nz for swnt in self.walls]
@property
    def Lz_list(self):
        """List of `MWNT` wall lengths :math:`L_z = L_{\\mathrm{tube}}` \
        in **nanometers**."""
return [swnt.Lz for swnt in self.walls]
@property
    def T_list(self):
        """List of `MWNT` wall unit cell lengths :math:`|\\mathbf{T}|` \
        in \u212b."""
return [swnt.T for swnt in self.walls]
@property
def dt_list(self):
"""List of `MWNT` `SWNT` wall diameters :attr:`~SWNT.dt` \
:math:`d_t=\\frac{|\\mathbf{C}_h|}{\\pi}` in \u212b."""
return [swnt.dt for swnt in self.walls]
@property
def rt_list(self):
"""List of `MWNT` `SWNT` wall radii :attr:`~SWNT.rt` \
:math:`r_t=\\frac{|\\mathbf{C}_h|}{2\\pi}` in \u212b."""
return [swnt.rt for swnt in self.walls]
@property
def wall_diameters(self):
"""Alias for :attr:`MWNTMixin.dt_list`."""
return self.dt_list
@property
def wall_radii(self):
"""Alias for :attr:`MWNTMixin.rt_list`."""
return self.rt_list
@property
def walls(self):
"""List of `MWNT` `SWNT` wall structures."""
return [SWNT(Ch, Lz=self.Lz, fix_Lz=True, basis=self.basis,
bond=self.bond) for Ch in self.Ch_list]
def get_wall(self, Ch):
"""Return the :class:`~sknano.structures.SWNT` structure with \
chirality `Ch`.
"""
return SWNT(Ch, Lz=self.Lz, fix_Lz=True, basis=self.basis,
bond=self.bond) if Ch in self.Ch_list else None
    def generate_dt_mask(self, dt, max_dt_diff=0.5):
        """Generate boolean mask array.

        Parameters
        ----------
        dt : float
            Target wall diameter in \u212b.
        max_dt_diff : float, optional
            Maximum allowed difference between the candidate diameters in
            the pool and `dt`.

        Returns
        -------
        dt_mask : :class:`~numpy:numpy.ndarray`

        """
dt_mask = np.abs(self._dt_pool - dt) <= max_dt_diff
while not np.any(dt_mask):
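            # No chirality in the pool lies within ``max_dt_diff`` of the
            # target diameter: double the tolerance and try again.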
max_dt_diff += max_dt_diff
dt_mask = np.abs(self._dt_pool - dt) <= max_dt_diff
return dt_mask
def generate_Ch_list(self, Nwalls=None, max_walls=None,
min_wall_diameter=None, max_wall_diameter=None,
chiral_types=None, wall_spacing=None):
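        # Build a pool of candidate (n, m) chiralities, keep those whose
        # diameters fall within [min_wall_diameter, max_wall_diameter], lay
        # out target wall diameters separated by ``2 * wall_spacing``, and
        # pick one chirality at random from the pool near each target.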
if Nwalls is not None:
max_walls = Nwalls
if max_walls is None:
max_walls = 10
if max_wall_diameter is None:
max_wall_diameter = np.inf
if min_wall_diameter is None:
min_wall_diameter = 5.0
if wall_spacing is None:
wall_spacing = 2 * element_data['C']['VanDerWaalsRadius']
delta_dt = 2 * wall_spacing
imax = 100
self._Ch_pool = \
np.asarray(generate_Ch_list(imax=imax,
chiral_types=chiral_types))
self._dt_pool = np.asarray([compute_dt(_Ch, bond=self.bond) for _Ch
in self._Ch_pool])
dt_mask = np.logical_and(self._dt_pool >= min_wall_diameter,
self._dt_pool <= max_wall_diameter)
self._Ch_pool = self._Ch_pool[dt_mask]
self._dt_pool = self._dt_pool[dt_mask]
if max_wall_diameter < np.inf:
dt_list = []
dt = self._dt_pool.min()
while dt <= max_wall_diameter and len(dt_list) < max_walls:
dt_list.append(dt)
dt += delta_dt
else:
dt_list = [self._dt_pool.min() + i * delta_dt
for i in range(max_walls)]
dt_masks = [self.generate_dt_mask(_dt) for _dt in dt_list]
return [tuple(self._Ch_pool[_mask][np.random.choice(
list(range(len(self._Ch_pool[_mask]))))].tolist())
for _mask in dt_masks]
def update_Ch_list(self, Nwalls=None, min_wall_diameter=None,
max_wall_diameter=None, wall_spacing=None,
chiral_types=None):
if Nwalls is None:
Nwalls = self.Nwalls
if min_wall_diameter is None:
min_wall_diameter = self.min_wall_diameter
if max_wall_diameter is None:
max_wall_diameter = self.max_wall_diameter
if wall_spacing is None:
wall_spacing = self.wall_spacing
self.Ch_list = \
self.generate_Ch_list(Nwalls=Nwalls,
min_wall_diameter=min_wall_diameter,
max_wall_diameter=max_wall_diameter,
chiral_types=chiral_types,
wall_spacing=wall_spacing)
class MWNT(MWNTMixin, NanoStructureBase):
"""MWNT structure class.
Parameters
----------
Ch_list : :class:`python:list`, optional
(:attr:`~SWNT.n`, :attr:`~SWNT.m`) for each `SWNT` wall in `MWNT`.
Nwalls : int, optional
Number of `SWNT` walls in `MWNT`.
Lz : float, optional
`MWNT` length in **nanometers**.
min_wall_diameter : float, optional
Minimum `MWNT` wall diameter, in units of **Angstroms**.
max_wall_diameter : float, optional
Maximum `MWNT` wall diameter, in units of **Angstroms**.
max_walls : int, optional
Maximum number of `MWNT` walls.
chiral_types : {None, 'armchair', 'zigzag', 'achiral', 'chiral'}, optional
If `None`, the :attr:`~SWNT.chiral_type` of each `MWNT` walls
will be random and determined by the set of randomly selected
chiral indices (:attr:`~SWNT.n`, :attr:`~SWNT.m`).
wall_spacing : float, optional
Inter-wall spacing in units of **Angstroms**.
Default value is the van der Waals interaction distance of 3.35
Angstroms.
basis : {:class:`python:list`}, optional
List of :class:`python:str`\ s of element symbols or atomic number
of the two atom basis (default: ['C', 'C'])
.. versionadded:: 0.3.10
element1, element2 : {str, int}, optional
Element symbol or atomic number of basis
:class:`~sknano.core.Atom` 1 and 2
.. deprecated:: 0.3.10
Use `basis` instead
bond : float, optional
:math:`\\mathrm{a}_{\\mathrm{CC}} =` distance between
nearest neighbor atoms, in units of **Angstroms**.
verbose : bool, optional
if `True`, show verbose output
Examples
--------
>>> from sknano.generators import MWNT
"""
def __init__(self, Ch_list=None, Nwalls=None, Lz=None,
min_wall_diameter=None, max_wall_diameter=None,
max_walls=None, chiral_types=None, wall_spacing=2 * r_CC_vdw,
basis=['C', 'C'], bond=aCC, **kwargs):
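        # Accept a single ``Ch`` keyword as an alias for ``Ch_list``.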
if Ch_list is None and 'Ch' in kwargs:
Ch_list = kwargs['Ch']
del kwargs['Ch']
super().__init__(basis=basis, bond=bond, **kwargs)
if Ch_list is None or not isinstance(Ch_list, list):
Ch_list = \
self.generate_Ch_list(Nwalls=Nwalls, max_walls=max_walls,
min_wall_diameter=min_wall_diameter,
max_wall_diameter=max_wall_diameter,
chiral_types=chiral_types,
wall_spacing=wall_spacing)
self.Ch_list = Ch_list[:]
self._min_wall_diameter = min_wall_diameter
self._max_wall_diameter = max_wall_diameter
self._max_walls = max_walls
self._wall_spacing = wall_spacing
if Lz is None:
Lz = 1.0
self.Lz = Lz
self.unit_cell = self.get_wall(self.Ch_list[-1]).unit_cell
if self.verbose:
print(self.walls)
self.fmtstr = "Ch_list={Ch_list!r}, Lz={Lz!r}, bond={bond!r}, " + \
"basis={basis!r}, min_wall_diameter={min_wall_diameter!r}, " + \
"max_wall_diameter={max_wall_diameter!r}, " + \
"max_walls={max_walls!r}, chiral_types={chiral_types!r}, " + \
"wall_spacing={wall_spacing!r}"
def todict(self):
"""Return :class:`~python:dict` of `MWNT` attributes."""
return dict(Ch_list=self.Ch_list, Lz=self.Lz,
basis=self.basis, bond=self.bond,
min_wall_diameter=self.min_wall_diameter,
max_wall_diameter=self.max_wall_diameter,
max_walls=self.max_walls,
chiral_types=self.chiral_types,
wall_spacing=self.wall_spacing)
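# A minimal usage sketch of the MWNT class defined above; the chiral indices
# and length below are arbitrary illustrative values, not package defaults.
def _example_mwnt_summary():
    mwnt = MWNT(Ch_list=[(5, 5), (10, 10)], Lz=1.0)
    # Nwalls, the per-wall diameters and the total atom count all come from
    # the MWNTMixin properties defined above.
    return mwnt.Nwalls, mwnt.dt_list, mwnt.Natoms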
|
androomerrill/scikit-nano
|
sknano/structures/_mwnt.py
|
Python
|
bsd-2-clause
| 12,635
|
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc5208
class PrivateKeyInfoTestCase(unittest.TestCase):
pem_text = """\
MIIBVgIBADANBgkqhkiG9w0BAQEFAASCAUAwggE8AgEAAkEAx8CO8E0MNgEKXXDf
I1xqBmQ+Gp3Srkqp45OApIu4lZ97n5VJ5HljU9wXcPIfx29Le3w8hCPEkugpLsdV
GWx+EQIDAQABAkEAiv3f+DGEh6ddsPszKQXK+LuTwy2CRajKYgJnBxf5zpG50XK4
899An+x/pGYVmVED1f0JCbk3BUbv7HViLq0qgQIhAOYlQJaQ8KJBijDpjF62lcVr
QrqFPM4+ZrHsw0dVY2CZAiEA3jE5ngkVPfjFWEr7wS50EJhGiYlQeY4l+hADGIhd
XDkCIQDIHt5xzmif/nOGop5/gS7ssp8ch1zfTh2IW4NWlOZMCQIgLZmYo5BlpaRK
jAZHiKwJ8eXuhAeEVo4PyTREDmLeFjECIQCfyUPDclPo2O8ycPpozwoGwvKFrNZJ
VWRpRKqYnOAIXQ==
"""
def setUp(self):
self.asn1Spec = rfc5208.PrivateKeyInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder.decode(
substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder.encode(asn1Object))
class EncryptedPrivateKeyInfoTestCase(unittest.TestCase):
pem_text = """\
MIIBgTAbBgkqhkiG9w0BBQMwDgQIdtFgDWnipT8CAggABIIBYN0hkm2xqkTCt8dJ
iZS8+HNiyHxy8g+rmWSXv/i+bTHFUReZA2GINtTRUkWpXqWcSHxNslgf7QdfgbVJ
xQiUM+lLhwOFh85iAHR3xmPU1wfN9NvY9DiLSpM0DMhF3OvAMZD75zIhA0GSKu7w
dUu7ey7H4fv7bez6RhEyLdKw9/Lf2KNStNOs4ow9CAtCoxeoMSniTt6CNhbvCkve
9vNHKiGavX1tS/YTog4wiiGzh2YxuW1RiQpTdhWiKyECgD8qQVg2tY5t3QRcXrzi
OkStpkiAPAbiwS/gyHpsqiLo0al63SCxRefugbn1ucZyc5Ya59e3xNFQXCNhYl+Z
Hl3hIl3cssdWZkJ455Z/bBE29ks1HtsL+bTfFi+kw/4yuMzoaB8C7rXScpGNI/8E
pvTU2+wtuoOFcttJregtR94ZHu5wgdYqRydmFNG8PnvZT1mRMmQgUe/vp88FMmsZ
dLsZjNQ=
"""
def setUp(self):
self.asn1Spec = rfc5208.EncryptedPrivateKeyInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder.decode(
substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder.encode(asn1Object))
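# A small sketch of reading individual fields from a decoded PrivateKeyInfo,
# assuming the component names ('privateKeyAlgorithm', 'algorithm',
# 'privateKey') used by pyasn1_modules.rfc5208; the helper itself is
# illustrative and not part of the test suite.
def _example_read_private_key_info(pem_text):
    substrate = pem.readBase64fromText(pem_text)
    asn1Object, _ = der_decoder.decode(
        substrate, asn1Spec=rfc5208.PrivateKeyInfo())
    algorithm_oid = asn1Object['privateKeyAlgorithm']['algorithm']
    key_octets = asn1Object['privateKey'].asOctets()
    return algorithm_oid, key_octets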
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
etingof/pyasn1-modules
|
tests/test_rfc5208.py
|
Python
|
bsd-2-clause
| 2,549
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASE_ENGINE='sqlite3',
INSTALLED_APPS=[
'django.contrib.admin', 'django.contrib.sessions', 'django.contrib.contenttypes',
'linkcheck', 'linkcheck.tests.sampleapp',
],
        ROOT_URLCONF="",
        SITE_DOMAIN="localhost",
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['linkcheck']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
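# ``django.test.simple.run_tests`` only exists on old Django releases; it was
# removed in Django 1.8.  A rough sketch of the equivalent entry point on a
# modern Django, assuming the settings configured above, would use
# DiscoverRunner instead:
def _runtests_discover(*test_args):
    import django
    from django.test.runner import DiscoverRunner
    django.setup()
    failures = DiscoverRunner(verbosity=1, interactive=True).run_tests(
        list(test_args) or ['linkcheck'])
    sys.exit(bool(failures))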
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
yvess/django-linkcheck
|
runtests.py
|
Python
|
bsd-3-clause
| 788
|