Dataset schema:

column      type     stats
code        string   lengths 2 to 1.05M
repo_name   string   lengths 5 to 104
path        string   lengths 4 to 251
language    string   1 class
license     string   15 classes
size        int32    values 2 to 1.05M
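For working with this split programmatically, the sketch below shows one way to load it with the Hugging Face datasets library and inspect a row. The identifier "your-org/python-gpl-code" is a placeholder assumption, not the real hub name; the column names are taken from the schema above.

# A minimal sketch, assuming the split is published on the Hugging Face Hub.
# "your-org/python-gpl-code" is a placeholder identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("your-org/python-gpl-code", split="train")

# Each row carries the six columns described in the schema above.
row = ds[0]
print(row["repo_name"], row["path"], row["license"], row["size"])

# The "code" field holds the full source file as a single string.
print(row["code"][:200])
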
#!/usr/bin/env python ############################################################################# ## ## Copyright (C) 2013 Riverbank Computing Limited. ## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). ## All rights reserved. ## ## This file is part of the examples of PyQt. ## ## $QT_BEGIN_LICENSE:BSD$ ## You may use this file under the terms of the BSD license as follows: ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are ## met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in ## the documentation and/or other materials provided with the ## distribution. ## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor ## the names of its contributors may be used to endorse or promote ## products derived from this software without specific prior written ## permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## $QT_END_LICENSE$ ## ############################################################################# from PyQt5.QtCore import (QFile, QFileInfo, QPoint, QSettings, QSignalMapper, QSize, QTextStream, Qt) from PyQt5.QtGui import QIcon, QKeySequence from PyQt5.QtWidgets import (QAction, QApplication, QFileDialog, QMainWindow, QMdiArea, QMessageBox, QTextEdit, QWidget) import mdi_rc class MdiChild(QTextEdit): sequenceNumber = 1 def __init__(self): super(MdiChild, self).__init__() self.setAttribute(Qt.WA_DeleteOnClose) self.isUntitled = True def newFile(self): self.isUntitled = True self.curFile = "document%d.txt" % MdiChild.sequenceNumber MdiChild.sequenceNumber += 1 self.setWindowTitle(self.curFile + '[*]') self.document().contentsChanged.connect(self.documentWasModified) def loadFile(self, fileName): file = QFile(fileName) if not file.open(QFile.ReadOnly | QFile.Text): QMessageBox.warning(self, "MDI", "Cannot read file %s:\n%s." 
% (fileName, file.errorString())) return False instr = QTextStream(file) QApplication.setOverrideCursor(Qt.WaitCursor) self.setPlainText(instr.readAll()) QApplication.restoreOverrideCursor() self.setCurrentFile(fileName) self.document().contentsChanged.connect(self.documentWasModified) return True def save(self): if self.isUntitled: return self.saveAs() else: return self.saveFile(self.curFile) def saveAs(self): fileName, _ = QFileDialog.getSaveFileName(self, "Save As", self.curFile) if not fileName: return False return self.saveFile(fileName) def saveFile(self, fileName): file = QFile(fileName) if not file.open(QFile.WriteOnly | QFile.Text): QMessageBox.warning(self, "MDI", "Cannot write file %s:\n%s." % (fileName, file.errorString())) return False outstr = QTextStream(file) QApplication.setOverrideCursor(Qt.WaitCursor) outstr << self.toPlainText() QApplication.restoreOverrideCursor() self.setCurrentFile(fileName) return True def userFriendlyCurrentFile(self): return self.strippedName(self.curFile) def currentFile(self): return self.curFile def closeEvent(self, event): if self.maybeSave(): event.accept() else: event.ignore() def documentWasModified(self): self.setWindowModified(self.document().isModified()) def maybeSave(self): if self.document().isModified(): ret = QMessageBox.warning(self, "MDI", "'%s' has been modified.\nDo you want to save your " "changes?" % self.userFriendlyCurrentFile(), QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel) if ret == QMessageBox.Save: return self.save() if ret == QMessageBox.Cancel: return False return True def setCurrentFile(self, fileName): self.curFile = QFileInfo(fileName).canonicalFilePath() self.isUntitled = False self.document().setModified(False) self.setWindowModified(False) self.setWindowTitle(self.userFriendlyCurrentFile() + "[*]") def strippedName(self, fullFileName): return QFileInfo(fullFileName).fileName() class MainWindow(QMainWindow): def __init__(self): super(MainWindow, self).__init__() self.mdiArea = QMdiArea() self.mdiArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded) self.mdiArea.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded) self.setCentralWidget(self.mdiArea) self.mdiArea.subWindowActivated.connect(self.updateMenus) self.windowMapper = QSignalMapper(self) self.windowMapper.mapped[QWidget].connect(self.setActiveSubWindow) self.createActions() self.createMenus() self.createToolBars() self.createStatusBar() self.updateMenus() self.readSettings() self.setWindowTitle("MDI") def closeEvent(self, event): self.mdiArea.closeAllSubWindows() if self.mdiArea.currentSubWindow(): event.ignore() else: self.writeSettings() event.accept() def newFile(self): child = self.createMdiChild() child.newFile() child.show() def open(self): fileName, _ = QFileDialog.getOpenFileName(self) if fileName: existing = self.findMdiChild(fileName) if existing: self.mdiArea.setActiveSubWindow(existing) return child = self.createMdiChild() if child.loadFile(fileName): self.statusBar().showMessage("File loaded", 2000) child.show() else: child.close() def save(self): if self.activeMdiChild() and self.activeMdiChild().save(): self.statusBar().showMessage("File saved", 2000) def saveAs(self): if self.activeMdiChild() and self.activeMdiChild().saveAs(): self.statusBar().showMessage("File saved", 2000) def cut(self): if self.activeMdiChild(): self.activeMdiChild().cut() def copy(self): if self.activeMdiChild(): self.activeMdiChild().copy() def paste(self): if self.activeMdiChild(): self.activeMdiChild().paste() def about(self): QMessageBox.about(self, 
"About MDI", "The <b>MDI</b> example demonstrates how to write multiple " "document interface applications using Qt.") def updateMenus(self): hasMdiChild = (self.activeMdiChild() is not None) self.saveAct.setEnabled(hasMdiChild) self.saveAsAct.setEnabled(hasMdiChild) self.pasteAct.setEnabled(hasMdiChild) self.closeAct.setEnabled(hasMdiChild) self.closeAllAct.setEnabled(hasMdiChild) self.tileAct.setEnabled(hasMdiChild) self.cascadeAct.setEnabled(hasMdiChild) self.nextAct.setEnabled(hasMdiChild) self.previousAct.setEnabled(hasMdiChild) self.separatorAct.setVisible(hasMdiChild) hasSelection = (self.activeMdiChild() is not None and self.activeMdiChild().textCursor().hasSelection()) self.cutAct.setEnabled(hasSelection) self.copyAct.setEnabled(hasSelection) def updateWindowMenu(self): self.windowMenu.clear() self.windowMenu.addAction(self.closeAct) self.windowMenu.addAction(self.closeAllAct) self.windowMenu.addSeparator() self.windowMenu.addAction(self.tileAct) self.windowMenu.addAction(self.cascadeAct) self.windowMenu.addSeparator() self.windowMenu.addAction(self.nextAct) self.windowMenu.addAction(self.previousAct) self.windowMenu.addAction(self.separatorAct) windows = self.mdiArea.subWindowList() self.separatorAct.setVisible(len(windows) != 0) for i, window in enumerate(windows): child = window.widget() text = "%d %s" % (i + 1, child.userFriendlyCurrentFile()) if i < 9: text = '&' + text action = self.windowMenu.addAction(text) action.setCheckable(True) action.setChecked(child is self.activeMdiChild()) action.triggered.connect(self.windowMapper.map) self.windowMapper.setMapping(action, window) def createMdiChild(self): child = MdiChild() self.mdiArea.addSubWindow(child) child.copyAvailable.connect(self.cutAct.setEnabled) child.copyAvailable.connect(self.copyAct.setEnabled) return child def createActions(self): self.newAct = QAction(QIcon(':/images/new.png'), "&New", self, shortcut=QKeySequence.New, statusTip="Create a new file", triggered=self.newFile) self.openAct = QAction(QIcon(':/images/open.png'), "&Open...", self, shortcut=QKeySequence.Open, statusTip="Open an existing file", triggered=self.open) self.saveAct = QAction(QIcon(':/images/save.png'), "&Save", self, shortcut=QKeySequence.Save, statusTip="Save the document to disk", triggered=self.save) self.saveAsAct = QAction("Save &As...", self, shortcut=QKeySequence.SaveAs, statusTip="Save the document under a new name", triggered=self.saveAs) self.exitAct = QAction("E&xit", self, shortcut=QKeySequence.Quit, statusTip="Exit the application", triggered=QApplication.instance().closeAllWindows) self.cutAct = QAction(QIcon(':/images/cut.png'), "Cu&t", self, shortcut=QKeySequence.Cut, statusTip="Cut the current selection's contents to the clipboard", triggered=self.cut) self.copyAct = QAction(QIcon(':/images/copy.png'), "&Copy", self, shortcut=QKeySequence.Copy, statusTip="Copy the current selection's contents to the clipboard", triggered=self.copy) self.pasteAct = QAction(QIcon(':/images/paste.png'), "&Paste", self, shortcut=QKeySequence.Paste, statusTip="Paste the clipboard's contents into the current selection", triggered=self.paste) self.closeAct = QAction("Cl&ose", self, statusTip="Close the active window", triggered=self.mdiArea.closeActiveSubWindow) self.closeAllAct = QAction("Close &All", self, statusTip="Close all the windows", triggered=self.mdiArea.closeAllSubWindows) self.tileAct = QAction("&Tile", self, statusTip="Tile the windows", triggered=self.mdiArea.tileSubWindows) self.cascadeAct = QAction("&Cascade", self, 
statusTip="Cascade the windows", triggered=self.mdiArea.cascadeSubWindows) self.nextAct = QAction("Ne&xt", self, shortcut=QKeySequence.NextChild, statusTip="Move the focus to the next window", triggered=self.mdiArea.activateNextSubWindow) self.previousAct = QAction("Pre&vious", self, shortcut=QKeySequence.PreviousChild, statusTip="Move the focus to the previous window", triggered=self.mdiArea.activatePreviousSubWindow) self.separatorAct = QAction(self) self.separatorAct.setSeparator(True) self.aboutAct = QAction("&About", self, statusTip="Show the application's About box", triggered=self.about) self.aboutQtAct = QAction("About &Qt", self, statusTip="Show the Qt library's About box", triggered=QApplication.instance().aboutQt) def createMenus(self): self.fileMenu = self.menuBar().addMenu("&File") self.fileMenu.addAction(self.newAct) self.fileMenu.addAction(self.openAct) self.fileMenu.addAction(self.saveAct) self.fileMenu.addAction(self.saveAsAct) self.fileMenu.addSeparator() action = self.fileMenu.addAction("Switch layout direction") action.triggered.connect(self.switchLayoutDirection) self.fileMenu.addAction(self.exitAct) self.editMenu = self.menuBar().addMenu("&Edit") self.editMenu.addAction(self.cutAct) self.editMenu.addAction(self.copyAct) self.editMenu.addAction(self.pasteAct) self.windowMenu = self.menuBar().addMenu("&Window") self.updateWindowMenu() self.windowMenu.aboutToShow.connect(self.updateWindowMenu) self.menuBar().addSeparator() self.helpMenu = self.menuBar().addMenu("&Help") self.helpMenu.addAction(self.aboutAct) self.helpMenu.addAction(self.aboutQtAct) def createToolBars(self): self.fileToolBar = self.addToolBar("File") self.fileToolBar.addAction(self.newAct) self.fileToolBar.addAction(self.openAct) self.fileToolBar.addAction(self.saveAct) self.editToolBar = self.addToolBar("Edit") self.editToolBar.addAction(self.cutAct) self.editToolBar.addAction(self.copyAct) self.editToolBar.addAction(self.pasteAct) def createStatusBar(self): self.statusBar().showMessage("Ready") def readSettings(self): settings = QSettings('Trolltech', 'MDI Example') pos = settings.value('pos', QPoint(200, 200)) size = settings.value('size', QSize(400, 400)) self.move(pos) self.resize(size) def writeSettings(self): settings = QSettings('Trolltech', 'MDI Example') settings.setValue('pos', self.pos()) settings.setValue('size', self.size()) def activeMdiChild(self): activeSubWindow = self.mdiArea.activeSubWindow() if activeSubWindow: return activeSubWindow.widget() return None def findMdiChild(self, fileName): canonicalFilePath = QFileInfo(fileName).canonicalFilePath() for window in self.mdiArea.subWindowList(): if window.widget().currentFile() == canonicalFilePath: return window return None def switchLayoutDirection(self): if self.layoutDirection() == Qt.LeftToRight: QApplication.setLayoutDirection(Qt.RightToLeft) else: QApplication.setLayoutDirection(Qt.LeftToRight) def setActiveSubWindow(self, window): if window: self.mdiArea.setActiveSubWindow(window) if __name__ == '__main__': import sys app = QApplication(sys.argv) mainWin = MainWindow() mainWin.show() sys.exit(app.exec_())
repo_name: baoboa/pyqt5
path:      examples/mainwindows/mdi/mdi.py
language:  Python
license:   gpl-3.0
size:      16,057
from PyQt4.QtCore import Qt from PyQt4.QtGui import QFrame, QLabel, QVBoxLayout, QHBoxLayout, QScrollArea, QWidget, QGridLayout from ert_gui.ertwidgets.models.ertsummary import ErtSummary class SummaryTemplate(object): def __init__(self, title): super(SummaryTemplate, self).__init__() self.text = "" self.__finished = False self.startGroup(title) def startGroup(self, title): if not self.__finished: style = "display: inline-block; width: 150px; vertical-align: top; float: left" self.text += "<div style=\"%s\">\n" % style self.addTitle(title) def addTitle(self, title): if not self.__finished: style = "font-size: 16px; font-weight: bold; font-variant: small-caps;" self.text += "<div style=\"%s\">%s</div>" % (style, title) def addRow(self, value): if not self.__finished: style = "text-indent: 5px;" self.text += "<div style=\"%s\">%s</div>" % (style, value) def endGroup(self): if not self.__finished: self.text += "</div></br>\n" def getText(self): if not self.__finished: self.__finished = True self.endGroup() return "<html>%s</html>" % self.text class SummaryPanel(QFrame): def __init__(self, parent=None): QFrame.__init__(self, parent) self.setMinimumWidth(250) self.setMinimumHeight(150) widget = QWidget() self.layout = QHBoxLayout() widget.setLayout(self.layout) scroll = QScrollArea() scroll.setWidgetResizable(True) scroll.setWidget(widget) layout = QGridLayout() layout.addWidget(scroll) self.setLayout(layout) self.updateSummary() def updateSummary(self): summary = ErtSummary() text = SummaryTemplate("Forward Model") for job in summary.getForwardModels(): text.addRow(job) self.addColumn(text.getText()) text = SummaryTemplate("Parameters") for parameters in summary.getParameters(): text.addRow(parameters) self.addColumn(text.getText()) text = SummaryTemplate("Observations") for observations in summary.getObservations(): text.addRow(observations) self.addColumn(text.getText()) def addColumn(self, text): layout = QVBoxLayout() text_widget = QLabel(text) text_widget.setWordWrap(True) text_widget.setTextFormat(Qt.RichText) layout.addWidget(text_widget) layout.addStretch(1) self.layout.addLayout(layout)
repo_name: arielalmendral/ert
path:      python/python/ert_gui/ertwidgets/summarypanel.py
language:  Python
license:   gpl-3.0
size:      2,674
#!/usr/bin/env python #-*- coding:utf-8 -*- """ This file is part of OpenSesame. OpenSesame is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OpenSesame is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenSesame. If not, see <http://www.gnu.org/licenses/>. """ import os import re import unittest from libopensesame.py3compat import * NO_PUNCTUATION_CHECK = [ u'zh_CN', u'ja_JP' ] class check_translations(unittest.TestCase): def check_punctuation(self, path): for locale in NO_PUNCTUATION_CHECK: if path.endswith(locale + u'.ts'): return False return True def extractWildcards(self, s): return re.findall('(?<!%)%(\(\w+\)){0,1}([dfs]){1}', s) def validateTranslation(self, original, translation, punctuation=True): print(u'"%s" -> "%s"' % (safe_decode(original), safe_decode(translation))) self.assertEqual( self.extractWildcards(original), self.extractWildcards(translation)) self.assertEqual(original[-1] == u' ', translation[-1] == u' ') self.assertEqual(original[-1] == u'\n', translation[-1] == u'\n') self.assertEqual(original[:1] == u' ', translation[:1] == u' ') if not punctuation: return if not original.endswith(u'nr.'): self.assertEqual(original[-1] == u'.', translation[-1] == u'.') self.assertEqual(original[-1] == u'?', translation[-1] == u'?') self.assertEqual(original[-1] == u'…', translation[-1] == u'…') self.assertEqual(original[-1] == u':', translation[-1] == u':') def checkMarkdown(self, dirname): for basename in os.listdir(dirname): if basename == u'locale': continue path = os.path.join(dirname, basename) if os.path.isdir(path): self.checkMarkdown(path) continue if not path.endswith(u'.md'): continue print(path) localedirname = os.path.join(dirname, u'locale') if not os.path.exists(localedirname): print(u'-> no translations') continue for locale in os.listdir(localedirname): localepath = os.path.join(localedirname, locale, basename) if not os.path.exists(localepath): continue print(u'-> '+localepath) with open(path) as fd: original = safe_decode(fd.read()) with open(localepath) as fd: translation = safe_decode(fd.read()) self.validateTranslation(original, translation, self.check_punctuation(localepath)) def checkXML(self, path): print(path) import xml.etree.ElementTree as ET tree = ET.parse(path) root = tree.getroot() for context in root: for message in context: if message.tag != u'message': continue _translation = message.find(u'translation') if _translation.attrib.get(u'type', u'?') == u'obsolete': continue original = message.find(u'source').text translation = _translation.text if translation is None: continue self.validateTranslation(original, translation, self.check_punctuation(path)) def checkTs(self, dirname): for basename in os.listdir(dirname): if not basename.endswith(u'.ts'): continue path = os.path.join(dirname, basename) print(path) self.checkXML(path) def runTest(self): self.checkMarkdown(u'opensesame_resources') self.checkMarkdown(u'opensesame_plugins') self.checkMarkdown(u'opensesame_extensions') self.checkTs(u'opensesame_resources/ts') if __name__ == '__main__': unittest.main()
repo_name: eort/OpenSesame
path:      opensesame_unittest/translations.py
language:  Python
license:   gpl-3.0
size:      3,743
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com> # # Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact # as a reference and starting point. # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: maven_artifact short_description: Downloads an Artifact from a Maven Repository version_added: "2.0" description: - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve - snapshots or release versions of the artifact and will resolve the latest available version if one is not - available. author: "Chris Schmidt (@chrisisbeef)" requirements: - "python >= 2.6" - lxml - boto if using a S3 repository (s3://...) options: group_id: description: - The Maven groupId coordinate required: true artifact_id: description: - The maven artifactId coordinate required: true version: description: - The maven version coordinate required: false default: latest classifier: description: - The maven classifier coordinate required: false default: null extension: description: - The maven type/extension coordinate required: false default: jar repository_url: description: - The URL of the Maven Repository to download from. - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2. required: false default: http://repo1.maven.org/maven2 username: description: - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3 required: false default: null aliases: [ "aws_secret_key" ] password: description: - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3 required: false default: null aliases: [ "aws_secret_access_key" ] dest: description: - The path where the artifact should be written to required: true default: false state: description: - The desired state of the artifact required: true default: present choices: [present,absent] timeout: description: - Specifies a timeout in seconds for the connection attempt required: false default: 10 version_added: "2.3" validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists. 
required: false default: 'yes' choices: ['yes', 'no'] version_added: "1.9.3" ''' EXAMPLES = ''' # Download the latest version of the JUnit framework artifact from Maven Central - maven_artifact: group_id: junit artifact_id: junit dest: /tmp/junit-latest.jar # Download JUnit 4.11 from Maven Central - maven_artifact: group_id: junit artifact_id: junit version: 4.11 dest: /tmp/junit-4.11.jar # Download an artifact from a private repository requiring authentication - maven_artifact: group_id: com.company artifact_id: library-name repository_url: 'https://repo.company.com/maven' username: user password: pass dest: /tmp/library-name-latest.jar # Download a WAR File to the Tomcat webapps directory to be deployed - maven_artifact: group_id: com.company artifact_id: web-app extension: war repository_url: 'https://repo.company.com/maven' dest: /var/lib/tomcat7/webapps/web-app.war ''' import hashlib import os import posixpath import sys from lxml import etree try: import boto3 HAS_BOTO = True except ImportError: HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib import parse as urlparse from ansible.module_utils.urls import fetch_url class Artifact(object): def __init__(self, group_id, artifact_id, version, classifier=None, extension='jar'): if not group_id: raise ValueError("group_id must be set") if not artifact_id: raise ValueError("artifact_id must be set") self.group_id = group_id self.artifact_id = artifact_id self.version = version self.classifier = classifier if not extension: self.extension = "jar" else: self.extension = extension def is_snapshot(self): return self.version and self.version.endswith("SNAPSHOT") def path(self, with_version=True): base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id) if with_version and self.version: return posixpath.join(base, self.version) else: return base def _generate_filename(self): if not self.classifier: return self.artifact_id + "." + self.extension else: return self.artifact_id + "-" + self.classifier + "." 
+ self.extension def get_filename(self, filename=None): if not filename: filename = self._generate_filename() elif os.path.isdir(filename): filename = os.path.join(filename, self._generate_filename()) return filename def __str__(self): if self.classifier: return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version) elif self.extension != "jar": return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version) else: return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version) @staticmethod def parse(input): parts = input.split(":") if len(parts) >= 3: g = parts[0] a = parts[1] v = parts[len(parts) - 1] t = None c = None if len(parts) == 4: t = parts[2] if len(parts) == 5: t = parts[2] c = parts[3] return Artifact(g, a, v, c, t) else: return None class MavenDownloader: def __init__(self, module, base="http://repo1.maven.org/maven2"): self.module = module if base.endswith("/"): base = base.rstrip("/") self.base = base self.user_agent = "Maven Artifact Downloader/1.0" def _find_latest_version_available(self, artifact): path = "/%s/maven-metadata.xml" % (artifact.path(False)) xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r)) v = xml.xpath("/metadata/versioning/versions/version[last()]/text()") if v: return v[0] def find_uri_for_artifact(self, artifact): if artifact.version == "latest": artifact.version = self._find_latest_version_available(artifact) if artifact.is_snapshot(): path = "/%s/maven-metadata.xml" % (artifact.path()) xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r)) timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0] buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0] for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"): if (len(snapshotArtifact.xpath("classifier/text()")) > 0 and snapshotArtifact.xpath("classifier/text()")[0] == artifact.classifier and len(snapshotArtifact.xpath("extension/text()")) > 0 and snapshotArtifact.xpath("extension/text()")[0] == artifact.extension): return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0]) return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber)) return self._uri_for_artifact(artifact, artifact.version) def _uri_for_artifact(self, artifact, version=None): if artifact.is_snapshot() and not version: raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact)) elif not artifact.is_snapshot(): version = artifact.version if artifact.classifier: return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension) return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." 
+ artifact.extension) def _request(self, url, failmsg, f): url_to_use = url parsed_url = urlparse(url) if parsed_url.scheme=='s3': parsed_url = urlparse(url) bucket_name = parsed_url.netloc key_name = parsed_url.path[1:] client = boto3.client('s3',aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', '')) url_to_use = client.generate_presigned_url('get_object',Params={'Bucket':bucket_name,'Key':key_name},ExpiresIn=10) req_timeout = self.module.params.get('timeout') # Hack to add parameters in the way that fetch_url expects self.module.params['url_username'] = self.module.params.get('username', '') self.module.params['url_password'] = self.module.params.get('password', '') self.module.params['http_agent'] = self.module.params.get('user_agent', None) response, info = fetch_url(self.module, url_to_use, timeout=req_timeout) if info['status'] != 200: raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use) else: return f(response) def download(self, artifact, filename=None): filename = artifact.get_filename(filename) if not artifact.version or artifact.version == "latest": artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact), artifact.classifier, artifact.extension) url = self.find_uri_for_artifact(artifact) if not self.verify_md5(filename, url + ".md5"): response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r) if response: f = open(filename, 'w') # f.write(response.read()) self._write_chunks(response, f, report_hook=self.chunk_report) f.close() return True else: return False else: return True def chunk_report(self, bytes_so_far, chunk_size, total_size): percent = float(bytes_so_far) / total_size percent = round(percent * 100, 2) sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" % (bytes_so_far, total_size, percent)) if bytes_so_far >= total_size: sys.stdout.write('\n') def _write_chunks(self, response, file, chunk_size=8192, report_hook=None): total_size = response.info().getheader('Content-Length').strip() total_size = int(total_size) bytes_so_far = 0 while 1: chunk = response.read(chunk_size) bytes_so_far += len(chunk) if not chunk: break file.write(chunk) if report_hook: report_hook(bytes_so_far, chunk_size, total_size) return bytes_so_far def verify_md5(self, file, remote_md5): if not os.path.exists(file): return False else: local_md5 = self._local_md5(file) remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read()) return local_md5 == remote def _local_md5(self, file): md5 = hashlib.md5() f = open(file, 'rb') for chunk in iter(lambda: f.read(8192), ''): md5.update(chunk) f.close() return md5.hexdigest() def main(): module = AnsibleModule( argument_spec = dict( group_id = dict(default=None), artifact_id = dict(default=None), version = dict(default="latest"), classifier = dict(default=None), extension = dict(default='jar'), repository_url = dict(default=None), username = dict(default=None,aliases=['aws_secret_key']), password = dict(default=None, no_log=True,aliases=['aws_secret_access_key']), state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state timeout = dict(default=10, type='int'), dest = dict(type="path", default=None), validate_certs = dict(required=False, default=True, type='bool'), ) ) repository_url = module.params["repository_url"] if not repository_url: repository_url = "http://repo1.maven.org/maven2" try: parsed_url = 
urlparse(repository_url) except AttributeError as e: module.fail_json(msg='url parsing went wrong %s' % e) if parsed_url.scheme=='s3' and not HAS_BOTO: module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs') group_id = module.params["group_id"] artifact_id = module.params["artifact_id"] version = module.params["version"] classifier = module.params["classifier"] extension = module.params["extension"] state = module.params["state"] dest = module.params["dest"] #downloader = MavenDownloader(module, repository_url, repository_username, repository_password) downloader = MavenDownloader(module, repository_url) try: artifact = Artifact(group_id, artifact_id, version, classifier, extension) except ValueError as e: module.fail_json(msg=e.args[0]) prev_state = "absent" if os.path.isdir(dest): dest = posixpath.join(dest, artifact_id + "-" + version + "." + extension) if os.path.lexists(dest) and downloader.verify_md5(dest, downloader.find_uri_for_artifact(artifact) + '.md5'): prev_state = "present" else: path = os.path.dirname(dest) if not os.path.exists(path): os.makedirs(path) if prev_state == "present": module.exit_json(dest=dest, state=state, changed=False) try: if downloader.download(artifact, dest): module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) else: module.fail_json(msg="Unable to download the artifact") except ValueError as e: module.fail_json(msg=e.args[0]) if __name__ == '__main__': main()
repo_name: fernandezcuesta/ansible
path:      lib/ansible/modules/packaging/language/maven_artifact.py
language:  Python
license:   gpl-3.0
size:      15,518
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2014 Artyom Topchyan <artyom.topchyan@live.com> # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html # Based on Copyright (C) 2014 Radim Rehurek <radimrehurek@seznam.cz> """Python wrapper for `Dynamic Topic Models (DTM) <http://www.cs.columbia.edu/~blei/papers/BleiLafferty2006a.pdf>`_ and the `Document Influence Model (DIM) <http://www.cs.columbia.edu/~blei/papers/GerrishBlei2010.pdf>`_. Installation ------------ You have 2 ways, how to make binaries: #. Use precompiled binaries for your OS version from `/magsilva/dtm/ <https://github.com/magsilva/dtm/tree/master/bin>`_ #. Compile binaries manually from `/blei-lab/dtm <https://github.com/blei-lab/dtm.git>`_ (original instruction available in https://github.com/blei-lab/dtm/blob/master/README.md), or use this :: git clone https://github.com/blei-lab/dtm.git sudo apt-get install libgsl0-dev cd dtm/dtm make Examples -------- .. sourcecode:: pycon >>> from gensim.test.utils import common_corpus, common_dictionary >>> from gensim.models.wrappers import DtmModel >>> >>> path_to_dtm_binary = "/path/to/dtm/binary" >>> model = DtmModel( ... path_to_dtm_binary, corpus=common_corpus, id2word=common_dictionary, ... time_slices=[1] * len(common_corpus) ... ) """ import logging import random import warnings import tempfile import os from subprocess import PIPE import numpy as np from gensim import utils, corpora, matutils from gensim.utils import check_output logger = logging.getLogger(__name__) class DtmModel(utils.SaveLoad): """Python wrapper using `DTM implementation <https://github.com/magsilva/dtm/tree/master/bin>`_. Communication between DTM and Python takes place by passing around data files on disk and executing the DTM binary as a subprocess. Warnings -------- This is **only** python wrapper for `DTM implementation <https://github.com/magsilva/dtm/tree/master/bin>`_, you need to install original implementation first and pass the path to binary to ``dtm_path``. """ def __init__(self, dtm_path, corpus=None, time_slices=None, mode='fit', model='dtm', num_topics=100, id2word=None, prefix=None, lda_sequence_min_iter=6, lda_sequence_max_iter=20, lda_max_em_iter=10, alpha=0.01, top_chain_var=0.005, rng_seed=0, initialize_lda=True): """ Parameters ---------- dtm_path : str Path to the dtm binary, e.g. `/home/username/dtm/dtm/main`. corpus : iterable of iterable of (int, int) Collection of texts in BoW format. time_slices : list of int Sequence of timestamps. mode : {'fit', 'time'}, optional Controls the mode of the mode: 'fit' is for training, 'time' for analyzing documents through time according to a DTM, basically a held out set. model : {'fixed', 'dtm'}, optional Control model that will be runned: 'fixed' is for DIM and 'dtm' for DTM. num_topics : int, optional Number of topics. id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional Mapping between tokens ids and words from corpus, if not specified - will be inferred from `corpus`. prefix : str, optional Prefix for produced temporary files. lda_sequence_min_iter : int, optional Min iteration of LDA. lda_sequence_max_iter : int, optional Max iteration of LDA. lda_max_em_iter : int, optional Max em optimization iterations in LDA. alpha : int, optional Hyperparameter that affects sparsity of the document-topics for the LDA models in each timeslice. top_chain_var : float, optional This hyperparameter controls one of the key aspect of topic evolution which is the speed at which these topics evolve. 
A smaller top_chain_var leads to similar word distributions over multiple timeslice. rng_seed : int, optional Random seed. initialize_lda : bool, optional If True - initialize DTM with LDA. """ if not os.path.isfile(dtm_path): raise ValueError("dtm_path must point to the binary file, not to a folder") self.dtm_path = dtm_path self.id2word = id2word if self.id2word is None: logger.warning("no word id mapping provided; initializing from corpus, assuming identity") self.id2word = utils.dict_from_corpus(corpus) self.num_terms = len(self.id2word) else: self.num_terms = 0 if not self.id2word else 1 + max(self.id2word.keys()) if self.num_terms == 0: raise ValueError("cannot compute DTM over an empty collection (no terms)") self.num_topics = num_topics try: lencorpus = len(corpus) except TypeError: logger.warning("input corpus stream has no len(); counting documents") lencorpus = sum(1 for _ in corpus) if lencorpus == 0: raise ValueError("cannot compute DTM over an empty corpus") if model == "fixed" and any(not text for text in corpus): raise ValueError("""There is a text without words in the input corpus. This breaks method='fixed' (The DIM model).""") if lencorpus != sum(time_slices): raise ValueError( "mismatched timeslices %{slices} for corpus of len {clen}" .format(slices=sum(time_slices), clen=lencorpus) ) self.lencorpus = lencorpus if prefix is None: rand_prefix = hex(random.randint(0, 0xffffff))[2:] + '_' prefix = os.path.join(tempfile.gettempdir(), rand_prefix) self.prefix = prefix self.time_slices = time_slices self.lda_sequence_min_iter = int(lda_sequence_min_iter) self.lda_sequence_max_iter = int(lda_sequence_max_iter) self.lda_max_em_iter = int(lda_max_em_iter) self.alpha = alpha self.top_chain_var = top_chain_var self.rng_seed = rng_seed self.initialize_lda = str(initialize_lda).lower() self.lambda_ = None self.obs_ = None self.lhood_ = None self.gamma_ = None self.init_alpha = None self.init_beta = None self.init_ss = None self.em_steps = [] self.influences_time = [] if corpus is not None: self.train(corpus, time_slices, mode, model) def fout_liklihoods(self): """Get path to temporary lhood data file. Returns ------- str Path to lhood data file. """ return self.prefix + 'train_out/lda-seq/' + 'lhoods.dat' def fout_gamma(self): """Get path to temporary gamma data file. Returns ------- str Path to gamma data file. """ return self.prefix + 'train_out/lda-seq/' + 'gam.dat' def fout_prob(self): """Get template of path to temporary file. Returns ------- str Path to file. """ return self.prefix + 'train_out/lda-seq/' + 'topic-{i}-var-e-log-prob.dat' def fout_observations(self): """Get template of path to temporary file. Returns ------- str Path to file. """ return self.prefix + 'train_out/lda-seq/' + 'topic-{i}-var-obs.dat' def fout_influence(self): """Get template of path to temporary file. Returns ------- str Path to file. """ return self.prefix + 'train_out/lda-seq/' + 'influence_time-{i}' def foutname(self): """Get path to temporary file. Returns ------- str Path to file. """ return self.prefix + 'train_out' def fem_steps(self): """Get path to temporary em_step data file. Returns ------- str Path to em_step data file. """ return self.prefix + 'train_out/' + 'em_log.dat' def finit_alpha(self): """Get path to initially trained lda alpha file. Returns ------- str Path to initially trained lda alpha file. """ return self.prefix + 'train_out/' + 'initial-lda.alpha' def finit_beta(self): """Get path to initially trained lda beta file. 
Returns ------- str Path to initially trained lda beta file. """ return self.prefix + 'train_out/' + 'initial-lda.beta' def flda_ss(self): """Get path to initial lda binary file. Returns ------- str Path to initial lda binary file. """ return self.prefix + 'train_out/' + 'initial-lda-ss.dat' def fcorpustxt(self): """Get path to temporary file. Returns ------- str Path to multiple train binary file. """ return self.prefix + 'train-mult.dat' def fcorpus(self): """Get path to corpus file. Returns ------- str Path to corpus file. """ return self.prefix + 'train' def ftimeslices(self): """Get path to time slices binary file. Returns ------- str Path to time slices binary file. """ return self.prefix + 'train-seq.dat' def convert_input(self, corpus, time_slices): """Convert corpus into LDA-C format by :class:`~gensim.corpora.bleicorpus.BleiCorpus` and save to temp file. Path to temporary file produced by :meth:`~gensim.models.wrappers.dtmmodel.DtmModel.ftimeslices`. Parameters ---------- corpus : iterable of iterable of (int, float) Corpus in BoW format. time_slices : list of int Sequence of timestamps. """ logger.info("serializing temporary corpus to %s", self.fcorpustxt()) # write out the corpus in a file format that DTM understands: corpora.BleiCorpus.save_corpus(self.fcorpustxt(), corpus) with utils.open(self.ftimeslices(), 'wb') as fout: fout.write(utils.to_utf8(str(len(self.time_slices)) + "\n")) for sl in time_slices: fout.write(utils.to_utf8(str(sl) + "\n")) def train(self, corpus, time_slices, mode, model): """Train DTM model. Parameters ---------- corpus : iterable of iterable of (int, int) Collection of texts in BoW format. time_slices : list of int Sequence of timestamps. mode : {'fit', 'time'}, optional Controls the mode of the mode: 'fit' is for training, 'time' for analyzing documents through time according to a DTM, basically a held out set. model : {'fixed', 'dtm'}, optional Control model that will be runned: 'fixed' is for DIM and 'dtm' for DTM. 
""" self.convert_input(corpus, time_slices) arguments = \ "--ntopics={p0} --model={mofrl} --mode={p1} --initialize_lda={p2} --corpus_prefix={p3} " \ "--outname={p4} --alpha={p5}".format( p0=self.num_topics, mofrl=model, p1=mode, p2=self.initialize_lda, p3=self.fcorpus(), p4=self.foutname(), p5=self.alpha ) params = \ "--lda_max_em_iter={p0} --lda_sequence_min_iter={p1} --lda_sequence_max_iter={p2} " \ "--top_chain_var={p3} --rng_seed={p4} ".format( p0=self.lda_max_em_iter, p1=self.lda_sequence_min_iter, p2=self.lda_sequence_max_iter, p3=self.top_chain_var, p4=self.rng_seed ) arguments = arguments + " " + params logger.info("training DTM with args %s", arguments) cmd = [self.dtm_path] + arguments.split() logger.info("Running command %s", cmd) check_output(args=cmd, stderr=PIPE) self.em_steps = np.loadtxt(self.fem_steps()) self.init_ss = np.loadtxt(self.flda_ss()) if self.initialize_lda: self.init_alpha = np.loadtxt(self.finit_alpha()) self.init_beta = np.loadtxt(self.finit_beta()) self.lhood_ = np.loadtxt(self.fout_liklihoods()) # document-topic proportions self.gamma_ = np.loadtxt(self.fout_gamma()) # cast to correct shape, gamme[5,10] is the proprtion of the 10th topic # in doc 5 self.gamma_.shape = (self.lencorpus, self.num_topics) # normalize proportions self.gamma_ /= self.gamma_.sum(axis=1)[:, np.newaxis] self.lambda_ = np.zeros((self.num_topics, self.num_terms * len(self.time_slices))) self.obs_ = np.zeros((self.num_topics, self.num_terms * len(self.time_slices))) for t in range(self.num_topics): topic = "%03d" % t self.lambda_[t, :] = np.loadtxt(self.fout_prob().format(i=topic)) self.obs_[t, :] = np.loadtxt(self.fout_observations().format(i=topic)) # cast to correct shape, lambda[5,10,0] is the proportion of the 10th # topic in doc 5 at time 0 self.lambda_.shape = (self.num_topics, self.num_terms, len(self.time_slices)) self.obs_.shape = (self.num_topics, self.num_terms, len(self.time_slices)) # extract document influence on topics for each time slice # influences_time[0] , influences at time 0 if model == 'fixed': for k, t in enumerate(self.time_slices): stamp = "%03d" % k influence = np.loadtxt(self.fout_influence().format(i=stamp)) influence.shape = (t, self.num_topics) # influence[2,5] influence of document 2 on topic 5 self.influences_time.append(influence) def print_topics(self, num_topics=10, times=5, num_words=10): """Alias for :meth:`~gensim.models.wrappers.dtmmodel.DtmModel.show_topics`. Parameters ---------- num_topics : int, optional Number of topics to return, set `-1` to get all topics. times : int, optional Number of times. num_words : int, optional Number of words. Returns ------- list of str Topics as a list of strings """ return self.show_topics(num_topics, times, num_words, log=True) def show_topics(self, num_topics=10, times=5, num_words=10, log=False, formatted=True): """Get the `num_words` most probable words for `num_topics` number of topics at 'times' time slices. Parameters ---------- num_topics : int, optional Number of topics to return, set `-1` to get all topics. times : int, optional Number of times. num_words : int, optional Number of words. log : bool, optional THIS PARAMETER WILL BE IGNORED. formatted : bool, optional If `True` - return the topics as a list of strings, otherwise as lists of (weight, word) pairs. 
Returns ------- list of str Topics as a list of strings (if formatted=True) **OR** list of (float, str) Topics as list of (weight, word) pairs (if formatted=False) """ if num_topics < 0 or num_topics >= self.num_topics: num_topics = self.num_topics chosen_topics = range(num_topics) else: num_topics = min(num_topics, self.num_topics) chosen_topics = range(num_topics) if times < 0 or times >= len(self.time_slices): times = len(self.time_slices) chosen_times = range(times) else: times = min(times, len(self.time_slices)) chosen_times = range(times) shown = [] for time in chosen_times: for i in chosen_topics: if formatted: topic = self.print_topic(i, time, topn=num_words) else: topic = self.show_topic(i, time, topn=num_words) shown.append(topic) return shown def show_topic(self, topicid, time, topn=50, num_words=None): """Get `num_words` most probable words for the given `topicid`. Parameters ---------- topicid : int Id of topic. time : int Timestamp. topn : int, optional Top number of topics that you'll receive. num_words : int, optional DEPRECATED PARAMETER, use `topn` instead. Returns ------- list of (float, str) Sequence of probable words, as a list of `(word_probability, word)`. """ if num_words is not None: # deprecated num_words is used warnings.warn("The parameter `num_words` is deprecated, will be removed in 4.0.0, use `topn` instead.") topn = num_words topics = self.lambda_[:, :, time] topic = topics[topicid] # likelihood to probability topic = np.exp(topic) # normalize to probability dist topic = topic / topic.sum() # sort according to prob bestn = matutils.argsort(topic, topn, reverse=True) beststr = [(topic[idx], self.id2word[idx]) for idx in bestn] return beststr def print_topic(self, topicid, time, topn=10, num_words=None): """Get the given topic, formatted as a string. Parameters ---------- topicid : int Id of topic. time : int Timestamp. topn : int, optional Top number of topics that you'll receive. num_words : int, optional DEPRECATED PARAMETER, use `topn` instead. Returns ------- str The given topic in string format, like '0.132*someword + 0.412*otherword + ...'. """ if num_words is not None: # deprecated num_words is used warnings.warn("The parameter `num_words` is deprecated, will be removed in 4.0.0, use `topn` instead.") topn = num_words return ' + '.join('%.3f*%s' % v for v in self.show_topic(topicid, time, topn=topn)) def dtm_vis(self, corpus, time): """Get data specified by pyLDAvis format. Parameters ---------- corpus : iterable of iterable of (int, float) Collection of texts in BoW format. time : int Sequence of timestamp. Notes ----- All of these are needed to visualise topics for DTM for a particular time-slice via pyLDAvis. Returns ------- doc_topic : numpy.ndarray Document-topic proportions. topic_term : numpy.ndarray Calculated term of topic suitable for pyLDAvis format. doc_lengths : list of int Length of each documents in corpus. term_frequency : numpy.ndarray Frequency of each word from vocab. vocab : list of str List of words from docpus. """ topic_term = np.exp(self.lambda_[:, :, time]) / np.exp(self.lambda_[:, :, time]).sum() topic_term *= self.num_topics doc_topic = self.gamma_ doc_lengths = [len(doc) for doc_no, doc in enumerate(corpus)] term_frequency = np.zeros(len(self.id2word)) for doc_no, doc in enumerate(corpus): for pair in doc: term_frequency[pair[0]] += pair[1] vocab = [self.id2word[i] for i in range(0, len(self.id2word))] # returns numpy arrays for doc_topic proportions, topic_term proportions, and document_lengths, term_frequency. 
# these should be passed to the `pyLDAvis.prepare` method to visualise one time-slice of DTM topics. return doc_topic, topic_term, doc_lengths, term_frequency, vocab def dtm_coherence(self, time, num_words=20): """Get all topics of a particular time-slice without probability values for it to be used. For either "u_mass" or "c_v" coherence. Parameters ---------- num_words : int Number of words. time : int Timestamp Returns ------- coherence_topics : list of list of str All topics of a particular time-slice without probability values for it to be used. Warnings -------- TODO: because of print format right now can only return for 1st time-slice, should we fix the coherence printing or make changes to the print statements to mirror DTM python? """ coherence_topics = [] for topic_no in range(0, self.num_topics): topic = self.show_topic(topicid=topic_no, time=time, topn=num_words) coherence_topic = [] for prob, word in topic: coherence_topic.append(word) coherence_topics.append(coherence_topic) return coherence_topics
repo_name: midnightradio/gensim
path:      gensim/models/wrappers/dtmmodel.py
language:  Python
license:   gpl-3.0
size:      21,273
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import re import shutil from mozpack.executables import ( is_executable, may_strip, strip, may_elfhack, elfhack, ) from mozpack.chrome.manifest import ManifestEntry from io import BytesIO from mozpack.errors import ErrorMessage from mozpack.mozjar import JarReader import mozpack.path from collections import OrderedDict class Dest(object): ''' Helper interface for BaseFile.copy. The interface works as follows: - read() and write() can be used to sequentially read/write from the underlying file. - a call to read() after a write() will re-open the underlying file and read from it. - a call to write() after a read() will re-open the underlying file, emptying it, and write to it. ''' def __init__(self, path): self.path = path self.mode = None def read(self, length=-1): if self.mode != 'r': self.file = open(self.path, 'rb') self.mode = 'r' return self.file.read(length) def write(self, data): if self.mode != 'w': self.file = open(self.path, 'wb') self.mode = 'w' return self.file.write(data) def exists(self): return os.path.exists(self.path) def close(self): if self.mode: self.mode = None self.file.close() class BaseFile(object): ''' Base interface and helper for file copying. Derived class may implement their own copy function, or rely on BaseFile.copy using the open() member function and/or the path property. ''' def copy(self, dest, skip_if_older=True): ''' Copy the BaseFile content to the destination given as a string or a Dest instance. Avoids replacing existing files if the BaseFile content matches that of the destination, or in case of plain files, if the destination is newer than the original file. This latter behaviour is disabled when skip_if_older is False. Returns whether a copy was actually performed (True) or not (False). ''' if isinstance(dest, basestring): dest = Dest(dest) else: assert isinstance(dest, Dest) can_skip_content_check = False if not dest.exists(): can_skip_content_check = True elif getattr(self, 'path', None) and getattr(dest, 'path', None): # os.path.getmtime returns a result in seconds with precision up to # the microsecond. But microsecond is too precise because # shutil.copystat only copies milliseconds, and seconds is not # enough precision. if skip_if_older and int(os.path.getmtime(self.path) * 1000) \ <= int(os.path.getmtime(dest.path) * 1000): return False elif os.path.getsize(self.path) != os.path.getsize(dest.path): can_skip_content_check = True if can_skip_content_check: if getattr(self, 'path', None) and getattr(dest, 'path', None): shutil.copy2(self.path, dest.path) else: # Ensure the file is always created if not dest.exists(): dest.write('') shutil.copyfileobj(self.open(), dest) return True src = self.open() copy_content = '' while True: dest_content = dest.read(32768) src_content = src.read(32768) copy_content += src_content if len(dest_content) == len(src_content) == 0: break # If the read content differs between origin and destination, # write what was read up to now, and copy the remainder. if dest_content != src_content: dest.write(copy_content) shutil.copyfileobj(src, dest) break if hasattr(self, 'path') and hasattr(dest, 'path'): shutil.copystat(self.path, dest.path) return True def open(self): ''' Return a file-like object allowing to read() the content of the associated file. 
This is meant to be overloaded in subclasses to return a custom file-like object. ''' assert self.path is not None return open(self.path, 'rb') class File(BaseFile): ''' File class for plain files. ''' def __init__(self, path): self.path = path class ExecutableFile(File): ''' File class for executable and library files on OS/2, OS/X and ELF systems. (see mozpack.executables.is_executable documentation). ''' def copy(self, dest, skip_if_older=True): assert isinstance(dest, basestring) # If File.copy didn't actually copy because dest is newer, check the # file sizes. If dest is smaller, it means it is already stripped and # elfhacked, so we can skip. if not File.copy(self, dest, skip_if_older) and \ os.path.getsize(self.path) > os.path.getsize(dest): return False try: if may_strip(dest): strip(dest) if may_elfhack(dest): elfhack(dest) except ErrorMessage: os.remove(dest) raise return True class GeneratedFile(BaseFile): ''' File class for content with no previous existence on the filesystem. ''' def __init__(self, content): self.content = content def open(self): return BytesIO(self.content) class DeflatedFile(BaseFile): ''' File class for members of a jar archive. DeflatedFile.copy() effectively extracts the file from the jar archive. ''' def __init__(self, file): from mozpack.mozjar import JarFileReader assert isinstance(file, JarFileReader) self.file = file def open(self): self.file.seek(0) return self.file class XPTFile(GeneratedFile): ''' File class for a linked XPT file. It takes several XPT files as input (using the add() and remove() member functions), and links them at copy() time. ''' def __init__(self): self._files = set() def add(self, xpt): ''' Add the given XPT file (as a BaseFile instance) to the list of XPTs to link. ''' assert isinstance(xpt, BaseFile) self._files.add(xpt) def remove(self, xpt): ''' Remove the given XPT file (as a BaseFile instance) from the list of XPTs to link. ''' assert isinstance(xpt, BaseFile) self._files.remove(xpt) def copy(self, dest, skip_if_older=True): ''' Link the registered XPTs and place the resulting linked XPT at the destination given as a string or a Dest instance. Avoids an expensive XPT linking if the interfaces in an existing destination match those of the individual XPTs to link. skip_if_older is ignored. ''' if isinstance(dest, basestring): dest = Dest(dest) assert isinstance(dest, Dest) from xpt import xpt_link, Typelib, Interface all_typelibs = [Typelib.read(f.open()) for f in self._files] if dest.exists(): # Typelib.read() needs to seek(), so use a BytesIO for dest # content. dest_interfaces = \ dict((i.name, i) for i in Typelib.read(BytesIO(dest.read())).interfaces if i.iid != Interface.UNRESOLVED_IID) identical = True for f in self._files: typelib = Typelib.read(f.open()) for i in typelib.interfaces: if i.iid != Interface.UNRESOLVED_IID and \ not (i.name in dest_interfaces and i == dest_interfaces[i.name]): identical = False break if identical: return False s = BytesIO() xpt_link(all_typelibs).write(s) dest.write(s.getvalue()) return True def open(self): raise RuntimeError("Unsupported") def isempty(self): ''' Return whether there are XPT files to link. ''' return len(self._files) == 0 class ManifestFile(BaseFile): ''' File class for a manifest file. It takes individual manifest entries (using the add() and remove() member functions), and adjusts them to be relative to the base path for the manifest, given at creation. Example: There is a manifest entry "content webapprt webapprt/content/" relative to "webapprt/chrome". 
When packaging, the entry will be stored in jar:webapprt/omni.ja!/chrome/chrome.manifest, which means the entry will have to be relative to "chrome" instead of "webapprt/chrome". This doesn't really matter when serializing the entry, since this base path is not written out, but it matters when moving the entry at the same time, e.g. to jar:webapprt/omni.ja!/chrome.manifest, which we don't do currently but could in the future. ''' def __init__(self, base, entries=None): self._entries = entries if entries else [] self._base = base def add(self, entry): ''' Add the given entry to the manifest. Entries are rebased at open() time instead of add() time so that they can be more easily remove()d. ''' assert isinstance(entry, ManifestEntry) self._entries.append(entry) def remove(self, entry): ''' Remove the given entry from the manifest. ''' assert isinstance(entry, ManifestEntry) self._entries.remove(entry) def open(self): ''' Return a file-like object allowing to read() the serialized content of the manifest. ''' return BytesIO(''.join('%s\n' % e.rebase(self._base) for e in self._entries)) def __iter__(self): ''' Iterate over entries in the manifest file. ''' return iter(self._entries) def isempty(self): ''' Return whether there are manifest entries to write ''' return len(self._entries) == 0 class MinifiedProperties(BaseFile): ''' File class for minified properties. This wraps around a BaseFile instance, and removes lines starting with a # from its content. ''' def __init__(self, file): assert isinstance(file, BaseFile) self._file = file def open(self): ''' Return a file-like object allowing to read() the minified content of the properties file. ''' return BytesIO(''.join(l for l in self._file.open().readlines() if not l.startswith('#'))) class BaseFinder(object): def __init__(self, base, minify=False): ''' Initializes the instance with a reference base directory. The optional minify argument specifies whether file types supporting minification (currently only "*.properties") should be minified. ''' self.base = base self._minify = minify def find(self, pattern): ''' Yield path, BaseFile_instance pairs for all files under the base directory and its subdirectories that match the given pattern. See the mozpack.path.match documentation for a description of the handled patterns. ''' while pattern.startswith('/'): pattern = pattern[1:] for p, f in self._find(pattern): yield p, self._minify_file(p, f) def __iter__(self): ''' Iterates over all files under the base directory (excluding files starting with a '.' and files at any level under a directory starting with a '.'). for path, file in finder: ... ''' return self.find('') def __contains__(self, pattern): raise RuntimeError("'in' operator forbidden for %s. Use contains()." % self.__class__.__name__) def contains(self, pattern): ''' Return whether some files under the base directory match the given pattern. See the mozpack.path.match documentation for a description of the handled patterns. ''' return any(self.find(pattern)) def _minify_file(self, path, file): ''' Return an appropriate MinifiedSomething wrapper for the given BaseFile instance (file), according to the file type (determined by the given path), if the FileFinder was created with minification enabled. Otherwise, just return the given BaseFile instance. Currently, only "*.properties" files are handled. 
''' if self._minify and not isinstance(file, ExecutableFile): if path.endswith('.properties'): return MinifiedProperties(file) return file class FileFinder(BaseFinder): ''' Helper to get appropriate BaseFile instances from the file system. ''' def __init__(self, base, **kargs): ''' Create a FileFinder for files under the given base directory. ''' BaseFinder.__init__(self, base, **kargs) def _find(self, pattern): ''' Actual implementation of FileFinder.find(), dispatching to specialized member functions depending on what kind of pattern was given. Note all files with a name starting with a '.' are ignored when scanning directories, but are not ignored when explicitely requested. ''' if '*' in pattern: return self._find_glob('', mozpack.path.split(pattern)) elif os.path.isdir(os.path.join(self.base, pattern)): return self._find_dir(pattern) else: return self._find_file(pattern) def _find_dir(self, path): ''' Actual implementation of FileFinder.find() when the given pattern corresponds to an existing directory under the base directory. Ignores file names starting with a '.' under the given path. If the path itself has leafs starting with a '.', they are not ignored. ''' for p in os.listdir(os.path.join(self.base, path)): if p.startswith('.'): continue for p_, f in self._find(mozpack.path.join(path, p)): yield p_, f def _find_file(self, path): ''' Actual implementation of FileFinder.find() when the given pattern corresponds to an existing file under the base directory. ''' srcpath = os.path.join(self.base, path) if not os.path.exists(srcpath): return if is_executable(srcpath): yield path, ExecutableFile(srcpath) else: yield path, File(srcpath) def _find_glob(self, base, pattern): ''' Actual implementation of FileFinder.find() when the given pattern contains globbing patterns ('*' or '**'). This is meant to be an equivalent of: for p, f in self: if mozpack.path.match(p, pattern): yield p, f but avoids scanning the entire tree. ''' if not pattern: for p, f in self._find(base): yield p, f elif pattern[0] == '**': for p, f in self._find(base): if mozpack.path.match(p, mozpack.path.join(*pattern)): yield p, f elif '*' in pattern[0]: if not os.path.exists(os.path.join(self.base, base)): return for p in os.listdir(os.path.join(self.base, base)): if p.startswith('.') and not pattern[0].startswith('.'): continue if re.match(mozpack.path.translate(pattern[0]), p): for p_, f in self._find_glob(mozpack.path.join(base, p), pattern[1:]): yield p_, f else: for p, f in self._find_glob(mozpack.path.join(base, pattern[0]), pattern[1:]): yield p, f class JarFinder(BaseFinder): ''' Helper to get appropriate DeflatedFile instances from a JarReader. ''' def __init__(self, base, reader, **kargs): ''' Create a JarFinder for files in the given JarReader. The base argument is used as an indication of the Jar file location. ''' assert isinstance(reader, JarReader) BaseFinder.__init__(self, base, **kargs) self._files = OrderedDict((f.filename, f) for f in reader) def _find(self, pattern): ''' Actual implementation of JarFinder.find(), dispatching to specialized member functions depending on what kind of pattern was given. ''' if '*' in pattern: for p in self._files: if mozpack.path.match(p, pattern): yield p, DeflatedFile(self._files[p]) elif pattern == '': for p in self._files: yield p, DeflatedFile(self._files[p]) elif pattern in self._files: yield pattern, DeflatedFile(self._files[pattern]) else: for p in self._files: if mozpack.path.basedir(p, [pattern]) == pattern: yield p, DeflatedFile(self._files[p])
SlateScience/MozillaJS
js/src/python/mozbuild/mozpack/files.py
Python
mpl-2.0
17,845
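The finder classes above are the usual entry point into mozpack.files. A minimal usage sketch, based only on the interfaces defined in this file; the base directory and patterns are invented examples:

from mozpack.files import FileFinder

# Minimal usage sketch for the classes above; 'dist/bin' and the patterns
# are invented examples, not paths from the original project.
finder = FileFinder('dist/bin', minify=True)

# Iterating a finder walks every file under the base directory
# (names starting with '.' are skipped).
for path, f in finder:
    print(path)

# find() accepts the glob-style patterns handled by mozpack.path.match;
# each match is a (relative path, BaseFile instance) pair.
for path, f in finder.find('chrome/**/*.properties'):
    data = f.open().read()   # possibly minified content
    # f.copy(dest) would write the content to a destination path.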
#!/usr/bin/python # -*- coding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. ### ### Author: Chris Iatrou (ichrispa@core-vector.net) ### Version: rev 14 ### ### This program was created for educational purposes and has been ### contributed to the open62541 project by the author. All licensing ### terms for this source is inherited by the terms and conditions ### specified for by the open62541 project (see the projects readme ### file for more information on the MPL v2 terms and restrictions). ### ### This program is not meant to be used in a production environment. The ### author is not liable for any complications arising due to the use of ### this program. ### from __future__ import print_function from ua_namespace import * import logging import argparse from open62541_XMLPreprocessor import open62541_XMLPreprocessor logger = logging.getLogger(__name__) parser = argparse.ArgumentParser( description="""Parse OPC UA NamespaceXML file(s) and create C code for generating nodes in open62541 generate_open62541CCode.py will first read all XML files passed on the command line, then link and check the namespace. All nodes that fulfill the basic requirements will then be printed as C-Code intended to be included in the open62541 OPC UA Server that will initialize the corresponding namespace.""", formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('infiles', metavar="<namespaceXML>", nargs='+', type=argparse.FileType('r'), help='Namespace XML file(s). Note that the last definition of a node encountered will be used and all prior definitions are discarded.') parser.add_argument('outputFile', metavar='<outputFile>', #type=argparse.FileType('w', 0), help='The basename for the <output file>.c and <output file>.h files to be generated. This will also be the function name used in the header and c-file.') parser.add_argument('-i','--ignore', metavar="<ignoreFile>", type=argparse.FileType('r'), action='append', dest="ignoreFiles", default=[], help='Loads a list of NodeIDs stored in ignoreFile (one NodeID per line). The compiler will assume that these nodes have been created externally and not generate any code for them. They will however be linked to from other nodes.') parser.add_argument('-b','--blacklist', metavar="<blacklistFile>", type=argparse.FileType('r'), action='append', dest="blacklistFiles", default=[], help='Loads a list of NodeIDs stored in blacklistFile (one NodeID per line). Any of the nodeIds encountered in this file will be removed from the namespace prior to compilation. Any references to these nodes will also be removed') parser.add_argument('-s','--suppress', metavar="<attribute>", action='append', dest="suppressedAttributes", choices=['description', 'browseName', 'displayName', 'writeMask', 'userWriteMask','nodeid'], default=[], help="Suppresses the generation of some node attributes. Currently supported options are 'description', 'browseName', 'displayName', 'writeMask', 'userWriteMask' and 'nodeid'.") parser.add_argument('-v','--verbose', action='count', help='Make the script more verbose. 
Can be applied up to 4 times') args = parser.parse_args() level = logging.CRITICAL verbosity = 0 if args.verbose: verbosity = int(args.verbose) if (verbosity==1): level = logging.ERROR elif (verbosity==2): level = logging.WARNING elif (verbosity==3): level = logging.INFO elif (verbosity>=4): level = logging.DEBUG logging.basicConfig(level=level) logger.setLevel(logging.INFO) # Creating the header is tendious. We can skip the entire process if # the header exists. #if path.exists(argv[-1]+".c") or path.exists(argv[-1]+".h"): # log(None, "File " + str(argv[-1]) + " does already exists.", LOG_LEVEL_INFO) # log(None, "Header generation will be skipped. Delete the header and rerun this script if necessary.", LOG_LEVEL_INFO) # exit(0) # Open the output file outfileh = open(args.outputFile+".h", r"w+") outfilec = open(args.outputFile+".c", r"w+") # Create a new namespace. Note that the namespace name is not significant. ns = opcua_namespace("open62541") # Clean up the XML files by removing duplicate namespaces and unwanted prefixes preProc = open62541_XMLPreprocessor() for xmlfile in args.infiles: logger.info("Preprocessing " + str(xmlfile.name)) preProc.addDocument(xmlfile.name) preProc.preprocessAll() # Parse the XML files for xmlfile in preProc.getPreProcessedFiles(): logger.info("Parsing " + str(xmlfile)) ns.parseXML(xmlfile) # We need to notify the open62541 server of the namespaces used to be able to use i.e. ns=3 namespaceArrayNames = preProc.getUsedNamespaceArrayNames() for key in namespaceArrayNames: ns.addNamespace(key, namespaceArrayNames[key]) # Remove any temp files - they are not needed after the AST is created preProc.removePreprocessedFiles() # Remove blacklisted nodes from the namespace # Doing this now ensures that unlinkable pointers will be cleanly removed # during sanitation. for blacklist in args.blacklistFiles: for line in blacklist.readlines(): line = line.replace(" ","") id = line.replace("\n","") if ns.getNodeByIDString(id) == None: logger.info("Can't blacklist node, namespace does currently not contain a node with id " + str(id)) else: ns.removeNodeById(line) blacklist.close() # Link the references in the namespace logger.info("Linking namespace nodes and references") ns.linkOpenPointers() # Remove nodes that are not printable or contain parsing errors, such as # unresolvable or no references or invalid NodeIDs ns.sanitize() # Parse Datatypes in order to find out what the XML keyed values actually # represent. # Ex. <rpm>123</rpm> is not encodable # only after parsing the datatypes, it is known that # rpm is encoded as a double logger.info("Building datatype encoding rules") ns.buildEncodingRules() # Allocate/Parse the data values. In order to do this, we must have run # buidEncodingRules. logger.info("Allocating variables") ns.allocateVariables() # Users may have manually defined some nodes in their code already (such as serverStatus). # To prevent those nodes from being reprinted, we will simply mark them as already # converted to C-Code. That way, they will still be referred to by other nodes, but # they will not be created themselves. 
ignoreNodes = [] for ignore in args.ignoreFiles: for line in ignore.readlines(): line = line.replace(" ","") id = line.replace("\n","") if ns.getNodeByIDString(id) == None: logger.warn("Can't ignore node, Namespace does currently not contain a node with id " + str(id)) else: ignoreNodes.append(ns.getNodeByIDString(id)) ignore.close() # Create the C Code logger.info("Generating Header") # Returns a tuple of (["Header","lines"],["Code","lines","generated"]) from os.path import basename generatedCode = ns.printOpen62541Header(ignoreNodes, args.suppressedAttributes, outfilename=basename(args.outputFile)) for line in generatedCode[0]: outfileh.write(line+"\n") for line in generatedCode[1]: outfilec.write(line+"\n") outfilec.close() outfileh.close() logger.info("Namespace generation code successfully printed")
AGIsmail/open62541
tools/pyUANamespace/generate_open62541CCode.py
Python
mpl-2.0
7,804
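generate_open62541CCode.py above is driven from the command line through the argparse options it defines. A hypothetical invocation, wrapped in Python for consistency with the rest of this collection; every file name below is a placeholder:

import subprocess

# Hypothetical invocation of the generator script above; all file names
# are placeholders. Two -v flags select logging.WARNING verbosity.
subprocess.check_call([
    'python', 'generate_open62541CCode.py',
    '-i', 'ignored_nodes.txt',        # NodeIDs assumed to exist already
    '-s', 'description',              # suppress description attributes
    '-v', '-v',
    'Opc.Ua.NodeSet2.xml',            # namespace XML input(s)
    'myNS.xml',
    'generated_namespace',            # basename for the .c/.h output
])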
""" Code for propagating Nylas datastore changes to account backends. Syncback actions don't update anything in the local datastore; the Nylas datastore is updated asynchronously (see namespace.py) and bookkeeping about the account backend state is updated when the changes show up in the mail sync engine. Dealing with write actions separately from read syncing allows us more flexibility in responsiveness/latency on data propagation, and also makes us unable to royally mess up a sync and e.g. accidentally delete a bunch of messages on the account backend because our local datastore is messed up. This read/write separation also allows us to easily disable syncback for testing. The main problem the separation presents is the fact that the read syncing needs to deal with the fact that the local datastore may have new changes to it that are not yet reflected in the account backend. In practice, this is not really a problem because of the limited ways mail messages can change. (For more details, see individual account backend submodules.) ACTIONS MUST BE IDEMPOTENT! We are going to have task workers guarantee at-least-once semantics. """ from inbox.actions.backends.generic import (set_remote_unread, set_remote_starred, remote_move, remote_save_draft, remote_update_draft, remote_delete_draft, remote_save_sent, remote_create_folder, remote_update_folder, remote_delete_folder, remote_delete_sent) from inbox.actions.backends.gmail import (remote_change_labels, remote_create_label, remote_update_label, remote_delete_label) from inbox.models import Message from inbox.models.session import session_scope from nylas.logging import get_logger log = get_logger() def can_handle_multiple_records(action_name): return action_name == 'change_labels' def mark_unread(crispin_client, account_id, message_id, args): unread = args['unread'] set_remote_unread(crispin_client, account_id, message_id, unread) def mark_starred(crispin_client, account_id, message_id, args): starred = args['starred'] set_remote_starred(crispin_client, account_id, message_id, starred) def move(crispin_client, account_id, message_id, args): destination = args['destination'] remote_move(crispin_client, account_id, message_id, destination) def change_labels(crispin_client, account_id, message_ids, args): added_labels = args['added_labels'] removed_labels = args['removed_labels'] remote_change_labels(crispin_client, account_id, message_ids, removed_labels, added_labels) def create_folder(crispin_client, account_id, category_id): remote_create_folder(crispin_client, account_id, category_id) def update_folder(crispin_client, account_id, category_id, args): old_name = args['old_name'] new_name = args['new_name'] remote_update_folder(crispin_client, account_id, category_id, old_name, new_name) def delete_folder(crispin_client, account_id, category_id): remote_delete_folder(crispin_client, account_id, category_id) def create_label(crispin_client, account_id, category_id): remote_create_label(crispin_client, account_id, category_id) def update_label(crispin_client, account_id, category_id, args): old_name = args['old_name'] new_name = args['new_name'] remote_update_label(crispin_client, account_id, category_id, old_name, new_name) def delete_label(crispin_client, account_id, category_id): remote_delete_label(crispin_client, account_id, category_id) def save_draft(crispin_client, account_id, message_id, args): """ Sync a new draft back to the remote backend. 
""" with session_scope(account_id) as db_session: message = db_session.query(Message).get(message_id) version = args.get('version') if message is None: log.info('tried to save nonexistent message as draft', message_id=message_id, account_id=account_id) return if not message.is_draft: log.warning('tried to save non-draft message as draft', message_id=message_id, account_id=account_id) return if version != message.version: log.warning('tried to save outdated version of draft') return remote_save_draft(crispin_client, account_id, message_id) def update_draft(crispin_client, account_id, message_id, args): """ Sync an updated draft back to the remote backend. """ with session_scope(account_id) as db_session: message = db_session.query(Message).get(message_id) version = args.get('version') old_message_id_header = args.get('old_message_id_header') if message is None: log.info('tried to save nonexistent message as draft', message_id=message_id, account_id=account_id) return if not message.is_draft: log.warning('tried to save non-draft message as draft', message_id=message_id, account_id=account_id) return if version != message.version: log.warning('tried to save outdated version of draft') return remote_update_draft(crispin_client, account_id, message_id, old_message_id_header) def delete_draft(crispin_client, account_id, draft_id, args): """ Delete a draft from the remote backend. `args` should contain an `nylas_uid` or a `message_id_header` key. This is used to find the draft on "the backend. """ nylas_uid = args.get('nylas_uid') message_id_header = args.get('message_id_header') assert nylas_uid or message_id_header, 'Need at least one header value' remote_delete_draft(crispin_client, account_id, nylas_uid, message_id_header) def save_sent_email(crispin_client, account_id, message_id): """ Create an email on the remote backend. Generic providers expect us to create a copy of the message in the sent folder. """ remote_save_sent(crispin_client, account_id, message_id) def delete_sent_email(crispin_client, account_id, message_id, args): """ Delete an email on the remote backend, in the sent folder. """ message_id_header = args.get('message_id_header') assert message_id_header, 'Need the message_id_header' remote_delete_sent(crispin_client, account_id, message_id_header)
nylas/sync-engine
inbox/actions/base.py
Python
agpl-3.0
7,046
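All of the handlers above share the (crispin_client, account_id, record_id, args) calling convention, so a task worker can dispatch them from a table. A rough sketch of that pattern; the registry and the example payload are illustrative, not the sync-engine's own wiring:

from inbox.actions.base import mark_unread, mark_starred, move, save_draft

# Illustrative dispatch table; not the sync-engine's actual registry.
ACTION_HANDLERS = {
    'mark_unread': mark_unread,
    'mark_starred': mark_starred,
    'move': move,
    'save_draft': save_draft,
}

def execute_action(crispin_client, account_id, action_name, record_id, args):
    # Actions must be idempotent: workers guarantee at-least-once
    # semantics, so a handler may run more than once per record.
    handler = ACTION_HANDLERS[action_name]
    return handler(crispin_client, account_id, record_id, args)

# e.g. execute_action(client, 1, 'mark_unread', 42, {'unread': False})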
"""Add queryable value column to Metadata Revision ID: 2dbf6da0775b Revises: 25129e0316d4 Create Date: 2016-07-18 23:33:52.050259 """ # revision identifiers, used by Alembic. revision = '2dbf6da0775b' down_revision = '25129e0316d4' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('metadata', sa.Column('queryable_value', sa.BigInteger(), nullable=True)) op.create_index(op.f('ix_metadata_queryable_value'), 'metadata', ['queryable_value'], unique=False) def downgrade(): op.drop_index(op.f('ix_metadata_queryable_value'), table_name='metadata') op.drop_column('metadata', 'queryable_value')
jobscore/sync-engine
migrations/versions/226_add_queryable_value_column_to_metadata.py
Python
agpl-3.0
703
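Migrations such as the one above are normally applied with the alembic CLI, but Alembic's Python API can run them as well. A minimal sketch, assuming an alembic.ini pointing at the target database; the config path is a placeholder:

from alembic import command
from alembic.config import Config

# Minimal sketch of applying the revision above programmatically;
# 'alembic.ini' is a placeholder for the project's Alembic config.
cfg = Config('alembic.ini')
command.upgrade(cfg, '2dbf6da0775b')      # apply up to this revision
# command.downgrade(cfg, '25129e0316d4')  # revert to the previous one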
__author__ = 'tony'

HOME_CRUMB = {
    'link': '/',
    'name': 'Home'
}
stonestone/stonefreedomsponsors
djangoproject/core/views/__init__.py
Python
agpl-3.0
73
import json import logging from lxml import etree from xmodule.capa_module import ComplexEncoder from xmodule.progress import Progress from xmodule.stringify import stringify_children import openendedchild from .combined_open_ended_rubric import CombinedOpenEndedRubric log = logging.getLogger("edx.courseware") class SelfAssessmentModule(openendedchild.OpenEndedChild): """ A Self Assessment module that allows students to write open-ended responses, submit, then see a rubric and rate themselves. Persists student supplied hints, answers, and assessment judgment (currently only correct/incorrect). Parses xml definition file--see below for exact format. Sample XML format: <selfassessment> <hintprompt> What hint about this problem would you give to someone? </hintprompt> <submitmessage> Save Succcesful. Thanks for participating! </submitmessage> </selfassessment> """ TEMPLATE_DIR = "combinedopenended/selfassessment" # states INITIAL = 'initial' ASSESSING = 'assessing' REQUEST_HINT = 'request_hint' DONE = 'done' def setup_response(self, system, location, definition, descriptor): """ Sets up the module @param system: Modulesystem @param location: location, to let the module know where it is. @param definition: XML definition of the module. @param descriptor: SelfAssessmentDescriptor @return: None """ self.child_prompt = stringify_children(self.child_prompt) self.child_rubric = stringify_children(self.child_rubric) def get_html(self, system): """ Gets context and renders HTML that represents the module @param system: Modulesystem @return: Rendered HTML """ # set context variables and render template previous_answer = self.get_display_answer() # Use the module name as a unique id to pass to the template. try: module_id = self.system.location.name except AttributeError: # In cases where we don't have a system or a location, use a fallback. module_id = "self_assessment" context = { 'prompt': self.child_prompt, 'previous_answer': previous_answer, 'ajax_url': system.ajax_url, 'initial_rubric': self.get_rubric_html(system), 'state': self.child_state, 'allow_reset': self._allow_reset(), 'child_type': 'selfassessment', 'accept_file_upload': self.accept_file_upload, 'module_id': module_id, } html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context) return html def handle_ajax(self, dispatch, data, system): """ This is called by courseware.module_render, to handle an AJAX call. "data" is request.POST. Returns a json dictionary: { 'progress_changed' : True/False, 'progress': 'none'/'in_progress'/'done', <other request-specific values here > } """ handlers = { 'save_answer': self.save_answer, 'save_assessment': self.save_assessment, 'save_post_assessment': self.save_hint, 'store_answer': self.store_answer, } if dispatch not in handlers: # This is a dev_facing_error log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) # This is a dev_facing_error return json.dumps({'error': 'Error handling action. Please try again.', 'success': False}) before = self.get_progress() d = handlers[dispatch](data, system) after = self.get_progress() d.update({ 'progress_changed': after != before, 'progress_status': Progress.to_js_status_str(after), }) return json.dumps(d, cls=ComplexEncoder) def get_rubric_html(self, system): """ Return the appropriate version of the rubric, based on the state. 
""" if self.child_state == self.INITIAL: return '' rubric_renderer = CombinedOpenEndedRubric(system, False) rubric_dict = rubric_renderer.render_rubric(self.child_rubric) success = rubric_dict['success'] rubric_html = rubric_dict['html'] # we'll render it context = {'rubric': rubric_html, 'max_score': self._max_score, } if self.child_state == self.ASSESSING: context['read_only'] = False elif self.child_state in (self.POST_ASSESSMENT, self.DONE): context['read_only'] = True else: # This is a dev_facing_error raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state)) return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context) def get_hint_html(self, system): """ Return the appropriate version of the hint view, based on state. """ if self.child_state in (self.INITIAL, self.ASSESSING): return '' if self.child_state == self.DONE: # display the previous hint latest = self.latest_post_assessment(system) hint = latest if latest is not None else '' else: hint = '' context = {'hint': hint} if self.child_state == self.POST_ASSESSMENT: context['read_only'] = False elif self.child_state == self.DONE: context['read_only'] = True else: # This is a dev_facing_error raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state)) return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context) def save_answer(self, data, system): """ After the answer is submitted, show the rubric. Args: data: the request dictionary passed to the ajax request. Should contain a key 'student_answer' Returns: Dictionary with keys 'success' and either 'error' (if not success), or 'rubric_html' (if success). """ # Check to see if this problem is closed closed, msg = self.check_if_closed() if closed: return msg if self.child_state != self.INITIAL: return self.out_of_sync_error(data) error_message = "" # add new history element with answer and empty score and hint. success, error_message, data = self.append_file_link_to_student_answer(data) if success: data['student_answer'] = SelfAssessmentModule.sanitize_html(data['student_answer']) self.new_history_entry(data['student_answer']) self.change_state(self.ASSESSING) return { 'success': success, 'rubric_html': self.get_rubric_html(system), 'error': error_message, 'student_response': data['student_answer'].replace("\n","<br/>") } def save_assessment(self, data, _system): """ Save the assessment. If the student said they're right, don't ask for a hint, and go straight to the done state. Otherwise, do ask for a hint. Returns a dict { 'success': bool, 'state': state, 'hint_html': hint_html OR 'message_html': html and 'allow_reset', 'error': error-msg}, with 'error' only present if 'success' is False, and 'hint_html' or 'message_html' only if success is true :param data: A `webob.multidict.MultiDict` containing the keys asasssment: The sum of assessment scores score_list[]: A multivalue key containing all the individual scores """ if self.child_state != self.ASSESSING: return self.out_of_sync_error(data) try: score = int(data.get('assessment')) score_list = [int(x) for x in data.getall('score_list[]')] except (ValueError, TypeError): # This is a dev_facing_error log.error("Non-integer score value passed to save_assessment, or no score list present.") # This is a student_facing_error return {'success': False, 'error': "Error saving your score. 
Please notify course staff."} # Record score as assessment and rubric scores as post assessment self.record_latest_score(score) self.record_latest_post_assessment(json.dumps(score_list)) d = {'success': True, } self.change_state(self.DONE) d['allow_reset'] = self._allow_reset() d['state'] = self.child_state return d def save_hint(self, data, _system): ''' Not used currently, as hints have been removed from the system. Save the hint. Returns a dict { 'success': bool, 'message_html': message_html, 'error': error-msg, 'allow_reset': bool}, with the error key only present if success is False and message_html only if True. ''' if self.child_state != self.POST_ASSESSMENT: # Note: because we only ask for hints on wrong answers, may not have # the same number of hints and answers. return self.out_of_sync_error(data) self.record_latest_post_assessment(data['hint']) self.change_state(self.DONE) return {'success': True, 'message_html': '', 'allow_reset': self._allow_reset()} def latest_post_assessment(self, system): latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system) try: rubric_scores = json.loads(latest_post_assessment) except: rubric_scores = [] return [rubric_scores] class SelfAssessmentDescriptor(): """ Module for adding self assessment questions to courses """ mako_template = "widgets/html-edit.html" module_class = SelfAssessmentModule filename_extension = "xml" has_score = True def __init__(self, system): self.system = system @classmethod def definition_from_xml(cls, xml_object, system): """ Pull out the rubric, prompt, and submitmessage into a dictionary. Returns: { 'submitmessage': 'some-html' 'hintprompt': 'some-html' } """ expected_children = [] for child in expected_children: if len(xml_object.xpath(child)) != 1: # This is a staff_facing_error raise ValueError( u"Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format( child)) def parse(k): """Assumes that xml_object has child k""" return stringify_children(xml_object.xpath(k)[0]) return {} def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' elt = etree.Element('selfassessment') def add_child(k): child_str = u'<{tag}>{body}</{tag}>'.format(tag=k, body=getattr(self, k)) child_node = etree.fromstring(child_str) elt.append(child_node) for child in []: add_child(child) return elt
pelikanchik/edx-platform
common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
Python
agpl-3.0
11,675
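handle_ajax() above is a small dispatch-table pattern: look up the handler by name, run it, then annotate the response with whether progress changed. A stripped-down, self-contained sketch of the same idea; the class and handlers are illustrative, not edX APIs:

import json

# Self-contained sketch of the dispatch pattern used by handle_ajax above;
# the class, handler and progress values are illustrative only.
class TinyModule(object):
    def __init__(self):
        self.progress = 'none'

    def save_answer(self, data):
        self.progress = 'in_progress'
        return {'success': True}

    def handle_ajax(self, dispatch, data):
        handlers = {'save_answer': self.save_answer}
        if dispatch not in handlers:
            return json.dumps({'success': False,
                               'error': 'Error handling action. Please try again.'})
        before = self.progress
        response = handlers[dispatch](data)
        response.update({'progress_changed': self.progress != before,
                         'progress_status': self.progress})
        return json.dumps(response)

# TinyModule().handle_ajax('save_answer', {'student_answer': 'x'})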
""" Module for running content split tests """ import logging import json from webob import Response from xmodule.progress import Progress from xmodule.seq_module import SequenceDescriptor from xmodule.x_module import XModule, module_attr from lxml import etree from xblock.core import XBlock from xblock.fields import Scope, Integer, ReferenceValueDict from xblock.fragment import Fragment log = logging.getLogger('edx.' + __name__) class SplitTestFields(object): """Fields needed for split test module""" has_children = True user_partition_id = Integer( help="Which user partition is used for this test", scope=Scope.content ) # group_id is an int # child is a serialized UsageId (aka Location). This child # location needs to actually match one of the children of this # Block. (expected invariant that we'll need to test, and handle # authoring tools that mess this up) # TODO: is there a way to add some validation around this, to # be run on course load or in studio or .... group_id_to_child = ReferenceValueDict( help="Which child module students in a particular group_id should see", scope=Scope.content ) @XBlock.needs('user_tags') # pylint: disable=abstract-method @XBlock.wants('partitions') class SplitTestModule(SplitTestFields, XModule): """ Show the user the appropriate child. Uses the ExperimentState API to figure out which child to show. Course staff still get put in an experimental condition, but have the option to see the other conditions. The only thing that counts toward their grade/progress is the condition they are actually in. Technical notes: - There is more dark magic in this code than I'd like. The whole varying-children + grading interaction is a tangle between super and subclasses of descriptors and modules. """ def __init__(self, *args, **kwargs): super(SplitTestModule, self).__init__(*args, **kwargs) self.child_descriptor = None child_descriptors = self.get_child_descriptors() if len(child_descriptors) >= 1: self.child_descriptor = child_descriptors[0] if self.child_descriptor is not None: self.child = self.system.get_module(self.child_descriptor) else: self.child = None def get_child_descriptor_by_location(self, location): """ Look through the children and look for one with the given location. Returns the descriptor. If none match, return None """ # NOTE: calling self.get_children() creates a circular reference-- # it calls get_child_descriptors() internally, but that doesn't work until # we've picked a choice. Use self.descriptor.get_children() instead. for child in self.descriptor.get_children(): if child.location == location: return child return None def get_content_titles(self): """ Returns list of content titles for split_test's child. This overwrites the get_content_titles method included in x_module by default. WHY THIS OVERWRITE IS NECESSARY: If we fetch *all* of split_test's children, we'll end up getting all of the possible conditions users could ever see. Ex: If split_test shows a video to group A and HTML to group B, the regular get_content_titles in x_module will get the title of BOTH the video AND the HTML. We only want the content titles that should actually be displayed to the user. split_test's .child property contains *only* the child that should actually be shown to the user, so we call get_content_titles() on only that child. """ return self.child.get_content_titles() def get_child_descriptors(self): """ For grading--return just the chosen child. 
""" group_id = self.get_group_id() if group_id is None: return [] # group_id_to_child comes from json, so it has to have string keys str_group_id = str(group_id) if str_group_id in self.group_id_to_child: child_location = self.group_id_to_child[str_group_id] child_descriptor = self.get_child_descriptor_by_location(child_location) else: # Oops. Config error. log.debug("configuration error in split test module: invalid group_id %r (not one of %r). Showing error", str_group_id, self.group_id_to_child.keys()) if child_descriptor is None: # Peak confusion is great. Now that we set child_descriptor, # get_children() should return a list with one element--the # xmodule for the child log.debug("configuration error in split test module: no such child") return [] return [child_descriptor] def get_group_id(self): """ Returns the group ID, or None if none is available. """ partitions_service = self.runtime.service(self, 'partitions') if not partitions_service: return None return partitions_service.get_user_group_for_partition(self.user_partition_id) def _staff_view(self, context): """ Render the staff view for a split test module. """ fragment = Fragment() contents = [] for group_id in self.group_id_to_child: child_location = self.group_id_to_child[group_id] child_descriptor = self.get_child_descriptor_by_location(child_location) child = self.system.get_module(child_descriptor) rendered_child = child.render('student_view', context) fragment.add_frag_resources(rendered_child) contents.append({ 'group_id': group_id, 'id': child.location.to_deprecated_string(), 'content': rendered_child.content }) # Use the new template fragment.add_content(self.system.render_template('split_test_staff_view.html', { 'items': contents, })) fragment.add_css('.split-test-child { display: none; }') fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_staff.js')) fragment.initialize_js('ABTestSelector') return fragment def studio_preview_view(self, context): """ Renders the Studio preview by rendering each child so that they can all be seen and edited. """ fragment = Fragment() contents = [] for child in self.descriptor.get_children(): rendered_child = self.runtime.get_module(child).render('student_view', context) fragment.add_frag_resources(rendered_child) contents.append({ 'id': child.location.to_deprecated_string(), 'content': rendered_child.content }) fragment.add_content(self.system.render_template('vert_module.html', { 'items': contents })) return fragment def student_view(self, context): """ Render the contents of the chosen condition for students, and all the conditions for staff. """ # When rendering a Studio preview, render all of the block's children if context and context.get('runtime_type', None) == 'studio': return self.studio_preview_view(context) if self.child is None: # raise error instead? In fact, could complain on descriptor load... return Fragment(content=u"<div>Nothing here. 
Move along.</div>") if self.system.user_is_staff: return self._staff_view(context) else: child_fragment = self.child.render('student_view', context) fragment = Fragment(self.system.render_template('split_test_student_view.html', { 'child_content': child_fragment.content, 'child_id': self.child.scope_ids.usage_id, })) fragment.add_frag_resources(child_fragment) fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_student.js')) fragment.initialize_js('SplitTestStudentView') return fragment @XBlock.handler def log_child_render(self, request, suffix=''): # pylint: disable=unused-argument """ Record in the tracking logs which child was rendered """ # TODO: use publish instead, when publish is wired to the tracking logs self.system.track_function('xblock.split_test.child_render', {'child-id': self.child.scope_ids.usage_id}) return Response() def get_icon_class(self): return self.child.get_icon_class() if self.child else 'other' def get_progress(self): children = self.get_children() progresses = [child.get_progress() for child in children] progress = reduce(Progress.add_counts, progresses, None) return progress @XBlock.needs('user_tags') # pylint: disable=abstract-method @XBlock.wants('partitions') class SplitTestDescriptor(SplitTestFields, SequenceDescriptor): # the editing interface can be the same as for sequences -- just a container module_class = SplitTestModule filename_extension = "xml" child_descriptor = module_attr('child_descriptor') log_child_render = module_attr('log_child_render') get_content_titles = module_attr('get_content_titles') def definition_to_xml(self, resource_fs): xml_object = etree.Element('split_test') renderable_groups = {} # json.dumps doesn't know how to handle Location objects for group in self.group_id_to_child: renderable_groups[group] = self.group_id_to_child[group].to_deprecated_string() xml_object.set('group_id_to_child', json.dumps(renderable_groups)) xml_object.set('user_partition_id', str(self.user_partition_id)) for child in self.get_children(): self.runtime.add_block_as_child_node(child, xml_object) return xml_object @classmethod def definition_from_xml(cls, xml_object, system): children = [] raw_group_id_to_child = xml_object.attrib.get('group_id_to_child', None) user_partition_id = xml_object.attrib.get('user_partition_id', None) try: group_id_to_child = json.loads(raw_group_id_to_child) except ValueError: msg = "group_id_to_child is not valid json" log.exception(msg) system.error_tracker(msg) for child in xml_object: try: descriptor = system.process_xml(etree.tostring(child)) children.append(descriptor.scope_ids.usage_id) except Exception: msg = "Unable to load child when parsing split_test module." log.exception(msg) system.error_tracker(msg) return ({ 'group_id_to_child': group_id_to_child, 'user_partition_id': user_partition_id }, children) def has_dynamic_children(self): """ Grading needs to know that only one of the children is actually "real". This makes it use module.get_child_descriptors(). """ return True
nanolearning/edx-platform
common/lib/xmodule/xmodule/split_test_module.py
Python
agpl-3.0
11,395
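One recurring detail in SplitTestModule above is that group_id_to_child round-trips through JSON, so its keys are strings even though group IDs are integers. A tiny self-contained sketch of that lookup; the mapping and IDs are invented:

import json

# Self-contained sketch of the string-keyed lookup done by
# get_child_descriptors() above; the mapping and group id are invented.
group_id_to_child = json.loads('{"0": "block-v1:video-A", "1": "block-v1:html-B"}')

def child_for_group(group_id):
    # JSON object keys are always strings, so convert before the lookup.
    str_group_id = str(group_id)
    return group_id_to_child.get(str_group_id)

assert child_for_group(1) == 'block-v1:html-B'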
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Biobloom(AutotoolsPackage):
    """BioBloom Tools (BBT) provides the means to create filters for a given
    reference and then to categorize sequences."""

    homepage = "https://github.com/bcgsc/biobloom"
    url      = "https://github.com/bcgsc/biobloom/releases/download/2.2.0/biobloomtools-2.2.0.tar.gz"

    version('2.2.0', sha256='5d09f8690f0b6402f967ac09c5b0f769961f3fe3791000f8f73af6af7324f02c')

    depends_on('boost')
    depends_on('sdsl-lite')
    depends_on('sparsehash')
    depends_on('zlib')

    def configure_args(self):
        # Newer versions of sdsl-lite introduce tolerable warnings;
        # they must be disabled to allow the build to continue.
        return ['CXXFLAGS=-w', 'CPPFLAGS=-w']
iulian787/spack
var/spack/repos/builtin/packages/biobloom/package.py
Python
lgpl-2.1
946
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Vt(MakefilePackage):
    """A tool set for short variant discovery in genetic sequence data."""

    homepage = "http://genome.sph.umich.edu/wiki/vt"
    url      = "https://github.com/atks/vt/archive/0.577.tar.gz"

    version('0.5772', sha256='b147520478a2f7c536524511e48133d0360e88282c7159821813738ccbda97e7')
    version('0.577', sha256='009e2592e787ab37e471b4e8a66520141bb2791ca78142ca1767d27036f460d0')

    depends_on('zlib')

    def install(self, spec, prefix):
        # Install the vt binary into the package prefix.
        mkdirp(prefix.bin)
        install('vt', prefix.bin)
iulian787/spack
var/spack/repos/builtin/packages/vt/package.py
Python
lgpl-2.1
759
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os import stat import pytest import collections import spack.spec import spack.modules.tcl from spack.modules.common import UpstreamModuleIndex from spack.spec import Spec import spack.error def test_update_dictionary_extending_list(): target = { 'foo': { 'a': 1, 'b': 2, 'd': 4 }, 'bar': [1, 2, 4], 'baz': 'foobar' } update = { 'foo': { 'c': 3, }, 'bar': [3], 'baz': 'foobaz', 'newkey': { 'd': 4 } } spack.modules.common.update_dictionary_extending_lists(target, update) assert len(target) == 4 assert len(target['foo']) == 4 assert len(target['bar']) == 4 assert target['baz'] == 'foobaz' @pytest.fixture() def mock_module_filename(monkeypatch, tmpdir): filename = str(tmpdir.join('module')) monkeypatch.setattr(spack.modules.common.BaseFileLayout, 'filename', filename) yield filename @pytest.fixture() def mock_package_perms(monkeypatch): perms = stat.S_IRGRP | stat.S_IWGRP monkeypatch.setattr(spack.package_prefs, 'get_package_permissions', lambda spec: perms) yield perms def test_modules_written_with_proper_permissions(mock_module_filename, mock_package_perms, mock_packages, config): spec = spack.spec.Spec('mpileaks').concretized() # The code tested is common to all module types, but has to be tested from # one. TCL picked at random generator = spack.modules.tcl.TclModulefileWriter(spec) generator.write() assert mock_package_perms & os.stat( mock_module_filename).st_mode == mock_package_perms class MockDb(object): def __init__(self, db_ids, spec_hash_to_db): self.upstream_dbs = db_ids self.spec_hash_to_db = spec_hash_to_db def db_for_spec_hash(self, spec_hash): return self.spec_hash_to_db.get(spec_hash) class MockSpec(object): def __init__(self, unique_id): self.unique_id = unique_id def dag_hash(self): return self.unique_id def test_upstream_module_index(): s1 = MockSpec('spec-1') s2 = MockSpec('spec-2') s3 = MockSpec('spec-3') s4 = MockSpec('spec-4') tcl_module_index = """\ module_index: {0}: path: /path/to/a use_name: a """.format(s1.dag_hash()) module_indices = [ { 'tcl': spack.modules.common._read_module_index(tcl_module_index) }, {} ] dbs = [ 'd0', 'd1' ] mock_db = MockDb( dbs, { s1.dag_hash(): 'd0', s2.dag_hash(): 'd1', s3.dag_hash(): 'd0' } ) upstream_index = UpstreamModuleIndex(mock_db, module_indices) m1 = upstream_index.upstream_module(s1, 'tcl') assert m1.path == '/path/to/a' # No modules are defined for the DB associated with s2 assert not upstream_index.upstream_module(s2, 'tcl') # Modules are defined for the index associated with s1, but none are # defined for the requested type assert not upstream_index.upstream_module(s1, 'lmod') # A module is registered with a DB and the associated module index has # modules of the specified type defined, but not for the requested spec assert not upstream_index.upstream_module(s3, 'tcl') # The spec isn't recorded as installed in any of the DBs with pytest.raises(spack.error.SpackError): upstream_index.upstream_module(s4, 'tcl') def test_get_module_upstream(): s1 = MockSpec('spec-1') tcl_module_index = """\ module_index: {0}: path: /path/to/a use_name: a """.format(s1.dag_hash()) module_indices = [ {}, { 'tcl': spack.modules.common._read_module_index(tcl_module_index) } ] dbs = ['d0', 'd1'] mock_db = MockDb( dbs, {s1.dag_hash(): 'd1'} ) upstream_index = UpstreamModuleIndex(mock_db, module_indices) MockPackage = 
collections.namedtuple('MockPackage', ['installed_upstream']) setattr(s1, "package", MockPackage(True)) try: old_index = spack.modules.common.upstream_module_index spack.modules.common.upstream_module_index = upstream_index m1_path = spack.modules.common.get_module('tcl', s1, True) assert m1_path == '/path/to/a' finally: spack.modules.common.upstream_module_index = old_index def test_load_installed_package_not_in_repo(install_mockery, mock_fetch, monkeypatch): # Get a basic concrete spec for the trivial install package. spec = Spec('trivial-install-test-package') spec.concretize() assert spec.concrete # Get the package pkg = spec.package def find_nothing(*args): raise spack.repo.UnknownPackageError( 'Repo package access is disabled for test') try: pkg.do_install() spec._package = None monkeypatch.setattr(spack.repo, 'get', find_nothing) with pytest.raises(spack.repo.UnknownPackageError): spec.package module_path = spack.modules.common.get_module('tcl', spec, True) assert module_path pkg.do_uninstall() except Exception: pkg.remove_prefix() raise
iulian787/spack
lib/spack/spack/test/modules/common.py
Python
lgpl-2.1
5,670
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import argparse import os import pytest import sys from llnl.util.filesystem import FileFilter import spack.paths from spack.cmd.flake8 import flake8, setup_parser, changed_files from spack.repo import Repo from spack.util.executable import which @pytest.fixture(scope='module') def parser(): """Returns the parser for the ``flake8`` command""" parser = argparse.ArgumentParser() setup_parser(parser) return parser @pytest.fixture(scope='module') def flake8_package(): """Flake8 only checks files that have been modified. This fixture makes a small change to the ``flake8`` mock package, yields the filename, then undoes the change on cleanup. """ repo = Repo(spack.paths.mock_packages_path) filename = repo.filename_for_package_name('flake8') package = FileFilter(filename) # Make the change package.filter("state = 'unmodified'", "state = 'modified'", string=True) yield filename # Undo the change package.filter("state = 'modified'", "state = 'unmodified'", string=True) def test_changed_files(parser, flake8_package): args = parser.parse_args([]) # changed_files returns file paths relative to the root # directory of Spack. Convert to absolute file paths. files = changed_files(args) files = [os.path.join(spack.paths.prefix, path) for path in files] # There will likely be other files that have changed # when these tests are run assert flake8_package in files # As of flake8 3.0.0, Python 2.6 and 3.3 are no longer supported # http://flake8.pycqa.org/en/latest/release-notes/3.0.0.html @pytest.mark.skipif( sys.version_info[:2] <= (2, 6) or (3, 0) <= sys.version_info[:2] <= (3, 3), reason='flake8 no longer supports Python 2.6 or 3.3 and older') @pytest.mark.skipif(not which('flake8'), reason='flake8 is not installed.') def test_flake8(parser, flake8_package): # Only test the flake8_package that we modified # Otherwise, the unit tests would fail every time # the flake8 tests fail args = parser.parse_args([flake8_package]) flake8(parser, args) # Get even more coverage args = parser.parse_args(['--output', '--root-relative', flake8_package]) flake8(parser, args)
iulian787/spack
lib/spack/spack/test/cmd/flake8.py
Python
lgpl-2.1
2,434
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PerlXmlTwig(PerlPackage):
    """This module provides a way to process XML documents. It is built on
    top of XML::Parser.

    The module offers a tree interface to the document, while allowing you
    to output the parts of it that have been completely processed. It allows
    minimal resource (CPU and memory) usage by building the tree only for
    the parts of the documents that need actual processing, through the use
    of the twig_roots and twig_print_outside_roots options. The finish and
    finish_print methods also help to increase performance.

    XML::Twig tries to make simple things easy, so it tries its best to take
    care of a lot of the (usually) annoying (but sometimes necessary)
    features that come with XML and XML::Parser."""

    homepage = "https://metacpan.org/pod/XML::Twig"
    url      = "https://cpan.metacpan.org/authors/id/M/MI/MIROD/XML-Twig-3.52.tar.gz"

    version('3.52', sha256='fef75826c24f2b877d0a0d2645212fc4fb9756ed4d2711614ac15c497e8680ad')

    depends_on('perl-xml-parser', type=('build', 'run'))

    patch('non_interactive.patch')
iulian787/spack
var/spack/repos/builtin/packages/perl-xml-twig/package.py
Python
lgpl-2.1
1,320
from proxy import CartProxy, ItemAlreadyExists, ItemDoesNotExist
sidja/django-changuito
changuito/__init__.py
Python
lgpl-3.0
65
""" Force import of all modules in this package in order to get the standard test runner to pick up the tests. Yowzers. """ import os import django modules = [filename.rsplit('.', 1)[0] for filename in os.listdir(os.path.dirname(__file__)) if filename.endswith('.py') and not filename.startswith('_')] __test__ = dict() if django.VERSION < (1, 6): for module in modules: exec("from celery_rpc.tests.%s import *" % module)
tumb1er/django-celery-rpc
celery_rpc/tests/tests.py
Python
unlicense
459
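The exec-based re-export above predates importlib being the usual tool for this kind of dynamic import. A hedged sketch of the same idea with importlib; only the module discovery mirrors the file above, and the package name is assumed from its path (celery_rpc.tests):

import importlib
import os

# Sketch of the same "import every test module" idea using importlib;
# the package name is assumed from the file's path (celery_rpc/tests/).
package = 'celery_rpc.tests'
modules = [filename.rsplit('.', 1)[0]
           for filename in os.listdir(os.path.dirname(__file__))
           if filename.endswith('.py') and not filename.startswith('_')]

for module in modules:
    mod = importlib.import_module('%s.%s' % (package, module))
    # Re-export public names so the test runner discovers them here.
    globals().update({name: getattr(mod, name)
                      for name in dir(mod) if not name.startswith('_')})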
#!/usr/bin/env python3 # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import generic_distro import utils class FreeBSDTests(generic_distro.GenericDistroTests): def TestPackageInstallation(self): utils.Execute(['pkg', 'install', '--force', '--yes', 'tree']) def IsPackageInstalled(self, package_name): # the following command returns zero if package is installed command = ['pkg', 'query', '%n', package_name] rc, output = utils.Execute(command, raise_errors=False) return rc == 0 def GetCmdlineConfigs(self): return { 'console': ['vidconsole', ], # 'comconsole_speed': ['38400', ] } def GetCmdlineLocation(self): return '/boot/loader.conf' def GetSshdConfig(self): # Don't check for PermitRootLogin and PasswordAuthentication as it # fallbacks on FreeBSD to "no" when undefined return {} def TestRootPasswordDisabled(self): """ Ensure root password is disabled (/etc/passwd) """ # It's actually empty and it's fine according to: # https://forums.freebsd.org/threads/jails-default-root-password-is-empty-not-starred-out.37701/ utils.Execute(['grep', '^root::', '/etc/master.passwd']) def TestPackageManagerConfig(self): """ BSD has all GCE packages officially on its mirror """ pass def TestNetworkInterfaceMTU(self): """ Ensure that the network interface MTU is set to 1460. """ # Parsing from ifconfig as BSD has no sysfs rc, output = utils.Execute(['ifconfig'], capture_output=True) for line in output.split('\n'): token = 'mtu ' token_pos = line.find(token) if token_pos >= 0: desired_mtu = 1460 cur_mtu = int(line[token_pos + len(token):]) if cur_mtu != desired_mtu: raise Exception('Network MTU is %d but expected %d' % ( cur_mtu, desired_mtu)) def TestAutomaticSecurityUpdates(self): def HasFoundConfig(config_file, key, value): """ Return True if @value is found inside of a @key line on @config_file. """ command = ['grep', key, config_file] rc, output = utils.Execute(command, capture_output=True) output_lines = output.split('\n') useful_lines = filter(generic_distro.RemoveCommentAndStrip, output_lines) for line in useful_lines: if line.find(value) >= 0: # found desired value return True return False config_file = '/etc/freebsd-update.conf' desired_configs = { 'Components': 'kernel', } for key in desired_configs: if not HasFoundConfig(config_file, key, desired_configs[key]): raise Exception('freebsd-update "%s" config has no "%s" on it' % ( key, desired_configs[key])) def GetSysctlConfigs(self): """ Return BSD parameters for sysctl checks. Below is what I could find on BSD related to original Linux's requirements. """ return { 'net.inet.ip.forwarding': 0, 'net.inet.tcp.syncookies': 1, 'net.inet.ip.accept_sourceroute': 0, 'net.inet.ip.redirect': 0, 'net.inet.icmp.bmcastecho': 0, }
adjackura/compute-image-tools
image_test/configuration/linux/freebsd.py
Python
apache-2.0
3,674
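GetSysctlConfigs() above only returns the expected values; the comparison itself can be sketched in a few lines. The direct sysctl -n invocation is an assumption about how the harness reads live values (the real tests go through utils.Execute):

import subprocess

# Hedged sketch of checking live values against GetSysctlConfigs() above.
# `sysctl -n <key>` prints a single value on FreeBSD.
desired = {
    'net.inet.ip.forwarding': 0,
    'net.inet.tcp.syncookies': 1,
    'net.inet.ip.redirect': 0,
}

for key, expected in desired.items():
    actual = int(subprocess.check_output(['sysctl', '-n', key]).strip())
    if actual != expected:
        raise Exception('sysctl %s is %d but expected %d'
                        % (key, actual, expected))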
from ncclient.operations.lock import * import unittest try: from unittest.mock import patch # Python 3.4 and later except ImportError: from mock import patch from ncclient import manager import ncclient.manager import ncclient.transport from ncclient.xml_ import * from ncclient.operations import RaiseMode from xml.etree import ElementTree class TestLock(unittest.TestCase): def setUp(self): self.device_handler = manager.make_device_handler({'name': 'junos'}) @patch('ncclient.transport.SSHSession') @patch('ncclient.operations.RPC._request') def test_lock_default_param(self, mock_request, mock_session): session = ncclient.transport.SSHSession(self.device_handler) obj = Lock(session, self.device_handler, raise_mode=RaiseMode.ALL) obj.request() node = new_ele("lock") sub_ele(sub_ele(node, "target"), "candidate") xml = ElementTree.tostring(node) call = mock_request.call_args_list[0][0][0] call = ElementTree.tostring(call) self.assertEqual(call, xml) @patch('ncclient.transport.SSHSession') @patch('ncclient.operations.RPC._request') def test_lock(self, mock_request, mock_session): session = ncclient.transport.SSHSession(self.device_handler) obj = Lock(session, self.device_handler, raise_mode=RaiseMode.ALL) obj.request(target="running") node = new_ele("lock") sub_ele(sub_ele(node, "target"), "running") xml = ElementTree.tostring(node) call = mock_request.call_args_list[0][0][0] call = ElementTree.tostring(call) self.assertEqual(call, xml) @patch('ncclient.transport.SSHSession') @patch('ncclient.operations.RPC._request') def test_unlock_default_param(self, mock_request, mock_session): session = ncclient.transport.SSHSession(self.device_handler) obj = Unlock(session, self.device_handler, raise_mode=RaiseMode.ALL) obj.request() node = new_ele("unlock") sub_ele(sub_ele(node, "target"), "candidate") xml = ElementTree.tostring(node) call = mock_request.call_args_list[0][0][0] call = ElementTree.tostring(call) self.assertEqual(call, xml) @patch('ncclient.transport.SSHSession') @patch('ncclient.operations.RPC._request') def test_unlock(self, mock_request, mock_session): session = ncclient.transport.SSHSession(self.device_handler) obj = Unlock(session, self.device_handler, raise_mode=RaiseMode.ALL) obj.request(target="running") node = new_ele("unlock") sub_ele(sub_ele(node, "target"), "running") xml = ElementTree.tostring(node) call = mock_request.call_args_list[0][0][0] call = ElementTree.tostring(call) self.assertEqual(call, xml) @patch('ncclient.transport.SSHSession') @patch('ncclient.operations.RPC._request') def test_lock_context_enter(self, mock_request, mock_session): session = ncclient.transport.SSHSession(self.device_handler) obj = LockContext(session, self.device_handler, "candidate") self.assertEqual(obj.__enter__(), obj) node = new_ele("lock") sub_ele(sub_ele(node, "target"), "candidate") xml = ElementTree.tostring(node) call = mock_request.call_args_list[0][0][0] call = ElementTree.tostring(call) self.assertEqual(call, xml) @patch('ncclient.transport.SSHSession') @patch('ncclient.operations.RPC._request') def test_lock_context_exit(self, mock_request, mock_session): session = ncclient.transport.SSHSession(self.device_handler) obj = LockContext(session, self.device_handler, "running") self.assertFalse(obj.__exit__()) node = new_ele("unlock") sub_ele(sub_ele(node, "target"), "running") xml = ElementTree.tostring(node) call = mock_request.call_args_list[0][0][0] call = ElementTree.tostring(call) self.assertEqual(call, xml)
nwautomator/ncclient
test/unit/operations/test_lock.py
Python
apache-2.0
4,013
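The tests above drive Lock, Unlock and LockContext against a mocked SSHSession; in application code the same operations are normally reached through ncclient.manager. A hedged usage sketch; the host and credentials are placeholders:

from ncclient import manager

# Hedged usage sketch for the lock operations tested above; the host and
# credentials are placeholders, device_params mirrors the tests.
with manager.connect(host='192.0.2.1', port=830, username='admin',
                     password='secret', hostkey_verify=False,
                     device_params={'name': 'junos'}) as m:
    # Context-managed lock/unlock of the candidate datastore, the same
    # behaviour LockContext's __enter__/__exit__ verify above.
    with m.locked(target='candidate'):
        pass  # edit-config / commit would go here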
#!/usr/bin/env python # Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Slightly Modified from Google Person Finder: # http://code.google.com/p/googlepersonfinder/ """Setup and teardown fixtures for all the tests in the tests/ directory.""" import os import sys import unittest # pylint: disable=R0801 from google.appengine.api import apiproxy_stub_map from google.appengine.api import datastore_file_stub import remote_api # pylint: enable=R0801 def setup(): """Setup for all tests in tests/""" # Create a new apiproxy and temp datastore to use for this test suite apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap() temp_db = datastore_file_stub.DatastoreFileStub( 'PfifToolsUnittestDataStore', None, None, trusted=True) apiproxy_stub_map.apiproxy.RegisterStub('datastore', temp_db) # An application id is required to access the datastore, so let's create one os.environ['APPLICATION_ID'] = 'pfif-tools-test'
AwesomeTurtle/personfinder
tools/pfif-tools/tests/__init__.py
Python
apache-2.0
1,480
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import argparse
import json
import logging
import random
import string
import csv

from pylons import tmpl_context as c
from testfixtures import LogCapture
from mock import patch
from ming.odm import ThreadLocalODMSession

from allura import model as M
from allura.lib.helpers import push_config
from allura.tests import TestController
from allura.tests.decorators import patch_middleware_config
from forgewiki import model as WM


def parse_args():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description='Count number of expensive calls (mongo, markdown, etc) for a standard page.\n'
                    "Currently it's a _discuss URL with a few posts on it. This exercises core logic\n"
                    '(project & tool lookup, security, discussion thread, main template, etc) but\n'
                    'intentionally avoids most tool-specific code.')
    parser.add_argument('--verbose', '-v', action='store_true', default=False,
                        help='Show call details.  Note that Timers with debug_each_call=False '
                             "(like ming's Cursor.next) are not displayed in verbose mode "
                             '(but they are counted).')
    parser.add_argument('--debug-html', action='store_true', default=False,
                        help='Save HTML responses as local files')
    parser.add_argument('--data-file', default='call_counts.csv',
                        type=argparse.FileType('a'),
                        help='CSV file that is appended to')
    parser.add_argument('--id', default='',
                        help='An identifier for this run.  Examples:\n'
                             '`git rev-parse --short HEAD` for current hash\n'
                             '`git log -1 --oneline` for hash + message')
    return parser.parse_args()


def main(args):
    test = TestController()
    setup(test)

    url = generate_wiki_thread(test)
    # make sure ODM sessions won't get re-used
    ThreadLocalODMSession.close_all()

    counts = count_page(test, url, verbose=args.verbose,
                        debug_html=args.debug_html)
    print json.dumps(counts)
    write_csv(counts, args.id, args.data_file)
    test.tearDown()


def setup(test):
    # includes setting up mim
    with patch_middleware_config({'stats.sample_rate': 1,
                                  'stats.debug_line_length': 1000,
                                  }), \
            patch('timermiddleware.log.isEnabledFor', return_value=True):
        # can't set this via logging configuration since setUp() will load a
        # logging config and then start using it before we have a good place
        # to tweak it
        test.setUp()
    tmw_log = logging.getLogger('timermiddleware')
    tmw_log.disabled = 0  # gets disabled when .ini file is loaded; dumb.
    tmw_log.setLevel(logging.DEBUG)


def generate_wiki_thread(test):
    # automagically instantiate the app
    test.app.get('/wiki/')

    project = M.Project.query.get(shortname='test')
    app = project.app_instance('wiki')
    page = WM.Page.query.get(app_config_id=app.config._id, title='Home')
    thread = page.discussion_thread
    # create a few posts by a few users
    with push_config(c, user=M.User.query.get(username='test-admin'),
                     app=app,
                     project=project):
        thread.add_post(text='This is very helpful')
        thread.add_post(text="But it's not **super** helpful")
        with push_config(c, user=M.User.query.get(username='test-user')):
            thread.add_post(text='I disagree')
        with push_config(c, user=M.User.query.get(username='test-user-1')):
            thread.add_post(text='But what about foo?')
    ThreadLocalODMSession.flush_all()

    url = '/p/test/wiki/_discuss/thread/{}/'.format(thread._id)
    return url


def count_page(test, url, verbose=False, debug_html=False):
    with LogCapture('stats') as stats, LogCapture('timermiddleware') as calls:
        resp = test.app.get(url, extra_environ=dict(username='*anonymous'))
    print url, resp.status

    if debug_html:
        debug_filename = 'call-{}.html'.format(''.join(
            [random.choice(string.ascii_letters + string.digits)
             for n in xrange(10)]))
        with open(debug_filename, 'w') as out:
            out.write(resp.body)
        print debug_filename

    if verbose:
        for r in calls.records:
            print r.getMessage()

    assert len(stats.records) == 1
    timings = json.loads(stats.records[0].getMessage())
    # total is always 1, which is misleading
    del timings['call_counts']['total']
    return timings['call_counts']


def write_csv(counts, id, data_file):
    cols = sorted(counts.keys())
    row = counts
    if id:
        cols = ['id'] + cols
        row = dict(counts, id=id)
    csv_out = csv.DictWriter(data_file, cols)
    if data_file.tell() == 0:
        csv_out.writeheader()
    csv_out.writerow(row)
    data_file.close()


if __name__ == '__main__':
    main(parse_args())
apache/incubator-allura
scripts/perf/call_count.py
Python
apache-2.0
5,886
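The write_csv() helper above appends one row per run and only writes a header when the file is empty (data_file.tell() == 0 on an append-mode handle). A standalone sketch of that append pattern, using only the standard library; the helper name, file path, and run id below are illustrative.

import csv


def append_counts(counts, run_id='', path='call_counts.csv'):
    # Mirrors write_csv(): optional 'id' column first, then sorted call names.
    cols = sorted(counts.keys())
    row = dict(counts)
    if run_id:
        cols = ['id'] + cols
        row['id'] = run_id
    with open(path, 'a') as data_file:
        out = csv.DictWriter(data_file, cols)
        if data_file.tell() == 0:  # empty file -> emit the header once
            out.writeheader()
        out.writerow(row)


append_counts({'mongo': 12, 'markdown': 3}, run_id='abc1234')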
"""Alexa state report code.""" import asyncio import json import logging import aiohttp import async_timeout from homeassistant.const import MATCH_ALL, STATE_ON import homeassistant.util.dt as dt_util from .const import API_CHANGE, Cause from .entities import ENTITY_ADAPTERS from .messages import AlexaResponse _LOGGER = logging.getLogger(__name__) DEFAULT_TIMEOUT = 10 async def async_enable_proactive_mode(hass, smart_home_config): """Enable the proactive mode. Proactive mode makes this component report state changes to Alexa. """ # Validate we can get access token. await smart_home_config.async_get_access_token() async def async_entity_state_listener(changed_entity, old_state, new_state): if not new_state: return if new_state.domain not in ENTITY_ADAPTERS: return if not smart_home_config.should_expose(changed_entity): _LOGGER.debug("Not exposing %s because filtered by config", changed_entity) return alexa_changed_entity = ENTITY_ADAPTERS[new_state.domain]( hass, smart_home_config, new_state ) for interface in alexa_changed_entity.interfaces(): if interface.properties_proactively_reported(): await async_send_changereport_message( hass, smart_home_config, alexa_changed_entity ) return if ( interface.name() == "Alexa.DoorbellEventSource" and new_state.state == STATE_ON ): await async_send_doorbell_event_message( hass, smart_home_config, alexa_changed_entity ) return return hass.helpers.event.async_track_state_change( MATCH_ALL, async_entity_state_listener ) async def async_send_changereport_message( hass, config, alexa_entity, *, invalidate_access_token=True ): """Send a ChangeReport message for an Alexa entity. https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-with-changereport-events """ token = await config.async_get_access_token() headers = {"Authorization": f"Bearer {token}"} endpoint = alexa_entity.alexa_id() # this sends all the properties of the Alexa Entity, whether they have # changed or not. this should be improved, and properties that have not # changed should be moved to the 'context' object properties = list(alexa_entity.serialize_properties()) payload = { API_CHANGE: {"cause": {"type": Cause.APP_INTERACTION}, "properties": properties} } message = AlexaResponse(name="ChangeReport", namespace="Alexa", payload=payload) message.set_endpoint_full(token, endpoint) message_serialized = message.serialize() session = hass.helpers.aiohttp_client.async_get_clientsession() try: with async_timeout.timeout(DEFAULT_TIMEOUT): response = await session.post( config.endpoint, headers=headers, json=message_serialized, allow_redirects=True, ) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Timeout sending report to Alexa.") return response_text = await response.text() _LOGGER.debug("Sent: %s", json.dumps(message_serialized)) _LOGGER.debug("Received (%s): %s", response.status, response_text) if response.status == 202: return response_json = json.loads(response_text) if ( response_json["payload"]["code"] == "INVALID_ACCESS_TOKEN_EXCEPTION" and not invalidate_access_token ): config.async_invalidate_access_token() return await async_send_changereport_message( hass, config, alexa_entity, invalidate_access_token=False ) _LOGGER.error( "Error when sending ChangeReport to Alexa: %s: %s", response_json["payload"]["code"], response_json["payload"]["description"], ) async def async_send_add_or_update_message(hass, config, entity_ids): """Send an AddOrUpdateReport message for entities. 
https://developer.amazon.com/docs/device-apis/alexa-discovery.html#add-or-update-report """ token = await config.async_get_access_token() headers = {"Authorization": f"Bearer {token}"} endpoints = [] for entity_id in entity_ids: domain = entity_id.split(".", 1)[0] if domain not in ENTITY_ADAPTERS: continue alexa_entity = ENTITY_ADAPTERS[domain](hass, config, hass.states.get(entity_id)) endpoints.append(alexa_entity.serialize_discovery()) payload = {"endpoints": endpoints, "scope": {"type": "BearerToken", "token": token}} message = AlexaResponse( name="AddOrUpdateReport", namespace="Alexa.Discovery", payload=payload ) message_serialized = message.serialize() session = hass.helpers.aiohttp_client.async_get_clientsession() return await session.post( config.endpoint, headers=headers, json=message_serialized, allow_redirects=True ) async def async_send_delete_message(hass, config, entity_ids): """Send an DeleteReport message for entities. https://developer.amazon.com/docs/device-apis/alexa-discovery.html#deletereport-event """ token = await config.async_get_access_token() headers = {"Authorization": f"Bearer {token}"} endpoints = [] for entity_id in entity_ids: domain = entity_id.split(".", 1)[0] if domain not in ENTITY_ADAPTERS: continue alexa_entity = ENTITY_ADAPTERS[domain](hass, config, hass.states.get(entity_id)) endpoints.append({"endpointId": alexa_entity.alexa_id()}) payload = {"endpoints": endpoints, "scope": {"type": "BearerToken", "token": token}} message = AlexaResponse( name="DeleteReport", namespace="Alexa.Discovery", payload=payload ) message_serialized = message.serialize() session = hass.helpers.aiohttp_client.async_get_clientsession() return await session.post( config.endpoint, headers=headers, json=message_serialized, allow_redirects=True ) async def async_send_doorbell_event_message(hass, config, alexa_entity): """Send a DoorbellPress event message for an Alexa entity. https://developer.amazon.com/docs/smarthome/send-events-to-the-alexa-event-gateway.html """ token = await config.async_get_access_token() headers = {"Authorization": f"Bearer {token}"} endpoint = alexa_entity.alexa_id() message = AlexaResponse( name="DoorbellPress", namespace="Alexa.DoorbellEventSource", payload={ "cause": {"type": Cause.PHYSICAL_INTERACTION}, "timestamp": f"{dt_util.utcnow().replace(tzinfo=None).isoformat()}Z", }, ) message.set_endpoint_full(token, endpoint) message_serialized = message.serialize() session = hass.helpers.aiohttp_client.async_get_clientsession() try: with async_timeout.timeout(DEFAULT_TIMEOUT): response = await session.post( config.endpoint, headers=headers, json=message_serialized, allow_redirects=True, ) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Timeout sending report to Alexa.") return response_text = await response.text() _LOGGER.debug("Sent: %s", json.dumps(message_serialized)) _LOGGER.debug("Received (%s): %s", response.status, response_text) if response.status == 202: return response_json = json.loads(response_text) _LOGGER.error( "Error when sending DoorbellPress event to Alexa: %s: %s", response_json["payload"]["code"], response_json["payload"]["description"], )
leppa/home-assistant
homeassistant/components/alexa/state_report.py
Python
apache-2.0
7,913
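For reference, the payload assembled by async_send_changereport_message() above has roughly the shape below. This is only an illustrative sketch: the literal strings standing in for API_CHANGE and Cause.APP_INTERACTION and the sample property entry are assumptions, not values read from a live entity.

# A minimal sketch of the ChangeReport payload dict built above (values assumed).
example_payload = {
    "change": {  # assumed value of API_CHANGE
        "cause": {"type": "APP_INTERACTION"},  # assumed value of Cause.APP_INTERACTION
        "properties": [
            {
                "namespace": "Alexa.PowerController",
                "name": "powerState",
                "value": "ON",
                "timeOfSample": "2019-07-01T12:00:00.0Z",
                "uncertaintyInMilliseconds": 0,
            }
        ],
    }
}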
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`scion_l4` --- Layer 4 handling
====================================
"""
# SCION
from lib.errors import SCIONParseError
from lib.packet.packet_base import L4HeaderBase
from lib.packet.scion_udp import SCIONUDPHeader
from lib.packet.scmp.hdr import SCMPHeader
from lib.types import L4Proto


class SCIONL4Unknown(L4HeaderBase):  # pragma: no cover
    NAME = "Unknown"

    def __init__(self, proto):
        raise NotImplementedError

    def from_values(self):
        raise NotImplementedError

    def _parse(self):
        raise NotImplementedError

    def validate(self):
        raise NotImplementedError

    def update(self):
        raise NotImplementedError

    def __len__(self):
        return 0

    def __str__(self):
        return "[Unknown L4 protocol header]"


def parse_l4_hdr(proto, data, dst=None, src=None):
    if proto == L4Proto.UDP:
        raw_hdr = data.pop(SCIONUDPHeader.LEN)
        assert src
        assert dst
        return SCIONUDPHeader((src, dst, raw_hdr))
    if proto == L4Proto.SCMP:
        raw_hdr = data.pop(SCMPHeader.LEN)
        return SCMPHeader((src, dst, raw_hdr))
    if proto in L4Proto.L4:
        return None
    raise SCIONParseError("Unsupported L4 protocol type: %s" % proto)
dmpiergiacomo/scion
python/lib/packet/scion_l4.py
Python
apache-2.0
1,817
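parse_l4_hdr() assumes data is a consumable byte buffer whose pop(n) returns and removes the next n bytes (as SCION's raw-buffer wrapper does). A hypothetical stand-in illustrating just that contract:

class SimpleRaw(object):
    """Hypothetical stand-in for the buffer interface parse_l4_hdr() expects."""

    def __init__(self, data):
        self._data = data

    def pop(self, n):
        # Return the next n bytes and consume them from the buffer.
        chunk, self._data = self._data[:n], self._data[n:]
        return chunk


buf = SimpleRaw(b"\x30\x39\x00\x50\x00\x08\x00\x00")
print(buf.pop(2))  # b'09' -> first two bytes; the buffer is now six bytes long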
from collections import namedtuple

TomcatJVMFeature = namedtuple('TomcatJVMFeature', [
    'free',
    'total',
    'max'
])

TomcatMemoryFeature = namedtuple('TomcatMemoryFeature', [
    'name',
    'type',
    'initial',
    'committed',
    'maximum',
    'used'
])

TomcatConnectorFeature = namedtuple('TomcatConnectorFeature', [
    'connector',
    'maxThread',
    'currentThread',
    'currentThreadBusy',
    'requestMaxTime',
    'processingTime',
    'requestCount',
    'errorCount',
    'byteReceived',
    'byteSent'
])

TomcatWorkerFeature = namedtuple('TomcatWorkerFeature', [
    'connector',
    'stage',
    'time',
    'byteSent',
    'byteReceived',
    'client',
    'vhost',
    'request'
])
cloudviz/agentless-system-crawler
crawler/plugins/applications/tomcat/feature.py
Python
apache-2.0
1,653
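A short usage sketch of the namedtuples above with made-up numbers; fields can be passed positionally or by keyword, and _asdict() gives a plain mapping for serialization.

jvm = TomcatJVMFeature(free=1024, total=4096, max=8192)
print(jvm.free)        # 1024
print(jvm._asdict())   # mapping of all three fields

connector = TomcatConnectorFeature(
    'http-nio-8080',   # connector
    200, 25, 3,        # maxThread, currentThread, currentThreadBusy
    512, 10450,        # requestMaxTime, processingTime
    1500, 2,           # requestCount, errorCount
    1048576, 5242880   # byteReceived, byteSent
)
print(connector.requestCount)  # 1500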
#!/usr/bin/python -u # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import sys import itertools import uuid from optparse import OptionParser import random import six from six.moves.urllib.parse import urlparse from swift.common.manager import Manager from swift.common import utils, ring from swift.common.storage_policy import POLICIES from swift.common.http import HTTP_NOT_FOUND from swiftclient import client, get_auth, ClientException from test.probe.common import ENABLED_POLICIES TIMEOUT = 60 def meta_command(name, bases, attrs): """ Look for attrs with a truthy attribute __command__ and add them to an attribute __commands__ on the type that maps names to decorated methods. The decorated methods' doc strings also get mapped in __docs__. Also adds a method run(command_name, *args, **kwargs) that will execute the method mapped to the name in __commands__. """ commands = {} docs = {} for attr, value in attrs.items(): if getattr(value, '__command__', False): commands[attr] = value # methods have always have a __doc__ attribute, sometimes empty docs[attr] = (getattr(value, '__doc__', None) or 'perform the %s command' % attr).strip() attrs['__commands__'] = commands attrs['__docs__'] = docs def run(self, command, *args, **kwargs): return self.__commands__[command](self, *args, **kwargs) attrs.setdefault('run', run) return type(name, bases, attrs) def command(f): f.__command__ = True return f @six.add_metaclass(meta_command) class BrainSplitter(object): def __init__(self, url, token, container_name='test', object_name='test', server_type='container', policy=None): self.url = url self.token = token self.account = utils.split_path(urlparse(url).path, 2, 2)[1] self.container_name = container_name self.object_name = object_name server_list = ['%s-server' % server_type] if server_type else ['all'] self.servers = Manager(server_list) policies = list(ENABLED_POLICIES) random.shuffle(policies) self.policies = itertools.cycle(policies) o = object_name if server_type == 'object' else None c = container_name if server_type in ('object', 'container') else None if server_type in ('container', 'account'): if policy: raise TypeError('Metadata server brains do not ' 'support specific storage policies') self.policy = None self.ring = ring.Ring( '/etc/swift/%s.ring.gz' % server_type) elif server_type == 'object': if not policy: raise TypeError('Object BrainSplitters need to ' 'specify the storage policy') self.policy = policy policy.load_ring('/etc/swift') self.ring = policy.object_ring else: raise ValueError('Unkonwn server_type: %r' % server_type) self.server_type = server_type part, nodes = self.ring.get_nodes(self.account, c, o) node_ids = [n['id'] for n in nodes] if all(n_id in node_ids for n_id in (0, 1)): self.primary_numbers = (1, 2) self.handoff_numbers = (3, 4) else: self.primary_numbers = (3, 4) self.handoff_numbers = (1, 2) @command def start_primary_half(self): """ start servers 1 & 2 """ tuple(self.servers.start(number=n) for n in self.primary_numbers) @command def 
stop_primary_half(self): """ stop servers 1 & 2 """ tuple(self.servers.stop(number=n) for n in self.primary_numbers) @command def start_handoff_half(self): """ start servers 3 & 4 """ tuple(self.servers.start(number=n) for n in self.handoff_numbers) @command def stop_handoff_half(self): """ stop servers 3 & 4 """ tuple(self.servers.stop(number=n) for n in self.handoff_numbers) @command def put_container(self, policy_index=None): """ put container with next storage policy """ if policy_index is not None: policy = POLICIES.get_by_index(int(policy_index)) if not policy: raise ValueError('Unknown policy with index %s' % policy) elif not self.policy: policy = next(self.policies) else: policy = self.policy headers = {'X-Storage-Policy': policy.name} client.put_container(self.url, self.token, self.container_name, headers=headers) @command def delete_container(self): """ delete container """ client.delete_container(self.url, self.token, self.container_name) @command def put_object(self, headers=None, contents=None): """ issue put for test object """ client.put_object(self.url, self.token, self.container_name, self.object_name, headers=headers, contents=contents) @command def delete_object(self): """ issue delete for test object """ try: client.delete_object(self.url, self.token, self.container_name, self.object_name) except ClientException as err: if err.http_status != HTTP_NOT_FOUND: raise parser = OptionParser('%prog [options] ' '<command>[:<args>[,<args>...]] [<command>...]') parser.usage += '\n\nCommands:\n\t' + \ '\n\t'.join("%s - %s" % (name, doc) for name, doc in BrainSplitter.__docs__.items()) parser.add_option('-c', '--container', default='container-%s' % uuid.uuid4(), help='set container name') parser.add_option('-o', '--object', default='object-%s' % uuid.uuid4(), help='set object name') parser.add_option('-s', '--server_type', default='container', help='set server type') parser.add_option('-P', '--policy_name', default=None, help='set policy') def main(): options, commands = parser.parse_args() if not commands: parser.print_help() return 'ERROR: must specify at least one command' for cmd_args in commands: cmd = cmd_args.split(':', 1)[0] if cmd not in BrainSplitter.__commands__: parser.print_help() return 'ERROR: unknown command %s' % cmd url, token = get_auth('http://127.0.0.1:8080/auth/v1.0', 'test:tester', 'testing') if options.server_type == 'object' and not options.policy_name: options.policy_name = POLICIES.default.name if options.policy_name: options.server_type = 'object' policy = POLICIES.get_by_name(options.policy_name) if not policy: return 'ERROR: unknown policy %r' % options.policy_name else: policy = None brain = BrainSplitter(url, token, options.container, options.object, options.server_type, policy=policy) for cmd_args in commands: parts = cmd_args.split(':', 1) command = parts[0] if len(parts) > 1: args = utils.list_from_csv(parts[1]) else: args = () try: brain.run(command, *args) except ClientException as e: print('**WARNING**: %s raised %s' % (command, e)) print('STATUS'.join(['*' * 25] * 2)) brain.servers.status() sys.exit() if __name__ == "__main__": sys.exit(main())
larsbutler/swift
test/probe/brain.py
Python
apache-2.0
8,284
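The meta_command metaclass above is generic: any method marked with @command is registered in __commands__ and __docs__ and becomes reachable through run(). A toy class reusing those two helpers from the script shows the pattern; the class and method names are hypothetical.

import six


@six.add_metaclass(meta_command)
class Toy(object):
    @command
    def ping(self):
        """ reply with pong """
        return 'pong'


toy = Toy()
print(toy.run('ping'))        # -> 'pong'
print(Toy.__docs__['ping'])   # -> 'reply with pong'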
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import datetime import logging import os import unittest import time from airflow import models, settings, AirflowException from airflow.exceptions import AirflowSkipException from airflow.jobs import BackfillJob from airflow.models import DAG, TaskInstance as TI from airflow.models import State as ST from airflow.models import DagModel, DagStat from airflow.models import clear_task_instances from airflow.models import XCom from airflow.operators.dummy_operator import DummyOperator from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import PythonOperator from airflow.operators.python_operator import ShortCircuitOperator from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep from airflow.utils.state import State from airflow.utils.trigger_rule import TriggerRule from mock import patch from parameterized import parameterized DEFAULT_DATE = datetime.datetime(2016, 1, 1) TEST_DAGS_FOLDER = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'dags') class DagTest(unittest.TestCase): def test_parms_not_passed_is_empty_dict(self): """ Test that when 'params' is _not_ passed to a new Dag, that the params attribute is set to an empty dictionary. """ dag = models.DAG('test-dag') self.assertEqual(dict, type(dag.params)) self.assertEqual(0, len(dag.params)) def test_params_passed_and_params_in_default_args_no_override(self): """ Test that when 'params' exists as a key passed to the default_args dict in addition to params being passed explicitly as an argument to the dag, that the 'params' key of the default_args dict is merged with the dict of the params argument. """ params1 = {'parameter1': 1} params2 = {'parameter2': 2} dag = models.DAG('test-dag', default_args={'params': params1}, params=params2) params_combined = params1.copy() params_combined.update(params2) self.assertEqual(params_combined, dag.params) def test_dag_as_context_manager(self): """ Test DAG as a context manager. 
When used as a context manager, Operators are automatically added to the DAG (unless they specifiy a different DAG) """ dag = DAG( 'dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) dag2 = DAG( 'dag2', start_date=DEFAULT_DATE, default_args={'owner': 'owner2'}) with dag: op1 = DummyOperator(task_id='op1') op2 = DummyOperator(task_id='op2', dag=dag2) self.assertIs(op1.dag, dag) self.assertEqual(op1.owner, 'owner1') self.assertIs(op2.dag, dag2) self.assertEqual(op2.owner, 'owner2') with dag2: op3 = DummyOperator(task_id='op3') self.assertIs(op3.dag, dag2) self.assertEqual(op3.owner, 'owner2') with dag: with dag2: op4 = DummyOperator(task_id='op4') op5 = DummyOperator(task_id='op5') self.assertIs(op4.dag, dag2) self.assertIs(op5.dag, dag) self.assertEqual(op4.owner, 'owner2') self.assertEqual(op5.owner, 'owner1') with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag: DummyOperator(task_id='op6') self.assertEqual(dag.dag_id, 'creating_dag_in_cm') self.assertEqual(dag.tasks[0].task_id, 'op6') def test_dag_topological_sort(self): dag = DAG( 'dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # A -> B # A -> C -> D # ordered: B, D, C, A or D, B, C, A or D, C, B, A with dag: op1 = DummyOperator(task_id='A') op2 = DummyOperator(task_id='B') op3 = DummyOperator(task_id='C') op4 = DummyOperator(task_id='D') op1.set_upstream([op2, op3]) op3.set_upstream(op4) topological_list = dag.topological_sort() logging.info(topological_list) tasks = [op2, op3, op4] self.assertTrue(topological_list[0] in tasks) tasks.remove(topological_list[0]) self.assertTrue(topological_list[1] in tasks) tasks.remove(topological_list[1]) self.assertTrue(topological_list[2] in tasks) tasks.remove(topological_list[2]) self.assertTrue(topological_list[3] == op1) dag = DAG( 'dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # C -> (A u B) -> D # C -> E # ordered: E | D, A | B, C with dag: op1 = DummyOperator(task_id='A') op2 = DummyOperator(task_id='B') op3 = DummyOperator(task_id='C') op4 = DummyOperator(task_id='D') op5 = DummyOperator(task_id='E') op1.set_downstream(op3) op2.set_downstream(op3) op1.set_upstream(op4) op2.set_upstream(op4) op5.set_downstream(op3) topological_list = dag.topological_sort() logging.info(topological_list) set1 = [op4, op5] self.assertTrue(topological_list[0] in set1) set1.remove(topological_list[0]) set2 = [op1, op2] set2.extend(set1) self.assertTrue(topological_list[1] in set2) set2.remove(topological_list[1]) self.assertTrue(topological_list[2] in set2) set2.remove(topological_list[2]) self.assertTrue(topological_list[3] in set2) self.assertTrue(topological_list[4] == op3) dag = DAG( 'dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) self.assertEquals(tuple(), dag.topological_sort()) def test_get_num_task_instances(self): test_dag_id = 'test_get_num_task_instances_dag' test_task_id = 'task_1' test_dag = DAG(dag_id=test_dag_id, start_date=DEFAULT_DATE) test_task = DummyOperator(task_id=test_task_id, dag=test_dag) ti1 = TI(task=test_task, execution_date=DEFAULT_DATE) ti1.state = None ti2 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1)) ti2.state = State.RUNNING ti3 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=2)) ti3.state = State.QUEUED ti4 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=3)) ti4.state = State.RUNNING session = settings.Session() session.merge(ti1) session.merge(ti2) session.merge(ti3) session.merge(ti4) session.commit() 
self.assertEqual(0, DAG.get_num_task_instances(test_dag_id, ['fakename'], session=session)) self.assertEqual(4, DAG.get_num_task_instances(test_dag_id, [test_task_id], session=session)) self.assertEqual(4, DAG.get_num_task_instances(test_dag_id, ['fakename', test_task_id], session=session)) self.assertEqual(1, DAG.get_num_task_instances(test_dag_id, [test_task_id], states=[None], session=session)) self.assertEqual(2, DAG.get_num_task_instances(test_dag_id, [test_task_id], states=[State.RUNNING], session=session)) self.assertEqual(3, DAG.get_num_task_instances(test_dag_id, [test_task_id], states=[None, State.RUNNING], session=session)) self.assertEqual(4, DAG.get_num_task_instances(test_dag_id, [test_task_id], states=[None, State.QUEUED, State.RUNNING], session=session)) session.close() def test_render_template_field(self): """Tests if render_template from a field works""" dag = DAG('test-dag', start_date=DEFAULT_DATE) with dag: task = DummyOperator(task_id='op1') result = task.render_template('', '{{ foo }}', dict(foo='bar')) self.assertEqual(result, 'bar') def test_render_template_field_macro(self): """ Tests if render_template from a field works, if a custom filter was defined""" dag = DAG('test-dag', start_date=DEFAULT_DATE, user_defined_macros = dict(foo='bar')) with dag: task = DummyOperator(task_id='op1') result = task.render_template('', '{{ foo }}', dict()) self.assertEqual(result, 'bar') def test_user_defined_filters(self): def jinja_udf(name): return 'Hello %s' %name dag = models.DAG('test-dag', start_date=DEFAULT_DATE, user_defined_filters=dict(hello=jinja_udf)) jinja_env = dag.get_template_env() self.assertIn('hello', jinja_env.filters) self.assertEqual(jinja_env.filters['hello'], jinja_udf) def test_render_template_field_filter(self): """ Tests if render_template from a field works, if a custom filter was defined""" def jinja_udf(name): return 'Hello %s' %name dag = DAG('test-dag', start_date=DEFAULT_DATE, user_defined_filters = dict(hello=jinja_udf)) with dag: task = DummyOperator(task_id='op1') result = task.render_template('', "{{ 'world' | hello}}", dict()) self.assertEqual(result, 'Hello world') class DagStatTest(unittest.TestCase): def test_dagstats_crud(self): DagStat.create(dag_id='test_dagstats_crud') session = settings.Session() qry = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud') self.assertEqual(len(qry.all()), len(State.dag_states)) DagStat.set_dirty(dag_id='test_dagstats_crud') res = qry.all() for stat in res: self.assertTrue(stat.dirty) # create missing DagStat.set_dirty(dag_id='test_dagstats_crud_2') qry2 = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud_2') self.assertEqual(len(qry2.all()), len(State.dag_states)) dag = DAG( 'test_dagstats_crud', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) with dag: op1 = DummyOperator(task_id='A') now = datetime.datetime.now() dr = dag.create_dagrun( run_id='manual__' + now.isoformat(), execution_date=now, start_date=now, state=State.FAILED, external_trigger=False, ) DagStat.update(dag_ids=['test_dagstats_crud']) res = qry.all() for stat in res: if stat.state == State.FAILED: self.assertEqual(stat.count, 1) else: self.assertEqual(stat.count, 0) DagStat.update() res = qry2.all() for stat in res: self.assertFalse(stat.dirty) class DagRunTest(unittest.TestCase): def create_dag_run(self, dag, state=State.RUNNING, task_states=None, execution_date=None): now = datetime.datetime.now() if execution_date is None: execution_date = now dag_run = dag.create_dagrun( 
run_id='manual__' + now.isoformat(), execution_date=execution_date, start_date=now, state=state, external_trigger=False, ) if task_states is not None: session = settings.Session() for task_id, state in task_states.items(): ti = dag_run.get_task_instance(task_id) ti.set_state(state, session) session.close() return dag_run def test_id_for_date(self): run_id = models.DagRun.id_for_date( datetime.datetime(2015, 1, 2, 3, 4, 5, 6, None)) self.assertEqual( 'scheduled__2015-01-02T03:04:05', run_id, 'Generated run_id did not match expectations: {0}'.format(run_id)) def test_dagrun_find(self): session = settings.Session() now = datetime.datetime.now() dag_id1 = "test_dagrun_find_externally_triggered" dag_run = models.DagRun( dag_id=dag_id1, run_id='manual__' + now.isoformat(), execution_date=now, start_date=now, state=State.RUNNING, external_trigger=True, ) session.add(dag_run) dag_id2 = "test_dagrun_find_not_externally_triggered" dag_run = models.DagRun( dag_id=dag_id2, run_id='manual__' + now.isoformat(), execution_date=now, start_date=now, state=State.RUNNING, external_trigger=False, ) session.add(dag_run) session.commit() self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id1, external_trigger=True))) self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id1, external_trigger=False))) self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id2, external_trigger=True))) self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id2, external_trigger=False))) def test_dagrun_success_when_all_skipped(self): """ Tests that a DAG run succeeds when all tasks are skipped """ dag = DAG( dag_id='test_dagrun_success_when_all_skipped', start_date=datetime.datetime(2017, 1, 1) ) dag_task1 = ShortCircuitOperator( task_id='test_short_circuit_false', dag=dag, python_callable=lambda: False) dag_task2 = DummyOperator( task_id='test_state_skipped1', dag=dag) dag_task3 = DummyOperator( task_id='test_state_skipped2', dag=dag) dag_task1.set_downstream(dag_task2) dag_task2.set_downstream(dag_task3) initial_task_states = { 'test_short_circuit_false': State.SUCCESS, 'test_state_skipped1': State.SKIPPED, 'test_state_skipped2': State.SKIPPED, } dag_run = self.create_dag_run(dag=dag, state=State.RUNNING, task_states=initial_task_states) updated_dag_state = dag_run.update_state() self.assertEqual(State.SUCCESS, updated_dag_state) def test_dagrun_success_conditions(self): session = settings.Session() dag = DAG( 'test_dagrun_success_conditions', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # A -> B # A -> C -> D # ordered: B, D, C, A or D, B, C, A or D, C, B, A with dag: op1 = DummyOperator(task_id='A') op2 = DummyOperator(task_id='B') op3 = DummyOperator(task_id='C') op4 = DummyOperator(task_id='D') op1.set_upstream([op2, op3]) op3.set_upstream(op4) dag.clear() now = datetime.datetime.now() dr = dag.create_dagrun(run_id='test_dagrun_success_conditions', state=State.RUNNING, execution_date=now, start_date=now) # op1 = root ti_op1 = dr.get_task_instance(task_id=op1.task_id) ti_op1.set_state(state=State.SUCCESS, session=session) ti_op2 = dr.get_task_instance(task_id=op2.task_id) ti_op3 = dr.get_task_instance(task_id=op3.task_id) ti_op4 = dr.get_task_instance(task_id=op4.task_id) # root is successful, but unfinished tasks state = dr.update_state() self.assertEqual(State.RUNNING, state) # one has failed, but root is successful ti_op2.set_state(state=State.FAILED, session=session) ti_op3.set_state(state=State.SUCCESS, session=session) ti_op4.set_state(state=State.SUCCESS, session=session) state = dr.update_state() 
self.assertEqual(State.SUCCESS, state) def test_dagrun_deadlock(self): session = settings.Session() dag = DAG( 'text_dagrun_deadlock', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) with dag: op1 = DummyOperator(task_id='A') op2 = DummyOperator(task_id='B') op2.trigger_rule = TriggerRule.ONE_FAILED op2.set_upstream(op1) dag.clear() now = datetime.datetime.now() dr = dag.create_dagrun(run_id='test_dagrun_deadlock', state=State.RUNNING, execution_date=now, start_date=now) ti_op1 = dr.get_task_instance(task_id=op1.task_id) ti_op1.set_state(state=State.SUCCESS, session=session) ti_op2 = dr.get_task_instance(task_id=op2.task_id) ti_op2.set_state(state=State.NONE, session=session) dr.update_state() self.assertEqual(dr.state, State.RUNNING) ti_op2.set_state(state=State.NONE, session=session) op2.trigger_rule = 'invalid' dr.update_state() self.assertEqual(dr.state, State.FAILED) def test_get_task_instance_on_empty_dagrun(self): """ Make sure that a proper value is returned when a dagrun has no task instances """ dag = DAG( dag_id='test_get_task_instance_on_empty_dagrun', start_date=datetime.datetime(2017, 1, 1) ) dag_task1 = ShortCircuitOperator( task_id='test_short_circuit_false', dag=dag, python_callable=lambda: False) session = settings.Session() now = datetime.datetime.now() # Don't use create_dagrun since it will create the task instances too which we # don't want dag_run = models.DagRun( dag_id=dag.dag_id, run_id='manual__' + now.isoformat(), execution_date=now, start_date=now, state=State.RUNNING, external_trigger=False, ) session.add(dag_run) session.commit() ti = dag_run.get_task_instance('test_short_circuit_false') self.assertEqual(None, ti) def test_get_latest_runs(self): session = settings.Session() dag = DAG( dag_id='test_latest_runs_1', start_date=DEFAULT_DATE) dag_1_run_1 = self.create_dag_run(dag, execution_date=datetime.datetime(2015, 1, 1)) dag_1_run_2 = self.create_dag_run(dag, execution_date=datetime.datetime(2015, 1, 2)) dagruns = models.DagRun.get_latest_runs(session) session.close() for dagrun in dagruns: if dagrun.dag_id == 'test_latest_runs_1': self.assertEqual(dagrun.execution_date, datetime.datetime(2015, 1, 2)) def test_is_backfill(self): dag = DAG(dag_id='test_is_backfill', start_date=DEFAULT_DATE) dagrun = self.create_dag_run(dag, execution_date=DEFAULT_DATE) dagrun.run_id = BackfillJob.ID_PREFIX + '_sfddsffds' dagrun2 = self.create_dag_run(dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=1)) self.assertTrue(dagrun.is_backfill) self.assertFalse(dagrun2.is_backfill) class DagBagTest(unittest.TestCase): def test_get_existing_dag(self): """ test that were're able to parse some example DAGs and retrieve them """ dagbag = models.DagBag(include_examples=True) some_expected_dag_ids = ["example_bash_operator", "example_branch_operator"] for dag_id in some_expected_dag_ids: dag = dagbag.get_dag(dag_id) self.assertIsNotNone(dag) self.assertEqual(dag_id, dag.dag_id) self.assertGreaterEqual(dagbag.size(), 7) def test_get_non_existing_dag(self): """ test that retrieving a non existing dag id returns None without crashing """ dagbag = models.DagBag(include_examples=True) non_existing_dag_id = "non_existing_dag_id" self.assertIsNone(dagbag.get_dag(non_existing_dag_id)) def test_process_file_that_contains_multi_bytes_char(self): """ test that we're able to parse file that contains multi-byte char """ from tempfile import NamedTemporaryFile f = NamedTemporaryFile() f.write('\u3042'.encode('utf8')) # write multi-byte char (hiragana) f.flush() dagbag = 
models.DagBag(include_examples=True) self.assertEqual([], dagbag.process_file(f.name)) def test_zip(self): """ test the loading of a DAG within a zip file that includes dependencies """ dagbag = models.DagBag() dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip")) self.assertTrue(dagbag.get_dag("test_zip_dag")) @patch.object(DagModel,'get_current') def test_get_dag_without_refresh(self, mock_dagmodel): """ Test that, once a DAG is loaded, it doesn't get refreshed again if it hasn't been expired. """ dag_id = 'example_bash_operator' mock_dagmodel.return_value = DagModel() mock_dagmodel.return_value.last_expired = None mock_dagmodel.return_value.fileloc = 'foo' class TestDagBag(models.DagBag): process_file_calls = 0 def process_file(self, filepath, only_if_updated=True, safe_mode=True): if 'example_bash_operator.py' == os.path.basename(filepath): TestDagBag.process_file_calls += 1 super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode) dagbag = TestDagBag(include_examples=True) processed_files = dagbag.process_file_calls # Should not call process_file agani, since it's already loaded during init. self.assertEqual(1, dagbag.process_file_calls) self.assertIsNotNone(dagbag.get_dag(dag_id)) self.assertEqual(1, dagbag.process_file_calls) def test_get_dag_fileloc(self): """ Test that fileloc is correctly set when we load example DAGs, specifically SubDAGs. """ dagbag = models.DagBag(include_examples=True) expected = { 'example_bash_operator': 'example_bash_operator.py', 'example_subdag_operator': 'example_subdag_operator.py', 'example_subdag_operator.section-1': 'subdags/subdag.py' } for dag_id, path in expected.items(): dag = dagbag.get_dag(dag_id) self.assertTrue( dag.fileloc.endswith('airflow/example_dags/' + path)) class TaskInstanceTest(unittest.TestCase): def test_set_task_dates(self): """ Test that tasks properly take start/end dates from DAGs """ dag = DAG('dag', start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10)) op1 = DummyOperator(task_id='op_1', owner='test') self.assertTrue(op1.start_date is None and op1.end_date is None) # dag should assign its dates to op1 because op1 has no dates dag.add_task(op1) self.assertTrue( op1.start_date == dag.start_date and op1.end_date == dag.end_date) op2 = DummyOperator( task_id='op_2', owner='test', start_date=DEFAULT_DATE - datetime.timedelta(days=1), end_date=DEFAULT_DATE + datetime.timedelta(days=11)) # dag should assign its dates to op2 because they are more restrictive dag.add_task(op2) self.assertTrue( op2.start_date == dag.start_date and op2.end_date == dag.end_date) op3 = DummyOperator( task_id='op_3', owner='test', start_date=DEFAULT_DATE + datetime.timedelta(days=1), end_date=DEFAULT_DATE + datetime.timedelta(days=9)) # op3 should keep its dates because they are more restrictive dag.add_task(op3) self.assertTrue( op3.start_date == DEFAULT_DATE + datetime.timedelta(days=1)) self.assertTrue( op3.end_date == DEFAULT_DATE + datetime.timedelta(days=9)) def test_set_dag(self): """ Test assigning Operators to Dags, including deferred assignment """ dag = DAG('dag', start_date=DEFAULT_DATE) dag2 = DAG('dag2', start_date=DEFAULT_DATE) op = DummyOperator(task_id='op_1', owner='test') # no dag assigned self.assertFalse(op.has_dag()) self.assertRaises(AirflowException, getattr, op, 'dag') # no improper assignment with self.assertRaises(TypeError): op.dag = 1 op.dag = dag # no reassignment with self.assertRaises(AirflowException): op.dag = dag2 # but assigning the same dag is ok op.dag = dag 
self.assertIs(op.dag, dag) self.assertIn(op, dag.tasks) def test_infer_dag(self): dag = DAG('dag', start_date=DEFAULT_DATE) dag2 = DAG('dag2', start_date=DEFAULT_DATE) op1 = DummyOperator(task_id='test_op_1', owner='test') op2 = DummyOperator(task_id='test_op_2', owner='test') op3 = DummyOperator(task_id='test_op_3', owner='test', dag=dag) op4 = DummyOperator(task_id='test_op_4', owner='test', dag=dag2) # double check dags self.assertEqual( [i.has_dag() for i in [op1, op2, op3, op4]], [False, False, True, True]) # can't combine operators with no dags self.assertRaises(AirflowException, op1.set_downstream, op2) # op2 should infer dag from op1 op1.dag = dag op1.set_downstream(op2) self.assertIs(op2.dag, dag) # can't assign across multiple DAGs self.assertRaises(AirflowException, op1.set_downstream, op4) self.assertRaises(AirflowException, op1.set_downstream, [op3, op4]) def test_bitshift_compose_operators(self): dag = DAG('dag', start_date=DEFAULT_DATE) op1 = DummyOperator(task_id='test_op_1', owner='test') op2 = DummyOperator(task_id='test_op_2', owner='test') op3 = DummyOperator(task_id='test_op_3', owner='test') op4 = DummyOperator(task_id='test_op_4', owner='test') op5 = DummyOperator(task_id='test_op_5', owner='test') # can't compose operators without dags with self.assertRaises(AirflowException): op1 >> op2 dag >> op1 >> op2 << op3 # make sure dag assignment carries through # using __rrshift__ self.assertIs(op1.dag, dag) self.assertIs(op2.dag, dag) self.assertIs(op3.dag, dag) # op2 should be downstream of both self.assertIn(op2, op1.downstream_list) self.assertIn(op2, op3.downstream_list) # test dag assignment with __rlshift__ dag << op4 self.assertIs(op4.dag, dag) # dag assignment with __rrshift__ dag >> op5 self.assertIs(op5.dag, dag) @patch.object(DAG, 'concurrency_reached') def test_requeue_over_concurrency(self, mock_concurrency_reached): mock_concurrency_reached.return_value = True dag = DAG(dag_id='test_requeue_over_concurrency', start_date=DEFAULT_DATE, max_active_runs=1, concurrency=2) task = DummyOperator(task_id='test_requeue_over_concurrency_op', dag=dag) ti = TI(task=task, execution_date=datetime.datetime.now()) ti.run() self.assertEqual(ti.state, models.State.NONE) @patch.object(TI, 'pool_full') def test_run_pooling_task(self, mock_pool_full): """ test that running task update task state as without running task. (no dependency check in ti_deps anymore, so also -> SUCCESS) """ # Mock the pool out with a full pool because the pool doesn't actually exist mock_pool_full.return_value = True dag = models.DAG(dag_id='test_run_pooling_task') task = DummyOperator(task_id='test_run_pooling_task_op', dag=dag, pool='test_run_pooling_task_pool', owner='airflow', start_date=datetime.datetime(2016, 2, 1, 0, 0, 0)) ti = TI( task=task, execution_date=datetime.datetime.now()) ti.run() self.assertEqual(ti.state, models.State.SUCCESS) @patch.object(TI, 'pool_full') def test_run_pooling_task_with_mark_success(self, mock_pool_full): """ test that running task with mark_success param update task state as SUCCESS without running task. 
""" # Mock the pool out with a full pool because the pool doesn't actually exist mock_pool_full.return_value = True dag = models.DAG(dag_id='test_run_pooling_task_with_mark_success') task = DummyOperator( task_id='test_run_pooling_task_with_mark_success_op', dag=dag, pool='test_run_pooling_task_with_mark_success_pool', owner='airflow', start_date=datetime.datetime(2016, 2, 1, 0, 0, 0)) ti = TI( task=task, execution_date=datetime.datetime.now()) ti.run(mark_success=True) self.assertEqual(ti.state, models.State.SUCCESS) def test_run_pooling_task_with_skip(self): """ test that running task which returns AirflowSkipOperator will end up in a SKIPPED state. """ def raise_skip_exception(): raise AirflowSkipException dag = models.DAG(dag_id='test_run_pooling_task_with_skip') task = PythonOperator( task_id='test_run_pooling_task_with_skip', dag=dag, python_callable=raise_skip_exception, owner='airflow', start_date=datetime.datetime(2016, 2, 1, 0, 0, 0)) ti = TI( task=task, execution_date=datetime.datetime.now()) ti.run() self.assertTrue(ti.state == models.State.SKIPPED) def test_retry_delay(self): """ Test that retry delays are respected """ dag = models.DAG(dag_id='test_retry_handling') task = BashOperator( task_id='test_retry_handling_op', bash_command='exit 1', retries=1, retry_delay=datetime.timedelta(seconds=3), dag=dag, owner='airflow', start_date=datetime.datetime(2016, 2, 1, 0, 0, 0)) def run_with_error(ti): try: ti.run() except AirflowException: pass ti = TI( task=task, execution_date=datetime.datetime.now()) # first run -- up for retry run_with_error(ti) self.assertEqual(ti.state, State.UP_FOR_RETRY) self.assertEqual(ti.try_number, 1) # second run -- still up for retry because retry_delay hasn't expired run_with_error(ti) self.assertEqual(ti.state, State.UP_FOR_RETRY) # third run -- failed time.sleep(3) run_with_error(ti) self.assertEqual(ti.state, State.FAILED) @patch.object(TI, 'pool_full') def test_retry_handling(self, mock_pool_full): """ Test that task retries are handled properly """ # Mock the pool with a pool with slots open since the pool doesn't actually exist mock_pool_full.return_value = False dag = models.DAG(dag_id='test_retry_handling') task = BashOperator( task_id='test_retry_handling_op', bash_command='exit 1', retries=1, retry_delay=datetime.timedelta(seconds=0), dag=dag, owner='airflow', start_date=datetime.datetime(2016, 2, 1, 0, 0, 0)) def run_with_error(ti): try: ti.run() except AirflowException: pass ti = TI( task=task, execution_date=datetime.datetime.now()) # first run -- up for retry run_with_error(ti) self.assertEqual(ti.state, State.UP_FOR_RETRY) self.assertEqual(ti.try_number, 1) # second run -- fail run_with_error(ti) self.assertEqual(ti.state, State.FAILED) self.assertEqual(ti.try_number, 2) # Clear the TI state since you can't run a task with a FAILED state without # clearing it first dag.clear() # third run -- up for retry run_with_error(ti) self.assertEqual(ti.state, State.UP_FOR_RETRY) self.assertEqual(ti.try_number, 3) # fourth run -- fail run_with_error(ti) self.assertEqual(ti.state, State.FAILED) self.assertEqual(ti.try_number, 4) def test_next_retry_datetime(self): delay = datetime.timedelta(seconds=30) max_delay = datetime.timedelta(minutes=60) dag = models.DAG(dag_id='fail_dag') task = BashOperator( task_id='task_with_exp_backoff_and_max_delay', bash_command='exit 1', retries=3, retry_delay=delay, retry_exponential_backoff=True, max_retry_delay=max_delay, dag=dag, owner='airflow', start_date=datetime.datetime(2016, 2, 1, 0, 0, 0)) ti = TI( 
task=task, execution_date=DEFAULT_DATE) ti.end_date = datetime.datetime.now() ti.try_number = 1 dt = ti.next_retry_datetime() # between 30 * 2^0.5 and 30 * 2^1 (15 and 30) self.assertEqual(dt, ti.end_date + datetime.timedelta(seconds=20.0)) ti.try_number = 4 dt = ti.next_retry_datetime() # between 30 * 2^2 and 30 * 2^3 (120 and 240) self.assertEqual(dt, ti.end_date + datetime.timedelta(seconds=181.0)) ti.try_number = 6 dt = ti.next_retry_datetime() # between 30 * 2^4 and 30 * 2^5 (480 and 960) self.assertEqual(dt, ti.end_date + datetime.timedelta(seconds=825.0)) ti.try_number = 9 dt = ti.next_retry_datetime() self.assertEqual(dt, ti.end_date+max_delay) ti.try_number = 50 dt = ti.next_retry_datetime() self.assertEqual(dt, ti.end_date+max_delay) def test_depends_on_past(self): dagbag = models.DagBag() dag = dagbag.get_dag('test_depends_on_past') dag.clear() task = dag.tasks[0] run_date = task.start_date + datetime.timedelta(days=5) ti = TI(task, run_date) # depends_on_past prevents the run task.run(start_date=run_date, end_date=run_date) ti.refresh_from_db() self.assertIs(ti.state, None) # ignore first depends_on_past to allow the run task.run( start_date=run_date, end_date=run_date, ignore_first_depends_on_past=True) ti.refresh_from_db() self.assertEqual(ti.state, State.SUCCESS) # Parameterized tests to check for the correct firing # of the trigger_rule under various circumstances # Numeric fields are in order: # successes, skipped, failed, upstream_failed, done @parameterized.expand([ # # Tests for all_success # ['all_success', 5, 0, 0, 0, 0, True, None, True], ['all_success', 2, 0, 0, 0, 0, True, None, False], ['all_success', 2, 0, 1, 0, 0, True, ST.UPSTREAM_FAILED, False], ['all_success', 2, 1, 0, 0, 0, True, ST.SKIPPED, False], # # Tests for one_success # ['one_success', 5, 0, 0, 0, 5, True, None, True], ['one_success', 2, 0, 0, 0, 2, True, None, True], ['one_success', 2, 0, 1, 0, 3, True, None, True], ['one_success', 2, 1, 0, 0, 3, True, None, True], # # Tests for all_failed # ['all_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False], ['all_failed', 0, 0, 5, 0, 5, True, None, True], ['all_failed', 2, 0, 0, 0, 2, True, ST.SKIPPED, False], ['all_failed', 2, 0, 1, 0, 3, True, ST.SKIPPED, False], ['all_failed', 2, 1, 0, 0, 3, True, ST.SKIPPED, False], # # Tests for one_failed # ['one_failed', 5, 0, 0, 0, 0, True, None, False], ['one_failed', 2, 0, 0, 0, 0, True, None, False], ['one_failed', 2, 0, 1, 0, 0, True, None, True], ['one_failed', 2, 1, 0, 0, 3, True, None, False], ['one_failed', 2, 3, 0, 0, 5, True, ST.SKIPPED, False], # # Tests for done # ['all_done', 5, 0, 0, 0, 5, True, None, True], ['all_done', 2, 0, 0, 0, 2, True, None, False], ['all_done', 2, 0, 1, 0, 3, True, None, False], ['all_done', 2, 1, 0, 0, 3, True, None, False] ]) def test_check_task_dependencies(self, trigger_rule, successes, skipped, failed, upstream_failed, done, flag_upstream_failed, expect_state, expect_completed): start_date = datetime.datetime(2016, 2, 1, 0, 0, 0) dag = models.DAG('test-dag', start_date=start_date) downstream = DummyOperator(task_id='downstream', dag=dag, owner='airflow', trigger_rule=trigger_rule) for i in range(5): task = DummyOperator(task_id='runme_{}'.format(i), dag=dag, owner='airflow') task.set_downstream(downstream) run_date = task.start_date + datetime.timedelta(days=5) ti = TI(downstream, run_date) dep_results = TriggerRuleDep()._evaluate_trigger_rule( ti=ti, successes=successes, skipped=skipped, failed=failed, upstream_failed=upstream_failed, done=done, 
flag_upstream_failed=flag_upstream_failed) completed = all([dep.passed for dep in dep_results]) self.assertEqual(completed, expect_completed) self.assertEqual(ti.state, expect_state) def test_xcom_pull_after_success(self): """ tests xcom set/clear relative to a task in a 'success' rerun scenario """ key = 'xcom_key' value = 'xcom_value' dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly') task = DummyOperator( task_id='test_xcom', dag=dag, pool='test_xcom', owner='airflow', start_date=datetime.datetime(2016, 6, 2, 0, 0, 0)) exec_date = datetime.datetime.now() ti = TI( task=task, execution_date=exec_date) ti.run(mark_success=True) ti.xcom_push(key=key, value=value) self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value) ti.run() # The second run and assert is to handle AIRFLOW-131 (don't clear on # prior success) self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value) # Test AIRFLOW-703: Xcom shouldn't be cleared if the task doesn't # execute, even if dependencies are ignored ti.run(ignore_all_deps=True, mark_success=True) self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value) # Xcom IS finally cleared once task has executed ti.run(ignore_all_deps=True) self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None) def test_xcom_pull_different_execution_date(self): """ tests xcom fetch behavior with different execution dates, using both xcom_pull with "include_prior_dates" and without """ key = 'xcom_key' value = 'xcom_value' dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly') task = DummyOperator( task_id='test_xcom', dag=dag, pool='test_xcom', owner='airflow', start_date=datetime.datetime(2016, 6, 2, 0, 0, 0)) exec_date = datetime.datetime.now() ti = TI( task=task, execution_date=exec_date) ti.run(mark_success=True) ti.xcom_push(key=key, value=value) self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value) ti.run() exec_date += datetime.timedelta(days=1) ti = TI( task=task, execution_date=exec_date) ti.run() # We have set a new execution date (and did not pass in # 'include_prior_dates'which means this task should now have a cleared # xcom value self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None) # We *should* get a value using 'include_prior_dates' self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key, include_prior_dates=True), value) def test_post_execute_hook(self): """ Test that post_execute hook is called with the Operator's result. The result ('error') will cause an error to be raised and trapped. 
""" class TestError(Exception): pass class TestOperator(PythonOperator): def post_execute(self, context, result): if result == 'error': raise TestError('expected error.') dag = models.DAG(dag_id='test_post_execute_dag') task = TestOperator( task_id='test_operator', dag=dag, python_callable=lambda: 'error', owner='airflow', start_date=datetime.datetime(2017, 2, 1)) ti = TI(task=task, execution_date=datetime.datetime.now()) with self.assertRaises(TestError): ti.run() class ClearTasksTest(unittest.TestCase): def test_clear_task_instances(self): dag = DAG('test_clear_task_instances', start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10)) task0 = DummyOperator(task_id='0', owner='test', dag=dag) task1 = DummyOperator(task_id='1', owner='test', dag=dag, retries=2) ti0 = TI(task=task0, execution_date=DEFAULT_DATE) ti1 = TI(task=task1, execution_date=DEFAULT_DATE) ti0.run() ti1.run() session = settings.Session() qry = session.query(TI).filter( TI.dag_id == dag.dag_id).all() clear_task_instances(qry, session, dag=dag) session.commit() ti0.refresh_from_db() ti1.refresh_from_db() self.assertEqual(ti0.try_number, 1) self.assertEqual(ti0.max_tries, 1) self.assertEqual(ti1.try_number, 1) self.assertEqual(ti1.max_tries, 3) def test_clear_task_instances_without_task(self): dag = DAG('test_clear_task_instances_without_task', start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10)) task0 = DummyOperator(task_id='task0', owner='test', dag=dag) task1 = DummyOperator(task_id='task1', owner='test', dag=dag, retries=2) ti0 = TI(task=task0, execution_date=DEFAULT_DATE) ti1 = TI(task=task1, execution_date=DEFAULT_DATE) ti0.run() ti1.run() # Remove the task from dag. dag.task_dict = {} self.assertFalse(dag.has_task(task0.task_id)) self.assertFalse(dag.has_task(task1.task_id)) session = settings.Session() qry = session.query(TI).filter( TI.dag_id == dag.dag_id).all() clear_task_instances(qry, session) session.commit() # When dag is None, max_tries will be maximum of original max_tries or try_number. ti0.refresh_from_db() ti1.refresh_from_db() self.assertEqual(ti0.try_number, 1) self.assertEqual(ti0.max_tries, 1) self.assertEqual(ti1.try_number, 1) self.assertEqual(ti1.max_tries, 2) def test_clear_task_instances_without_dag(self): dag = DAG('test_clear_task_instances_without_dag', start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10)) task0 = DummyOperator(task_id='task_0', owner='test', dag=dag) task1 = DummyOperator(task_id='task_1', owner='test', dag=dag, retries=2) ti0 = TI(task=task0, execution_date=DEFAULT_DATE) ti1 = TI(task=task1, execution_date=DEFAULT_DATE) ti0.run() ti1.run() session = settings.Session() qry = session.query(TI).filter( TI.dag_id == dag.dag_id).all() clear_task_instances(qry, session) session.commit() # When dag is None, max_tries will be maximum of original max_tries or try_number. 
ti0.refresh_from_db() ti1.refresh_from_db() self.assertEqual(ti0.try_number, 1) self.assertEqual(ti0.max_tries, 1) self.assertEqual(ti1.try_number, 1) self.assertEqual(ti1.max_tries, 2) def test_dag_clear(self): dag = DAG('test_dag_clear', start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10)) task0 = DummyOperator(task_id='test_dag_clear_task_0', owner='test', dag=dag) ti0 = TI(task=task0, execution_date=DEFAULT_DATE) self.assertEqual(ti0.try_number, 0) ti0.run() self.assertEqual(ti0.try_number, 1) dag.clear() ti0.refresh_from_db() self.assertEqual(ti0.try_number, 1) self.assertEqual(ti0.state, State.NONE) self.assertEqual(ti0.max_tries, 1) task1 = DummyOperator(task_id='test_dag_clear_task_1', owner='test', dag=dag, retries=2) ti1 = TI(task=task1, execution_date=DEFAULT_DATE) self.assertEqual(ti1.max_tries, 2) ti1.try_number = 1 ti1.run() self.assertEqual(ti1.try_number, 2) self.assertEqual(ti1.max_tries, 2) dag.clear() ti0.refresh_from_db() ti1.refresh_from_db() # after clear dag, ti2 should show attempt 3 of 5 self.assertEqual(ti1.max_tries, 4) self.assertEqual(ti1.try_number, 2) # after clear dag, ti1 should show attempt 2 of 2 self.assertEqual(ti0.try_number, 1) self.assertEqual(ti0.max_tries, 1) def test_dags_clear(self): # setup session = settings.Session() dags, tis = [], [] num_of_dags = 5 for i in range(num_of_dags): dag = DAG('test_dag_clear_' + str(i), start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10)) ti = TI(task=DummyOperator(task_id='test_task_clear_' + str(i), owner='test', dag=dag), execution_date=DEFAULT_DATE) dags.append(dag) tis.append(ti) # test clear all dags for i in range(num_of_dags): tis[i].run() self.assertEqual(tis[i].state, State.SUCCESS) self.assertEqual(tis[i].try_number, 1) self.assertEqual(tis[i].max_tries, 0) DAG.clear_dags(dags) for i in range(num_of_dags): tis[i].refresh_from_db() self.assertEqual(tis[i].state, State.NONE) self.assertEqual(tis[i].try_number, 1) self.assertEqual(tis[i].max_tries, 1) # test dry_run for i in range(num_of_dags): tis[i].run() self.assertEqual(tis[i].state, State.SUCCESS) self.assertEqual(tis[i].try_number, 2) self.assertEqual(tis[i].max_tries, 1) DAG.clear_dags(dags, dry_run=True) for i in range(num_of_dags): tis[i].refresh_from_db() self.assertEqual(tis[i].state, State.SUCCESS) self.assertEqual(tis[i].try_number, 2) self.assertEqual(tis[i].max_tries, 1) # test only_failed from random import randint failed_dag_idx = randint(0, len(tis) - 1) tis[failed_dag_idx].state = State.FAILED session.merge(tis[failed_dag_idx]) session.commit() DAG.clear_dags(dags, only_failed=True) for i in range(num_of_dags): tis[i].refresh_from_db() if i != failed_dag_idx: self.assertEqual(tis[i].state, State.SUCCESS) self.assertEqual(tis[i].try_number, 2) self.assertEqual(tis[i].max_tries, 1) else: self.assertEqual(tis[i].state, State.NONE) self.assertEqual(tis[i].try_number, 2) self.assertEqual(tis[i].max_tries, 2) def test_operator_clear(self): dag = DAG('test_operator_clear', start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10)) t1 = DummyOperator(task_id='bash_op', owner='test', dag=dag) t2 = DummyOperator(task_id='dummy_op', owner='test', dag=dag, retries=1) t2.set_upstream(t1) ti1 = TI(task=t1, execution_date=DEFAULT_DATE) ti2 = TI(task=t2, execution_date=DEFAULT_DATE) ti2.run() # Dependency not met self.assertEqual(ti2.try_number, 0) self.assertEqual(ti2.max_tries, 1) t2.clear(upstream=True) ti1.run() ti2.run() self.assertEqual(ti1.try_number, 1) # max_tries is 0 
because there is no task instance in db for ti1 # so clear won't change the max_tries. self.assertEqual(ti1.max_tries, 0) self.assertEqual(ti2.try_number, 1) # try_number (0) + retries(1) self.assertEqual(ti2.max_tries, 1) def test_xcom_disable_pickle_type(self): json_obj = {"key": "value"} execution_date = datetime.datetime.now() key = "xcom_test1" dag_id = "test_dag1" task_id = "test_task1" XCom.set(key=key, value=json_obj, dag_id=dag_id, task_id=task_id, execution_date=execution_date, enable_pickling=False) ret_value = XCom.get_one(key=key, dag_id=dag_id, task_id=task_id, execution_date=execution_date, enable_pickling=False) self.assertEqual(ret_value, json_obj) def test_xcom_enable_pickle_type(self): json_obj = {"key": "value"} execution_date = datetime.datetime.now() key = "xcom_test2" dag_id = "test_dag2" task_id = "test_task2" XCom.set(key=key, value=json_obj, dag_id=dag_id, task_id=task_id, execution_date=execution_date, enable_pickling=True) ret_value = XCom.get_one(key=key, dag_id=dag_id, task_id=task_id, execution_date=execution_date, enable_pickling=True) self.assertEqual(ret_value, json_obj) def test_xcom_disable_pickle_type_fail_on_non_json(self): class PickleRce(object): def __reduce__(self): return (os.system, ("ls -alt",)) self.assertRaises(TypeError, XCom.set, key="xcom_test3", value=PickleRce(), dag_id="test_dag3", task_id="test_task3", execution_date=datetime.datetime.now(), enable_pickling=False) def test_xcom_get_many(self): json_obj = {"key": "value"} execution_date = datetime.datetime.now() key = "xcom_test4" dag_id1 = "test_dag4" task_id1 = "test_task4" dag_id2 = "test_dag5" task_id2 = "test_task5" XCom.set(key=key, value=json_obj, dag_id=dag_id1, task_id=task_id1, execution_date=execution_date, enable_pickling=True) XCom.set(key=key, value=json_obj, dag_id=dag_id2, task_id=task_id2, execution_date=execution_date, enable_pickling=True) results = XCom.get_many(key=key, execution_date=execution_date, enable_pickling=True) for result in results: self.assertEqual(result.value, json_obj)
MetrodataTeam/incubator-airflow
tests/models.py
Python
apache-2.0
53,455
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class clusternodegroup_streamidentifier_binding(base_resource) : """ Binding class showing the streamidentifier that can be bound to clusternodegroup. """ def __init__(self) : self._identifiername = "" self._name = "" self.___count = 0 @property def name(self) : ur"""Name of the nodegroup to which you want to bind a cluster node or an entity.<br/>Minimum length = 1. """ try : return self._name except Exception as e: raise e @name.setter def name(self, name) : ur"""Name of the nodegroup to which you want to bind a cluster node or an entity.<br/>Minimum length = 1 """ try : self._name = name except Exception as e: raise e @property def identifiername(self) : ur"""stream identifier and rate limit identifier that need to be bound to this nodegroup. """ try : return self._identifiername except Exception as e: raise e @identifiername.setter def identifiername(self, identifiername) : ur"""stream identifier and rate limit identifier that need to be bound to this nodegroup. """ try : self._identifiername = identifiername except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(clusternodegroup_streamidentifier_binding_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.clusternodegroup_streamidentifier_binding except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : if self.name is not None : return str(self.name) return None except Exception as e : raise e @classmethod def add(cls, client, resource) : try : if resource and type(resource) is not list : updateresource = clusternodegroup_streamidentifier_binding() updateresource.name = resource.name updateresource.identifiername = resource.identifiername return updateresource.update_resource(client) else : if resource and len(resource) > 0 : updateresources = [clusternodegroup_streamidentifier_binding() for _ in range(len(resource))] for i in range(len(resource)) : updateresources[i].name = resource[i].name updateresources[i].identifiername = resource[i].identifiername return cls.update_bulk_request(client, updateresources) except Exception as e : raise e @classmethod def delete(cls, client, resource) : try : if resource and type(resource) is not list : deleteresource = clusternodegroup_streamidentifier_binding() deleteresource.name = resource.name deleteresource.identifiername = resource.identifiername return deleteresource.delete_resource(client) else : if resource and len(resource) > 0 : deleteresources = [clusternodegroup_streamidentifier_binding() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].name = resource[i].name deleteresources[i].identifiername = resource[i].identifiername return cls.delete_bulk_request(client, deleteresources) except Exception as e : raise e @classmethod def get(cls, service, name) : ur""" Use this API to fetch clusternodegroup_streamidentifier_binding resources. """ try : obj = clusternodegroup_streamidentifier_binding() obj.name = name response = obj.get_resources(service) return response except Exception as e: raise e @classmethod def get_filtered(cls, service, name, filter_) : ur""" Use this API to fetch filtered set of clusternodegroup_streamidentifier_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = clusternodegroup_streamidentifier_binding() obj.name = name option_ = options() option_.filter = filter_ response = obj.getfiltered(service, option_) return response except Exception as e: raise e @classmethod def count(cls, service, name) : ur""" Use this API to count clusternodegroup_streamidentifier_binding resources configued on NetScaler. """ try : obj = clusternodegroup_streamidentifier_binding() obj.name = name option_ = options() option_.count = True response = obj.get_resources(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e @classmethod def count_filtered(cls, service, name, filter_) : ur""" Use this API to count the filtered set of clusternodegroup_streamidentifier_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". 
""" try : obj = clusternodegroup_streamidentifier_binding() obj.name = name option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e class clusternodegroup_streamidentifier_binding_response(base_response) : def __init__(self, length=1) : self.clusternodegroup_streamidentifier_binding = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.clusternodegroup_streamidentifier_binding = [clusternodegroup_streamidentifier_binding() for _ in range(length)]
benfinke/ns_python
nssrc/com/citrix/netscaler/nitro/resource/config/cluster/clusternodegroup_streamidentifier_binding.py
Python
apache-2.0
6,728
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import gc import glob import os import shutil import tempfile import time import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf # pylint: disable=g-bad-import-order from tensorflow.contrib.eager.python.examples.spinn import data from third_party.examples.eager.spinn import spinn from tensorflow.contrib.summary import summary_test_util from tensorflow.python.eager import test from tensorflow.python.framework import test_util from tensorflow.python.training import checkpoint_management from tensorflow.python.training.tracking import util as trackable_utils # pylint: enable=g-bad-import-order def _generate_synthetic_snli_data_batch(sequence_length, batch_size, vocab_size): """Generate a fake batch of SNLI data for testing.""" with tf.device("cpu:0"): labels = tf.random_uniform([batch_size], minval=1, maxval=4, dtype=tf.int64) prem = tf.random_uniform( (sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64) prem_trans = tf.constant(np.array( [[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3, 2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 2, 2]] * batch_size, dtype=np.int64).T) hypo = tf.random_uniform( (sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64) hypo_trans = tf.constant(np.array( [[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3, 2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 2, 2]] * batch_size, dtype=np.int64).T) if test_util.is_gpu_available(): labels = labels.gpu() prem = prem.gpu() prem_trans = prem_trans.gpu() hypo = hypo.gpu() hypo_trans = hypo_trans.gpu() return labels, prem, prem_trans, hypo, hypo_trans def _test_spinn_config(d_embed, d_out, logdir=None, inference_sentences=None): """Generate a config tuple for testing. Args: d_embed: Embedding dimensions. d_out: Model output dimensions. logdir: Optional logdir. inference_sentences: A 2-tuple of strings representing the sentences (with binary parsing result), e.g., ("( ( The dog ) ( ( is running ) . ) )", "( ( The dog ) ( moves . ) )"). Returns: A config tuple. 
""" config_tuple = collections.namedtuple( "Config", ["d_hidden", "d_proj", "d_tracker", "predict", "embed_dropout", "mlp_dropout", "n_mlp_layers", "d_mlp", "d_out", "projection", "lr", "batch_size", "epochs", "force_cpu", "logdir", "log_every", "dev_every", "save_every", "lr_decay_every", "lr_decay_by", "inference_premise", "inference_hypothesis"]) inference_premise = inference_sentences[0] if inference_sentences else None inference_hypothesis = inference_sentences[1] if inference_sentences else None return config_tuple( d_hidden=d_embed, d_proj=d_embed * 2, d_tracker=8, predict=False, embed_dropout=0.1, mlp_dropout=0.1, n_mlp_layers=2, d_mlp=32, d_out=d_out, projection=True, lr=2e-2, batch_size=2, epochs=20, force_cpu=False, logdir=logdir, log_every=1, dev_every=2, save_every=2, lr_decay_every=1, lr_decay_by=0.75, inference_premise=inference_premise, inference_hypothesis=inference_hypothesis) class SpinnTest(test_util.TensorFlowTestCase): def setUp(self): super(SpinnTest, self).setUp() self._test_device = "gpu:0" if test_util.is_gpu_available() else "cpu:0" self._temp_data_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self._temp_data_dir) super(SpinnTest, self).tearDown() def testBundle(self): with tf.device(self._test_device): lstm_iter = [np.array([[0, 1], [2, 3]], dtype=np.float32), np.array([[0, -1], [-2, -3]], dtype=np.float32), np.array([[0, 2], [4, 6]], dtype=np.float32), np.array([[0, -2], [-4, -6]], dtype=np.float32)] out = spinn._bundle(lstm_iter) self.assertEqual(2, len(out)) self.assertEqual(tf.float32, out[0].dtype) self.assertEqual(tf.float32, out[1].dtype) self.assertAllEqual(np.array([[0, 2, 0, -2, 0, 4, 0, -4]]).T, out[0].numpy()) self.assertAllEqual(np.array([[1, 3, -1, -3, 2, 6, -2, -6]]).T, out[1].numpy()) def testUnbunbdle(self): with tf.device(self._test_device): state = [np.array([[0, 1, 2], [3, 4, 5]], dtype=np.float32), np.array([[0, -1, -2], [-3, -4, -5]], dtype=np.float32)] out = spinn._unbundle(state) self.assertEqual(2, len(out)) self.assertEqual(tf.float32, out[0].dtype) self.assertEqual(tf.float32, out[1].dtype) self.assertAllEqual(np.array([[0, 1, 2, 0, -1, -2]]), out[0].numpy()) self.assertAllEqual(np.array([[3, 4, 5, -3, -4, -5]]), out[1].numpy()) def testReducer(self): with tf.device(self._test_device): batch_size = 3 size = 10 tracker_size = 8 reducer = spinn.Reducer(size, tracker_size=tracker_size) left_in = [] right_in = [] tracking = [] for _ in range(batch_size): left_in.append(tf.random_normal((1, size * 2))) right_in.append(tf.random_normal((1, size * 2))) tracking.append(tf.random_normal((1, tracker_size * 2))) out = reducer(left_in, right_in, tracking=tracking) self.assertEqual(batch_size, len(out)) self.assertEqual(tf.float32, out[0].dtype) self.assertEqual((1, size * 2), out[0].shape) def testReduceTreeLSTM(self): with tf.device(self._test_device): size = 10 tracker_size = 8 reducer = spinn.Reducer(size, tracker_size=tracker_size) lstm_in = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]], dtype=np.float32) c1 = np.array([[0, 1], [2, 3]], dtype=np.float32) c2 = np.array([[0, -1], [-2, -3]], dtype=np.float32) h, c = reducer._tree_lstm(c1, c2, lstm_in) self.assertEqual(tf.float32, h.dtype) self.assertEqual(tf.float32, c.dtype) self.assertEqual((2, 2), h.shape) self.assertEqual((2, 2), c.shape) def testTracker(self): with tf.device(self._test_device): batch_size = 2 size = 10 tracker_size = 8 buffer_length = 18 stack_size = 3 tracker = spinn.Tracker(tracker_size, False) tracker.reset_state() # 
Create dummy inputs for testing. bufs = [] buf = [] for _ in range(buffer_length): buf.append(tf.random_normal((batch_size, size * 2))) bufs.append(buf) self.assertEqual(1, len(bufs)) self.assertEqual(buffer_length, len(bufs[0])) self.assertEqual((batch_size, size * 2), bufs[0][0].shape) stacks = [] stack = [] for _ in range(stack_size): stack.append(tf.random_normal((batch_size, size * 2))) stacks.append(stack) self.assertEqual(1, len(stacks)) self.assertEqual(3, len(stacks[0])) self.assertEqual((batch_size, size * 2), stacks[0][0].shape) for _ in range(2): out1, out2 = tracker(bufs, stacks) self.assertIsNone(out2) self.assertEqual(batch_size, len(out1)) self.assertEqual(tf.float32, out1[0].dtype) self.assertEqual((1, tracker_size * 2), out1[0].shape) self.assertEqual(tf.float32, tracker.state.c.dtype) self.assertEqual((batch_size, tracker_size), tracker.state.c.shape) self.assertEqual(tf.float32, tracker.state.h.dtype) self.assertEqual((batch_size, tracker_size), tracker.state.h.shape) def testSPINN(self): with tf.device(self._test_device): embedding_dims = 10 d_tracker = 8 sequence_length = 15 num_transitions = 27 config_tuple = collections.namedtuple( "Config", ["d_hidden", "d_proj", "d_tracker", "predict"]) config = config_tuple( embedding_dims, embedding_dims * 2, d_tracker, False) s = spinn.SPINN(config) # Create some fake data. buffers = tf.random_normal((sequence_length, 1, config.d_proj)) transitions = tf.constant( [[3], [3], [2], [3], [3], [3], [2], [2], [2], [3], [3], [3], [2], [3], [3], [2], [2], [3], [3], [3], [2], [2], [2], [2], [3], [2], [2]], dtype=tf.int64) self.assertEqual(tf.int64, transitions.dtype) self.assertEqual((num_transitions, 1), transitions.shape) out = s(buffers, transitions, training=True) self.assertEqual(tf.float32, out.dtype) self.assertEqual((1, embedding_dims), out.shape) def testSNLIClassifierAndTrainer(self): with tf.device(self._test_device): vocab_size = 40 batch_size = 2 d_embed = 10 sequence_length = 15 d_out = 4 config = _test_spinn_config(d_embed, d_out) # Create fake embedding matrix. embed = tf.random_normal((vocab_size, d_embed)) model = spinn.SNLIClassifier(config, embed) trainer = spinn.SNLIClassifierTrainer(model, config.lr) (labels, prem, prem_trans, hypo, hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length, batch_size, vocab_size) # Invoke model under non-training mode. logits = model(prem, prem_trans, hypo, hypo_trans, training=False) self.assertEqual(tf.float32, logits.dtype) self.assertEqual((batch_size, d_out), logits.shape) # Invoke model under training model. logits = model(prem, prem_trans, hypo, hypo_trans, training=True) self.assertEqual(tf.float32, logits.dtype) self.assertEqual((batch_size, d_out), logits.shape) # Calculate loss. loss1 = trainer.loss(labels, logits) self.assertEqual(tf.float32, loss1.dtype) self.assertEqual((), loss1.shape) loss2, logits = trainer.train_batch( labels, prem, prem_trans, hypo, hypo_trans) self.assertEqual(tf.float32, loss2.dtype) self.assertEqual((), loss2.shape) self.assertEqual(tf.float32, logits.dtype) self.assertEqual((batch_size, d_out), logits.shape) # Training on the batch should have led to a change in the loss value. self.assertNotEqual(loss1.numpy(), loss2.numpy()) def _create_test_data(self, snli_1_0_dir): fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt") os.makedirs(snli_1_0_dir) # Four sentences in total. 
with open(fake_train_file, "wt") as f: f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t" "sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t" "captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n") f.write("neutral\t( ( Foo bar ) . )\t( ( foo . )\t" "DummySentence1Parse\tDummySentence2Parse\t" "Foo bar.\tfoo baz.\t" "4705552913.jpg#2\t4705552913.jpg#2r1n\t" "neutral\tentailment\tneutral\tneutral\tneutral\n") f.write("contradiction\t( ( Bar foo ) . )\t( ( baz . )\t" "DummySentence1Parse\tDummySentence2Parse\t" "Foo bar.\tfoo baz.\t" "4705552913.jpg#2\t4705552913.jpg#2r1n\t" "neutral\tentailment\tneutral\tneutral\tneutral\n") f.write("entailment\t( ( Quux quuz ) . )\t( ( grault . )\t" "DummySentence1Parse\tDummySentence2Parse\t" "Foo bar.\tfoo baz.\t" "4705552913.jpg#2\t4705552913.jpg#2r1n\t" "neutral\tentailment\tneutral\tneutral\tneutral\n") f.write("entailment\t( ( Quuz quux ) . )\t( ( garply . )\t" "DummySentence1Parse\tDummySentence2Parse\t" "Foo bar.\tfoo baz.\t" "4705552913.jpg#2\t4705552913.jpg#2r1n\t" "neutral\tentailment\tneutral\tneutral\tneutral\n") glove_dir = os.path.join(self._temp_data_dir, "glove") os.makedirs(glove_dir) glove_file = os.path.join(glove_dir, "glove.42B.300d.txt") words = [".", "foo", "bar", "baz", "quux", "quuz", "grault", "garply"] with open(glove_file, "wt") as f: for i, word in enumerate(words): f.write("%s " % word) for j in range(data.WORD_VECTOR_LEN): f.write("%.5f" % (i * 0.1)) if j < data.WORD_VECTOR_LEN - 1: f.write(" ") else: f.write("\n") return fake_train_file def testInferSpinnWorks(self): """Test inference with the spinn model.""" snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0") self._create_test_data(snli_1_0_dir) vocab = data.load_vocabulary(self._temp_data_dir) word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab) config = _test_spinn_config( data.WORD_VECTOR_LEN, 4, logdir=os.path.join(self._temp_data_dir, "logdir"), inference_sentences=("( foo ( bar . ) )", "( bar ( foo . ) )")) logits = spinn.train_or_infer_spinn( embed, word2index, None, None, None, config) self.assertEqual(tf.float32, logits.dtype) self.assertEqual((3,), logits.shape) def testInferSpinnThrowsErrorIfOnlyOneSentenceIsSpecified(self): snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0") self._create_test_data(snli_1_0_dir) vocab = data.load_vocabulary(self._temp_data_dir) word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab) config = _test_spinn_config( data.WORD_VECTOR_LEN, 4, logdir=os.path.join(self._temp_data_dir, "logdir"), inference_sentences=("( foo ( bar . ) )", None)) with self.assertRaises(ValueError): spinn.train_or_infer_spinn(embed, word2index, None, None, None, config) def testTrainSpinn(self): """Test with fake toy SNLI data and GloVe vectors.""" # 1. Create and load a fake SNLI data file and a fake GloVe embedding file. snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0") fake_train_file = self._create_test_data(snli_1_0_dir) vocab = data.load_vocabulary(self._temp_data_dir) word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab) train_data = data.SnliData(fake_train_file, word2index) dev_data = data.SnliData(fake_train_file, word2index) test_data = data.SnliData(fake_train_file, word2index) # 2. Create a fake config. config = _test_spinn_config( data.WORD_VECTOR_LEN, 4, logdir=os.path.join(self._temp_data_dir, "logdir")) # 3. Test training of a SPINN model. 
trainer = spinn.train_or_infer_spinn( embed, word2index, train_data, dev_data, test_data, config) # 4. Load train loss values from the summary files and verify that they # decrease with training. summary_file = glob.glob(os.path.join(config.logdir, "events.out.*"))[0] events = summary_test_util.events_from_file(summary_file) train_losses = [event.summary.value[0].simple_value for event in events if event.summary.value and event.summary.value[0].tag == "train/loss"] self.assertEqual(config.epochs, len(train_losses)) # 5. Verify that checkpoints exist and contains all the expected variables. self.assertTrue(glob.glob(os.path.join(config.logdir, "ckpt*"))) object_graph = trackable_utils.object_metadata( checkpoint_management.latest_checkpoint(config.logdir)) ckpt_variable_names = set() for node in object_graph.nodes: for attribute in node.attributes: ckpt_variable_names.add(attribute.full_name) self.assertIn("global_step", ckpt_variable_names) for v in trainer.variables: variable_name = v.name[:v.name.index(":")] if ":" in v.name else v.name self.assertIn(variable_name, ckpt_variable_names) class EagerSpinnSNLIClassifierBenchmark(test.Benchmark): def benchmarkEagerSpinnSNLIClassifier(self): test_device = "gpu:0" if test_util.is_gpu_available() else "cpu:0" with tf.device(test_device): burn_in_iterations = 2 benchmark_iterations = 10 vocab_size = 1000 batch_size = 128 sequence_length = 15 d_embed = 200 d_out = 4 embed = tf.random_normal((vocab_size, d_embed)) config = _test_spinn_config(d_embed, d_out) model = spinn.SNLIClassifier(config, embed) trainer = spinn.SNLIClassifierTrainer(model, config.lr) (labels, prem, prem_trans, hypo, hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length, batch_size, vocab_size) for _ in range(burn_in_iterations): trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans) gc.collect() start_time = time.time() for _ in xrange(benchmark_iterations): trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans) wall_time = time.time() - start_time # Named "examples"_per_sec to conform with other benchmarks. extras = {"examples_per_sec": benchmark_iterations / wall_time} self.report_benchmark( name="Eager_SPINN_SNLIClassifier_Benchmark", iters=benchmark_iterations, wall_time=wall_time, extras=extras) if __name__ == "__main__": test.main()
chemelnucfin/tensorflow
tensorflow/contrib/eager/python/examples/spinn/spinn_test.py
Python
apache-2.0
18,370
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Cloudbase Solutions SRL # Copyright 2013 Pedro Navarro Perez # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for hyperv neutron rpc """ import mock from neutron.agent import rpc as agent_rpc from neutron.common import topics from neutron.openstack.common import context from neutron.openstack.common import rpc from neutron.plugins.hyperv import agent_notifier_api as ana from neutron.plugins.hyperv.common import constants from neutron.tests import base class rpcHyperVApiTestCase(base.BaseTestCase): def _test_hyperv_neutron_api( self, rpcapi, topic, method, rpc_method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') expected_retval = 'foo' if method == 'call' else None expected_msg = rpcapi.make_msg(method, **kwargs) expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION if rpc_method == 'cast' and method == 'run_instance': kwargs['call'] = False with mock.patch.object(rpc, rpc_method) as rpc_method_mock: rpc_method_mock.return_value = expected_retval retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, expected_retval) expected_args = [ctxt, topic, expected_msg] for arg, expected_arg in zip(rpc_method_mock.call_args[0], expected_args): self.assertEqual(arg, expected_arg) def test_delete_network(self): rpcapi = ana.AgentNotifierApi(topics.AGENT) self._test_hyperv_neutron_api( rpcapi, topics.get_topic_name( topics.AGENT, topics.NETWORK, topics.DELETE), 'network_delete', rpc_method='fanout_cast', network_id='fake_request_spec') def test_port_update(self): rpcapi = ana.AgentNotifierApi(topics.AGENT) self._test_hyperv_neutron_api( rpcapi, topics.get_topic_name( topics.AGENT, topics.PORT, topics.UPDATE), 'port_update', rpc_method='fanout_cast', port='fake_port', network_type='fake_network_type', segmentation_id='fake_segmentation_id', physical_network='fake_physical_network') def test_port_delete(self): rpcapi = ana.AgentNotifierApi(topics.AGENT) self._test_hyperv_neutron_api( rpcapi, topics.get_topic_name( topics.AGENT, topics.PORT, topics.DELETE), 'port_delete', rpc_method='fanout_cast', port_id='port_id') def test_tunnel_update(self): rpcapi = ana.AgentNotifierApi(topics.AGENT) self._test_hyperv_neutron_api( rpcapi, topics.get_topic_name( topics.AGENT, constants.TUNNEL, topics.UPDATE), 'tunnel_update', rpc_method='fanout_cast', tunnel_ip='fake_ip', tunnel_id='fake_id') def test_device_details(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_hyperv_neutron_api( rpcapi, topics.PLUGIN, 'get_device_details', rpc_method='call', device='fake_device', agent_id='fake_agent_id') def test_update_device_down(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_hyperv_neutron_api( rpcapi, topics.PLUGIN, 'update_device_down', rpc_method='call', device='fake_device', agent_id='fake_agent_id', host='fake_host') def test_tunnel_sync(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_hyperv_neutron_api( rpcapi, topics.PLUGIN, 'tunnel_sync', 
rpc_method='call', tunnel_ip='fake_tunnel_ip', tunnel_type=None)
sajuptpm/neutron-ipam
neutron/tests/unit/hyperv/test_hyperv_rpcapi.py
Python
apache-2.0
4,541
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Intel Corp
#
# Authors: Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mock

from ceilometer.central import manager
from ceilometer.hardware.inspector import base as inspector_base
from ceilometer.tests import base as test_base


class FakeInspector(inspector_base.Inspector):
    CPU = inspector_base.CPUStats(cpu_1_min=0.99,
                                  cpu_5_min=0.77,
                                  cpu_15_min=0.55)
    DISK = (inspector_base.Disk(device='/dev/sda1', path='/'),
            inspector_base.DiskStats(size=1000, used=90))
    MEMORY = inspector_base.MemoryStats(total=1000, used=90)
    NET = (inspector_base.Interface(name='test.teest',
                                    mac='001122334455',
                                    ip='10.0.0.2'),
           inspector_base.InterfaceStats(bandwidth=1000,
                                         rx_bytes=90, tx_bytes=80,
                                         error=1))

    def inspect_cpu(self, host):
        yield self.CPU

    def inspect_disk(self, host):
        yield self.DISK

    def inspect_memory(self, host):
        yield self.MEMORY

    def inspect_network(self, host):
        yield self.NET


class TestPollsterBase(test_base.BaseTestCase):

    def faux_get_inspector(url, namespace=None):
        return FakeInspector()

    def setUp(self):
        super(TestPollsterBase, self).setUp()
        self.hosts = ["test://test", "test://test2"]
        self.useFixture(fixtures.MonkeyPatch(
            'ceilometer.hardware.inspector.get_inspector',
            self.faux_get_inspector))

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def _check_get_samples(self, factory, name,
                           expected_value, expected_type, expected_unit=None):
        mgr = manager.AgentManager()
        pollster = factory()
        cache = {}
        samples = list(pollster.get_samples(mgr, cache, self.hosts))
        self.assertTrue(samples)
        self.assertIn(pollster.CACHE_KEY, cache)
        for host in self.hosts:
            self.assertIn(host, cache[pollster.CACHE_KEY])

        self.assertEqual(set([name]), set([s.name for s in samples]))
        match = [s for s in samples if s.name == name]
        self.assertEqual(expected_value, match[0].volume)
        self.assertEqual(expected_type, match[0].type)
        if expected_unit:
            self.assertEqual(expected_unit, match[0].unit)
NeCTAR-RC/ceilometer
ceilometer/tests/hardware/pollsters/base.py
Python
apache-2.0
3,105
"""DataUpdateCoordinator for WLED.""" from __future__ import annotations import asyncio from typing import Callable from wled import WLED, Device as WLEDDevice, WLEDConnectionClosed, WLEDError from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_HOST, EVENT_HOMEASSISTANT_STOP from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import ( CONF_KEEP_MASTER_LIGHT, DEFAULT_KEEP_MASTER_LIGHT, DOMAIN, LOGGER, SCAN_INTERVAL, ) class WLEDDataUpdateCoordinator(DataUpdateCoordinator[WLEDDevice]): """Class to manage fetching WLED data from single endpoint.""" keep_master_light: bool def __init__( self, hass: HomeAssistant, *, entry: ConfigEntry, ) -> None: """Initialize global WLED data updater.""" self.keep_master_light = entry.options.get( CONF_KEEP_MASTER_LIGHT, DEFAULT_KEEP_MASTER_LIGHT ) self.wled = WLED(entry.data[CONF_HOST], session=async_get_clientsession(hass)) self.unsub: Callable | None = None super().__init__( hass, LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL, ) @property def has_master_light(self) -> bool: """Return if the coordinated device has an master light.""" return self.keep_master_light or ( self.data is not None and len(self.data.state.segments) > 1 ) def update_listeners(self) -> None: """Call update on all listeners.""" for update_callback in self._listeners: update_callback() @callback def _use_websocket(self) -> None: """Use WebSocket for updates, instead of polling.""" async def listen() -> None: """Listen for state changes via WebSocket.""" try: await self.wled.connect() except WLEDError as err: self.logger.info(err) if self.unsub: self.unsub() self.unsub = None return try: await self.wled.listen(callback=self.async_set_updated_data) except WLEDConnectionClosed as err: self.last_update_success = False self.logger.info(err) except WLEDError as err: self.last_update_success = False self.update_listeners() self.logger.error(err) # Ensure we are disconnected await self.wled.disconnect() if self.unsub: self.unsub() self.unsub = None async def close_websocket(_) -> None: """Close WebSocket connection.""" await self.wled.disconnect() # Clean disconnect WebSocket on Home Assistant shutdown self.unsub = self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STOP, close_websocket ) # Start listening asyncio.create_task(listen()) async def _async_update_data(self) -> WLEDDevice: """Fetch data from WLED.""" try: device = await self.wled.update(full_update=not self.last_update_success) except WLEDError as error: raise UpdateFailed(f"Invalid response from API: {error}") from error # If the device supports a WebSocket, try activating it. if ( device.info.websocket is not None and not self.wled.connected and not self.unsub ): self._use_websocket() return device
Danielhiversen/home-assistant
homeassistant/components/wled/coordinator.py
Python
apache-2.0
3,789
import numpy as np

from pych.extern import Chapel


@Chapel()
def ex_numpy(a=np.ndarray):
    """
    use Time;

    var b: [a.domain] real;
    b = 2.0;

    for i in a.domain {
        a[i] = getCurrentTime() / 60;
        writeln(a[i]);
    }
    writeln(a);
    writeln("Done.");
    """
    return None

def test_chapel_slicing():
    a = np.ones((10), dtype=np.float)
    b = a[::2]
    assert all(a == [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    # this assert ensures array originally started as filled with ones

    ex_numpy(b)
    assert all(a[::2] == b)
    # this assert ensures a was updated when b was, since slices of numpy arrays
    # share memory with the original array
    # b should contain references to the current time, but since that will vary
    # from run to run, this is a more robust comparison

    c = np.ones((10), dtype=np.float)  # c starts out the same as a
    d = c[::2]
    for i, e in enumerate(d):
        d[i] = i
    # d should contain 0.0 through 4.0, updating alternating indexes in c
    # starting with the first index to contain the same contents. The
    # assert in the following line reflects this knowledge.
    assert all(c == [0.0, 1.0, 1.0, 1.0, 2.0, 1.0, 3.0, 1.0, 4.0, 1.0]) and \
        all(d == [0.0, 1.0, 2.0, 3.0, 4.0])
chapel-lang/pychapel
docs/source/examples/test_chapel_slicing_inline.py
Python
apache-2.0
1,302
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from .head_action import HeadActionServer
UCRoboticsLab/BaxterTictactoe
src/baxter_interface/src/head_action/__init__.py
Python
apache-2.0
1,596
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for 3d convolutional operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import nn_ops import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test def GetTestConfigs(): """Get all the valid tests configs to run. Returns: all the valid test configs as tuples of data_format and use_gpu. """ test_configs = [("NDHWC", False), ("NDHWC", True)] if test.is_gpu_available(cuda_only=True): # "NCDHW" format is only supported on CUDA. test_configs += [("NCDHW", True)] return test_configs class Conv3DTest(test.TestCase): def _DtypesToTest(self, use_gpu): if use_gpu: if not test_util.CudaSupportsHalfMatMulAndConv(): return [dtypes.float32] else: # It is important that float32 comes before float16 here, # as we will be using its gradients as reference for fp16 gradients. return [dtypes.float32, dtypes.float16] else: return [dtypes.float64, dtypes.float32, dtypes.float16] def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride, padding, data_format, dtype, use_gpu): total_size_tensor = 1 total_size_filter = 1 for s in tensor_in_sizes: total_size_tensor *= s for s in filter_in_sizes: total_size_filter *= s # Initializes the input tensor with array containing numbers from 0 to 1. # We keep the input tensor values fairly small to avoid overflowing float16 # during the conv3d. 
x1 = [f * 1.0 / total_size_tensor for f in range(1, total_size_tensor + 1)] x2 = [f * 1.0 / total_size_filter for f in range(1, total_size_filter + 1)] with self.cached_session(use_gpu=use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype) t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype) if isinstance(stride, collections.Iterable): strides = [1] + list(stride) + [1] else: strides = [1, stride, stride, stride, 1] if data_format == "NCDHW": t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv3d(t1, t2, strides, padding=padding, data_format=data_format) if data_format == "NCDHW": conv = test_util.NCHWToNHWC(conv) return conv def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected): results = [] for data_format, use_gpu in GetTestConfigs(): for dtype in self._DtypesToTest(use_gpu): result = self._SetupValuesForDevice( tensor_in_sizes, filter_in_sizes, stride, padding, data_format, dtype, use_gpu=use_gpu) results.append(result) with self.cached_session() as sess: values = sess.run(results) for value in values: print("expected = ", expected) print("actual = ", value) tol = 1e-6 if value.dtype == np.float16: tol = 1e-3 self.assertAllClose(expected, value.flatten(), atol=tol, rtol=tol) def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes, stride, dilation, padding, data_format, use_gpu): total_size_tensor = 1 total_size_filter = 1 for s in tensor_in_sizes: total_size_tensor *= s for s in filter_in_sizes: total_size_filter *= s # Initializes the input tensor with array containing incrementing # numbers from 1. x1 = [f * 1.0 for f in range(1, total_size_tensor + 1)] x2 = [f * 1.0 for f in range(1, total_size_filter + 1)] with self.cached_session(use_gpu=use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) if isinstance(stride, collections.Iterable): strides = list(stride) else: strides = [stride, stride, stride] if data_format == "NCDHW": t1 = test_util.NHWCToNCHW(t1) full_strides = [1, 1] + strides full_dilation = [1, 1] + dilation else: full_strides = [1] + strides + [1] full_dilation = [1] + dilation + [1] expected = nn_ops.convolution( t1, t2, padding=padding, strides=strides, dilation_rate=dilation, data_format=data_format) computed = nn_ops.conv3d( t1, t2, strides=full_strides, dilations=full_dilation, padding=padding, data_format=data_format) if data_format == "NCDHW": expected = test_util.NCHWToNHWC(expected) computed = test_util.NCHWToNHWC(computed) return expected, computed def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, dilations): expected_results = [] computed_results = [] default_dilations = ( dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1) for data_format, use_gpu in GetTestConfigs(): # If any dilation rate is larger than 1, only do test on the GPU # because we currently do not have a CPU implementation for arbitrary # dilation rates. 
if default_dilations or use_gpu: expected, computed = self._ComputeReferenceDilatedConv( tensor_in_sizes, filter_in_sizes, stride, dilations, padding, data_format, use_gpu) expected_results.append(expected) computed_results.append(computed) tolerance = 1e-2 if use_gpu else 1e-5 with self.cached_session() as sess: expected_values = sess.run(expected_results) computed_values = sess.run(computed_results) for e_value, c_value in zip(expected_values, computed_values): print("expected = ", e_value) print("actual = ", c_value) self.assertAllClose( e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=1e-6) def testConv3D1x1x1Filter(self): expected_output = [ 0.18518519, 0.22222222, 0.25925926, 0.40740741, 0.5, 0.59259259, 0.62962963, 0.77777778, 0.92592593, 0.85185185, 1.05555556, 1.25925926, 1.07407407, 1.33333333, 1.59259259, 1.2962963, 1.61111111, 1.92592593 ] # These are equivalent to the Conv2D1x1 case. self._VerifyValues( tensor_in_sizes=[1, 2, 3, 1, 3], filter_in_sizes=[1, 1, 1, 3, 3], stride=1, padding="VALID", expected=expected_output) self._VerifyValues( tensor_in_sizes=[1, 2, 1, 3, 3], filter_in_sizes=[1, 1, 1, 3, 3], stride=1, padding="VALID", expected=expected_output) self._VerifyValues( tensor_in_sizes=[1, 1, 2, 3, 3], filter_in_sizes=[1, 1, 1, 3, 3], stride=1, padding="VALID", expected=expected_output) def testConv3D1x1x1Filter2x1x1Dilation(self): if test.is_gpu_available(cuda_only=True): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 3, 6, 1, 1], filter_in_sizes=[1, 1, 1, 1, 1], stride=1, padding="VALID", dilations=[2, 1, 1]) # Expected values computed using scipy's correlate function. def testConv3D2x2x2Filter(self): expected_output = [ 3.77199074, 3.85069444, 3.92939815, 4.2650463, 4.35763889, 4.45023148, 6.73032407, 6.89236111, 7.05439815, 7.22337963, 7.39930556, 7.57523148, 9.68865741, 9.93402778, 10.17939815, 10.18171296, 10.44097222, 10.70023148 ] # expected_shape = [1, 3, 1, 2, 5] self._VerifyValues( tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout stride=1, padding="VALID", expected=expected_output) def testConv3D2x2x2Filter1x2x1Dilation(self): if test.is_gpu_available(cuda_only=True): self._VerifyDilatedConvValues( tensor_in_sizes=[1, 4, 6, 3, 1], filter_in_sizes=[2, 2, 2, 1, 1], stride=1, padding="VALID", dilations=[1, 2, 1]) def testConv3DStrides(self): expected_output = [ 0.06071429, 0.08988095, 0.10238095, 0.11488095, 0.12738095, 0.13988095, 0.08452381, 0.26071429, 0.35238095, 0.36488095, 0.37738095, 0.38988095, 0.40238095, 0.23452381, 0.46071429, 0.61488095, 0.62738095, 0.63988095, 0.65238095, 0.66488095, 0.38452381, 1.12738095, 1.48988095, 1.50238095, 1.51488095, 1.52738095, 1.53988095, 0.88452381, 1.32738095, 1.75238095, 1.76488095, 1.77738095, 1.78988095, 1.80238095, 1.03452381, 1.52738095, 2.01488095, 2.02738095, 2.03988095, 2.05238095, 2.06488095, 1.18452381, 2.19404762, 2.88988095, 2.90238095, 2.91488095, 2.92738095, 2.93988095, 1.68452381, 2.39404762, 3.15238095, 3.16488095, 3.17738095, 3.18988095, 3.20238095, 1.83452381, 2.59404762, 3.41488095, 3.42738095, 3.43988095, 3.45238095, 3.46488095, 1.98452381 ] self._VerifyValues( tensor_in_sizes=[1, 5, 8, 7, 1], filter_in_sizes=[1, 2, 3, 1, 1], stride=[2, 3, 1], # different stride for each spatial dimension padding="SAME", expected=expected_output) def testConv3D2x2x2FilterStride2(self): expected_output = [ 3.77199074, 3.85069444, 3.92939815, 9.68865741, 9.93402778, 10.17939815 ] self._VerifyValues( tensor_in_sizes=[1, 4, 2, 3, 3], filter_in_sizes=[2, 
2, 2, 3, 3], stride=2, padding="VALID", expected=expected_output) def testConv3DStride3(self): expected_output = [ 1.51140873, 1.57167659, 1.63194444, 1.56349206, 1.62673611, 1.68998016, 1.6155754, 1.68179563, 1.74801587, 1.9280754, 2.01215278, 2.09623016, 1.98015873, 2.0672123, 2.15426587, 2.03224206, 2.12227183, 2.21230159, 4.4280754, 4.65500992, 4.88194444, 4.48015873, 4.71006944, 4.93998016, 4.53224206, 4.76512897, 4.99801587, 4.84474206, 5.09548611, 5.34623016, 4.8968254, 5.15054563, 5.40426587, 4.94890873, 5.20560516, 5.46230159 ] self._VerifyValues( tensor_in_sizes=[1, 6, 7, 8, 2], filter_in_sizes=[3, 2, 1, 2, 3], stride=3, padding="VALID", expected=expected_output) def testConv3D2x2x2FilterStride2Same(self): expected_output = [ 3.77199074, 3.85069444, 3.92939815, 2.0162037, 2.06597222, 2.11574074, 9.68865741, 9.93402778, 10.17939815, 4.59953704, 4.73263889, 4.86574074 ] self._VerifyValues( tensor_in_sizes=[1, 4, 2, 3, 3], filter_in_sizes=[2, 2, 2, 3, 3], stride=2, padding="SAME", expected=expected_output) def testKernelSmallerThanStride(self): expected_output = [ 0.03703704, 0.11111111, 0.25925926, 0.33333333, 0.7037037, 0.77777778, 0.92592593, 1. ] self._VerifyValues( tensor_in_sizes=[1, 3, 3, 3, 1], filter_in_sizes=[1, 1, 1, 1, 1], stride=2, padding="SAME", expected=expected_output) self._VerifyValues( tensor_in_sizes=[1, 3, 3, 3, 1], filter_in_sizes=[1, 1, 1, 1, 1], stride=2, padding="VALID", expected=expected_output) expected_output = [ 0.54081633, 0.58017493, 0.28061224, 0.81632653, 0.85568513, 0.40306122, 0.41873178, 0.4340379, 0.19642857, 2.46938776, 2.50874636, 1.1377551, 2.74489796, 2.78425656, 1.26020408, 1.16873178, 1.1840379, 0.51785714, 1.09511662, 1.10604956, 0.44642857, 1.17164723, 1.18258017, 0.47704082, 0.3691691, 0.37244898, 0.125 ] self._VerifyValues( tensor_in_sizes=[1, 7, 7, 7, 1], filter_in_sizes=[2, 2, 2, 1, 1], stride=3, padding="SAME", expected=expected_output) expected_output = [ 0.540816, 0.580175, 0.816327, 0.855685, 2.469388, 2.508746, 2.744898, 2.784257 ] self._VerifyValues( tensor_in_sizes=[1, 7, 7, 7, 1], filter_in_sizes=[2, 2, 2, 1, 1], stride=3, padding="VALID", expected=expected_output) def testKernelSizeMatchesInputSize(self): self._VerifyValues( tensor_in_sizes=[1, 2, 1, 2, 1], filter_in_sizes=[2, 1, 2, 1, 2], stride=1, padding="VALID", expected=[1.5625, 1.875]) def _ConstructAndTestGradientForConfig( self, batch, input_shape, filter_shape, in_depth, out_depth, stride, padding, test_input, data_format, use_gpu): input_planes, input_rows, input_cols = input_shape filter_planes, filter_rows, filter_cols = filter_shape input_shape = [batch, input_planes, input_rows, input_cols, in_depth] filter_shape = [ filter_planes, filter_rows, filter_cols, in_depth, out_depth ] if isinstance(stride, collections.Iterable): strides = [1] + list(stride) + [1] else: strides = [1, stride, stride, stride, 1] if padding == "VALID": output_planes = int( math.ceil((input_planes - filter_planes + 1.0) / strides[1])) output_rows = int( math.ceil((input_rows - filter_rows + 1.0) / strides[2])) output_cols = int( math.ceil((input_cols - filter_cols + 1.0) / strides[3])) else: output_planes = int(math.ceil(float(input_planes) / strides[1])) output_rows = int(math.ceil(float(input_rows) / strides[2])) output_cols = int(math.ceil(float(input_cols) / strides[3])) output_shape = [batch, output_planes, output_rows, output_cols, out_depth] input_size = 1 for x in input_shape: input_size *= x filter_size = 1 for x in filter_shape: filter_size *= x input_data = [x * 1.0 / 
input_size for x in range(0, input_size)] filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)] for data_type in self._DtypesToTest(use_gpu=use_gpu): # TODO(mjanusz): Modify gradient_checker to also provide max relative # error and synchronize the tolerance levels between the tests for forward # and backward computations. if data_type == dtypes.float64: tolerance = 1e-8 elif data_type == dtypes.float32: tolerance = 5e-3 elif data_type == dtypes.float16: tolerance = 1e-3 with self.cached_session(use_gpu=use_gpu): orig_input_tensor = constant_op.constant( input_data, shape=input_shape, dtype=data_type, name="input") filter_tensor = constant_op.constant( filter_data, shape=filter_shape, dtype=data_type, name="filter") if data_format == "NCDHW": input_tensor = test_util.NHWCToNCHW(orig_input_tensor) new_strides = test_util.NHWCToNCHW(strides) else: input_tensor = orig_input_tensor new_strides = strides conv = nn_ops.conv3d( input_tensor, filter_tensor, new_strides, padding, data_format=data_format, name="conv") if data_format == "NCDHW": conv = test_util.NCHWToNHWC(conv) self.assertEqual(conv.shape, tensor_shape.TensorShape(output_shape)) if test_input: jacob_t, jacob_n = gradient_checker.compute_gradient( orig_input_tensor, input_shape, conv, output_shape) else: jacob_t, jacob_n = gradient_checker.compute_gradient( filter_tensor, filter_shape, conv, output_shape) if data_type != dtypes.float16: reference_jacob_t = jacob_t err = np.fabs(jacob_t - jacob_n).max() else: # Compare fp16 theoretical gradients to fp32 theoretical gradients, # since fp16 numerical gradients are too imprecise. err = np.fabs(jacob_t - reference_jacob_t).max() print("conv3d gradient error = ", err) self.assertLess(err, tolerance) def ConstructAndTestGradient(self, **kwargs): for data_format, use_gpu in GetTestConfigs(): self._ConstructAndTestGradientForConfig(data_format=data_format, use_gpu=use_gpu, **kwargs) def testInputGradientValidPaddingStrideOne(self): self.ConstructAndTestGradient( batch=2, input_shape=(3, 5, 4), filter_shape=(3, 3, 3), in_depth=2, out_depth=3, stride=1, padding="VALID", test_input=True) def testFilterGradientValidPaddingStrideOne(self): self.ConstructAndTestGradient( batch=4, input_shape=(4, 6, 5), filter_shape=(2, 2, 2), in_depth=2, out_depth=3, stride=1, padding="VALID", test_input=False) def testInputGradientValidPaddingStrideTwo(self): self.ConstructAndTestGradient( batch=2, input_shape=(6, 3, 5), filter_shape=(3, 3, 3), in_depth=2, out_depth=3, stride=2, padding="VALID", test_input=True) def testFilterGradientValidPaddingStrideTwo(self): self.ConstructAndTestGradient( batch=2, input_shape=(7, 6, 5), filter_shape=(2, 2, 2), in_depth=2, out_depth=3, stride=2, padding="VALID", test_input=False) def testInputGradientValidPaddingStrideThree(self): self.ConstructAndTestGradient( batch=2, input_shape=(3, 7, 6), filter_shape=(3, 3, 3), in_depth=2, out_depth=3, stride=3, padding="VALID", test_input=True) def testFilterGradientValidPaddingStrideThree(self): self.ConstructAndTestGradient( batch=2, input_shape=(4, 4, 7), filter_shape=(4, 4, 4), in_depth=2, out_depth=3, stride=3, padding="VALID", test_input=False) def testInputGradientSamePaddingStrideOne(self): self.ConstructAndTestGradient( batch=2, input_shape=(3, 2, 2), filter_shape=(3, 2, 1), in_depth=2, out_depth=1, stride=1, padding="SAME", test_input=True) def testFilterGradientSamePaddingStrideOne(self): self.ConstructAndTestGradient( batch=2, input_shape=(3, 6, 5), filter_shape=(2, 2, 2), in_depth=2, out_depth=3, stride=1, 
padding="SAME", test_input=False) def testInputGradientSamePaddingStrideTwo(self): self.ConstructAndTestGradient( batch=2, input_shape=(6, 3, 4), filter_shape=(3, 3, 3), in_depth=2, out_depth=3, stride=2, padding="SAME", test_input=True) def testFilterGradientSamePaddingStrideTwo(self): self.ConstructAndTestGradient( batch=4, input_shape=(7, 3, 5), filter_shape=(2, 2, 2), in_depth=2, out_depth=3, stride=2, padding="SAME", test_input=False) def testInputGradientSamePaddingStrideThree(self): self.ConstructAndTestGradient( batch=2, input_shape=(9, 3, 6), filter_shape=(3, 3, 3), in_depth=2, out_depth=3, stride=3, padding="SAME", test_input=True) def testFilterGradientSamePaddingStrideThree(self): self.ConstructAndTestGradient( batch=2, input_shape=(9, 4, 7), filter_shape=(4, 4, 4), in_depth=2, out_depth=3, stride=3, padding="SAME", test_input=False) def testInputGradientSamePaddingDifferentStrides(self): self.ConstructAndTestGradient( batch=1, input_shape=(5, 8, 7), filter_shape=(1, 2, 3), in_depth=2, out_depth=3, stride=[2, 3, 1], padding="SAME", test_input=True) def testFilterGradientKernelSizeMatchesInputSize(self): self.ConstructAndTestGradient( batch=2, input_shape=(5, 4, 3), filter_shape=(5, 4, 3), in_depth=2, out_depth=3, stride=1, padding="VALID", test_input=False) def testInputGradientKernelSizeMatchesInputSize(self): self.ConstructAndTestGradient( batch=2, input_shape=(5, 4, 3), filter_shape=(5, 4, 3), in_depth=2, out_depth=3, stride=1, padding="VALID", test_input=True) def disabledtestFilterGradientSamePaddingDifferentStrides(self): self.ConstructAndTestGradient( batch=1, input_shape=(5, 8, 7), filter_shape=(1, 2, 3), in_depth=2, out_depth=3, stride=[2, 3, 1], padding="SAME", test_input=False) # Testing for backprops def _RunAndVerifyBackprop(self, input_sizes, filter_sizes, output_sizes, strides, dilations, padding, data_format, use_gpu, err, mode): total_input_size = 1 total_filter_size = 1 for s in input_sizes: total_input_size *= s for s in filter_sizes: total_filter_size *= s # Initializes the input tensor with array containing incrementing # numbers from 1. x1 = [f * 1.0 for f in range(1, total_input_size + 1)] x2 = [f * 1.0 for f in range(1, total_filter_size + 1)] default_dilations = ( dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1) # If any dilation rate is larger than 1, only do test on the GPU # because we currently do not have a CPU implementation for arbitrary # dilation rates. 
if default_dilations or use_gpu: with self.cached_session(use_gpu=use_gpu) as sess: if data_format == "NCDHW": input_sizes = test_util.NHWCToNCHW(input_sizes) t1 = constant_op.constant(x1, shape=input_sizes) t2 = constant_op.constant(x2, shape=filter_sizes) full_strides = [1] + strides + [1] full_dilations = [1] + dilations + [1] if data_format == "NCDHW": full_strides = test_util.NHWCToNCHW(full_strides) full_dilations = test_util.NHWCToNCHW(full_dilations) actual = nn_ops.conv3d( t1, t2, strides=full_strides, dilations=full_dilations, padding=padding, data_format=data_format) expected = nn_ops.convolution( t1, t2, padding=padding, strides=strides, dilation_rate=dilations, data_format=data_format) if data_format == "NCDHW": actual = test_util.NCHWToNHWC(actual) expected = test_util.NCHWToNHWC(expected) actual_grad = gradients_impl.gradients(actual, t1 if mode == "input" else t2)[0] expected_grad = gradients_impl.gradients(expected, t1 if mode == "input" else t2)[0] # "values" consists of two tensors for two backprops actual_value = sess.run(actual_grad) expected_value = sess.run(expected_grad) self.assertShapeEqual(actual_value, actual_grad) self.assertShapeEqual(expected_value, expected_grad) print("expected = ", expected_value) print("actual = ", actual_value) self.assertArrayNear(expected_value.flatten(), actual_value.flatten(), err) def testConv3D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self): if test.is_gpu_available(cuda_only=True): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackprop( input_sizes=[1, 3, 6, 1, 1], filter_sizes=[2, 2, 1, 1, 1], output_sizes=[1, 1, 5, 1, 1], strides=[1, 1, 1], dilations=[2, 1, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5, mode="filter") def testConv3D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self): if test.is_gpu_available(cuda_only=True): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackprop( input_sizes=[1, 3, 6, 1, 1], filter_sizes=[2, 2, 1, 1, 1], output_sizes=[1, 1, 5, 1, 1], strides=[1, 1, 1], dilations=[2, 1, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5, mode="input") if __name__ == "__main__": test.main()
dongjoon-hyun/tensorflow
tensorflow/python/kernel_tests/conv_ops_3d_test.py
Python
apache-2.0
25,745
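# --- Illustrative sketch (not part of the test file above) ---
# _RunAndVerifyBackprop above checks that gradients of nn_ops.conv3d (with the
# batch and channel entries added to strides and dilations) agree with
# gradients of the generic nn_ops.convolution. A minimal standalone version of
# that comparison, reusing the same TF internals the test imports; the shapes
# mirror the 2x2 depth-3 dilation case, and the returned tensors are meant to
# be evaluated in a session, as the test does with cached_session().
from tensorflow.python.framework import constant_op, ops
from tensorflow.python.ops import gradients_impl, nn_ops

def conv3d_vs_convolution_input_grads():
    with ops.Graph().as_default():
        t1 = constant_op.constant(
            [float(i) for i in range(1, 19)], shape=[1, 3, 6, 1, 1])  # NDHWC
        t2 = constant_op.constant(
            [float(i) for i in range(1, 5)], shape=[2, 2, 1, 1, 1])   # DHWIO
        conv = nn_ops.conv3d(t1, t2, strides=[1, 1, 1, 1, 1],
                             dilations=[1, 2, 1, 1, 1], padding="VALID")
        ref = nn_ops.convolution(t1, t2, padding="VALID", strides=[1, 1, 1],
                                 dilation_rate=[2, 1, 1])
        # Both gradients should be numerically close (see assertArrayNear above).
        return (gradients_impl.gradients(conv, t1)[0],
                gradients_impl.gradients(ref, t1)[0])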
# -*- coding: utf-8 -*- # Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect from nailgun.errors import errors from nailgun.expression import Expression from nailgun.test.base import BaseTestCase class TestExpressionParser(BaseTestCase): def test_expression_parser(self): cluster = self.env.create_cluster(api=False, mode='ha_compact') models = { 'cluster': cluster, 'settings': cluster.attributes.editable, 'release': cluster.release } hypervisor = models['settings']['common']['libvirt_type']['value'] # if you change/add test cases, please also modify # static/tests/unit/expression.js test_cases = ( # test scalars ('true', True), ('false', False), ('123', 123), ('"123"', '123'), ("'123'", '123'), # test null ('null', None), ('null == false', False), ('null == true', False), ('null == null', True), # test boolean operators ('true or false', True), ('true and false', False), ('not true', False), # test precedence ('true or true and false or false', True), ('true == true and false == false', True), # test comparison ('123 == 123', True), ('123 == 321', False), ('123 != 321', True), ('123 != "123"', True), # test grouping ('(true or true) and not (false or false)', True), # test errors ('(true', errors.ParseError), ('false and', errors.ParseError), ('== 123', errors.ParseError), ('#^@$*()#@!', errors.ParseError), # test modelpaths ('cluster:mode', 'ha_compact'), ('cluster:mode == "ha_compact"', True), ('cluster:mode != "multinode"', True), ('"controller" in release:roles_metadata', True), ('"unknown-role" in release:roles_metadata', False), ('settings:common.libvirt_type.value', hypervisor), ('settings:common.libvirt_type.value == "{0}"'.format(hypervisor), True), ('cluster:mode == "ha_compact" and not (' 'settings:common.libvirt_type.value ' '!= "{0}")'.format(hypervisor), True), # test nonexistent keys ('cluster:nonexistentkey', TypeError), ('cluster:nonexistentkey == null', True, False), # test evaluation flow ('cluster:mode != "ha_compact" and cluster:nonexistentkey == null', False), ('cluster:mode == "ha_compact" and cluster:nonexistentkey == null', TypeError), ('cluster:mode == "ha_compact" and cluster:nonexistentkey == null', True, False), ) def evaluate_expression(expression, models, strict): return Expression(expression, models, strict).evaluate() for test_case in test_cases: expression = test_case[0] result = test_case[1] strict = test_case[2] if len(test_case) > 2 else True if inspect.isclass(result) and issubclass(result, Exception): self.assertRaises(result, evaluate_expression, expression, models, strict) else: self.assertEqual(evaluate_expression(expression, models, strict), result)
huntxu/fuel-web
nailgun/nailgun/test/unit/test_expression_parser.py
Python
apache-2.0
4,177
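# --- Illustrative sketch (not part of the test file above) ---
# The test above drives nailgun's Expression DSL: an expression string is
# parsed against a dict of models and evaluated, and the third argument
# (strict) controls whether unknown model keys raise or simply compare as
# null. A condensed usage sketch, assuming a `models` dict like the one built
# in the test (a cluster created with mode='ha_compact'):
from nailgun.expression import Expression

def check_cluster_mode(models):
    # Strict evaluation: model paths must resolve.
    assert Expression('cluster:mode == "ha_compact"', models, True).evaluate()
    # Non-strict evaluation: nonexistent keys behave like null instead of raising.
    assert Expression('cluster:nonexistentkey == null', models, False).evaluate()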
# -*- coding: utf-8 -*- """ License: BSD (c) 2009 ::: www.CodeResort.com - BV Network AS (simon-code@bvnetwork.no) """ import unittest import xmlrpclib import os import time from trac.util.compat import sorted from tracrpc.tests import rpc_testenv, TracRpcTestCase from tracrpc.util import StringIO class RpcWikiTestCase(TracRpcTestCase): def setUp(self): TracRpcTestCase.setUp(self) self.anon = xmlrpclib.ServerProxy(rpc_testenv.url_anon) self.user = xmlrpclib.ServerProxy(rpc_testenv.url_user) self.admin = xmlrpclib.ServerProxy(rpc_testenv.url_admin) def tearDown(self): TracRpcTestCase.tearDown(self) def test_attachments(self): # Note: Quite similar to the tracrpc.tests.json.JsonTestCase.test_binary image_url = os.path.join(rpc_testenv.trac_src, 'trac', 'htdocs', 'feed.png') image_in = StringIO(open(image_url, 'r').read()) # Create attachment self.admin.wiki.putAttachmentEx('TitleIndex', 'feed2.png', 'test image', xmlrpclib.Binary(image_in.getvalue())) self.assertEquals(image_in.getvalue(), self.admin.wiki.getAttachment( 'TitleIndex/feed2.png').data) # Update attachment (adding new) self.admin.wiki.putAttachmentEx('TitleIndex', 'feed2.png', 'test image', xmlrpclib.Binary(image_in.getvalue()), False) self.assertEquals(image_in.getvalue(), self.admin.wiki.getAttachment( 'TitleIndex/feed2.2.png').data) # List attachments self.assertEquals(['TitleIndex/feed2.2.png', 'TitleIndex/feed2.png'], sorted(self.admin.wiki.listAttachments('TitleIndex'))) # Delete both attachments self.admin.wiki.deleteAttachment('TitleIndex/feed2.png') self.admin.wiki.deleteAttachment('TitleIndex/feed2.2.png') # List attachments again self.assertEquals([], self.admin.wiki.listAttachments('TitleIndex')) def test_getRecentChanges(self): self.admin.wiki.putPage('WikiOne', 'content one', {}) time.sleep(1) self.admin.wiki.putPage('WikiTwo', 'content two', {}) attrs2 = self.admin.wiki.getPageInfo('WikiTwo') changes = self.admin.wiki.getRecentChanges(attrs2['lastModified']) self.assertEquals(1, len(changes)) self.assertEquals('WikiTwo', changes[0]['name']) self.assertEquals('admin', changes[0]['author']) self.assertEquals(1, changes[0]['version']) self.admin.wiki.deletePage('WikiOne') self.admin.wiki.deletePage('WikiTwo') def test_getPageHTMLWithImage(self): # Create the wiki page (absolute image reference) self.admin.wiki.putPage('ImageTest', '[[Image(wiki:ImageTest:feed.png, nolink)]]\n', {}) # Create attachment image_url = os.path.join(rpc_testenv.trac_src, 'trac', 'htdocs', 'feed.png') self.admin.wiki.putAttachmentEx('ImageTest', 'feed.png', 'test image', xmlrpclib.Binary(open(image_url, 'r').read())) # Check rendering absolute markup_1 = self.admin.wiki.getPageHTML('ImageTest') self.assertEquals('<html><body><p>\n<img src="http://127.0.0.1:8765' '/raw-attachment/wiki/ImageTest/feed.png" alt="test image" ' 'title="test image" />\n</p>\n</body></html>', markup_1) # Change to relative image reference and check again self.admin.wiki.putPage('ImageTest', '[[Image(feed.png, nolink)]]\n', {}) markup_2 = self.admin.wiki.getPageHTML('ImageTest') self.assertEquals(markup_2, markup_1) def test_getPageHTMLWithManipulator(self): self.admin.wiki.putPage('FooBar', 'foo bar', {}) # Enable wiki manipulator plugin = os.path.join(rpc_testenv.tracdir, 'plugins', 'Manipulator.py') open(plugin, 'w').write( "from trac.core import *\n" "from trac.wiki.api import IWikiPageManipulator\n" "class WikiManipulator(Component):\n" " implements(IWikiPageManipulator)\n" " def prepare_wiki_page(self, req, page, fields):\n" " fields['text'] = 'foo bar baz'\n" 
" def validate_wiki_page(req, page):\n" " return []\n") rpc_testenv.restart() # Perform tests self.assertEquals('<html><body><p>\nfoo bar baz\n</p>\n</body></html>', self.admin.wiki.getPageHTML('FooBar')) # Remove plugin and restart os.unlink(plugin) rpc_testenv.restart() def test_suite(): return unittest.makeSuite(RpcWikiTestCase) if __name__ == '__main__': unittest.main(defaultTest='test_suite')
hexenxp14/tracxmlrpc
tracrpc/tests/wiki.py
Python
bsd-2-clause
4,849
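# --- Illustrative sketch (not part of the test file above) ---
# The tests above exercise Trac's XML-RPC wiki API through xmlrpclib.ServerProxy
# endpoints supplied by the test environment (rpc_testenv.url_admin etc.).
# A condensed sketch of the attachment round-trip they perform; the server URL
# and local file name below are placeholders, while the wiki.* methods are the
# same ones the tests call.
import xmlrpclib

server = xmlrpclib.ServerProxy('http://admin:admin@localhost:8000/login/xmlrpc')
payload = xmlrpclib.Binary(open('feed.png', 'rb').read())
server.wiki.putAttachmentEx('TitleIndex', 'feed.png', 'test image', payload)
print server.wiki.listAttachments('TitleIndex')
print len(server.wiki.getAttachment('TitleIndex/feed.png').data)
server.wiki.deleteAttachment('TitleIndex/feed.png')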
import unittest from pyramid import testing from pyramid.compat import ( text_, PY3, ) class TestRoute(unittest.TestCase): def _getTargetClass(self): from pyramid.urldispatch import Route return Route def _makeOne(self, *arg): return self._getTargetClass()(*arg) def test_provides_IRoute(self): from pyramid.interfaces import IRoute from zope.interface.verify import verifyObject verifyObject(IRoute, self._makeOne('name', 'pattern')) def test_ctor(self): import types route = self._makeOne('name', ':path', 'factory') self.assertEqual(route.pattern, ':path') self.assertEqual(route.path, ':path') self.assertEqual(route.name, 'name') self.assertEqual(route.factory, 'factory') self.assertTrue(route.generate.__class__ is types.FunctionType) self.assertTrue(route.match.__class__ is types.FunctionType) def test_ctor_defaults(self): import types route = self._makeOne('name', ':path') self.assertEqual(route.pattern, ':path') self.assertEqual(route.path, ':path') self.assertEqual(route.name, 'name') self.assertEqual(route.factory, None) self.assertTrue(route.generate.__class__ is types.FunctionType) self.assertTrue(route.match.__class__ is types.FunctionType) def test_match(self): route = self._makeOne('name', ':path') self.assertEqual(route.match('/whatever'), {'path':'whatever'}) def test_generate(self): route = self._makeOne('name', ':path') self.assertEqual(route.generate({'path':'abc'}), '/abc') class RoutesMapperTests(unittest.TestCase): def setUp(self): testing.setUp() def tearDown(self): testing.tearDown() def _getRequest(self, **kw): from pyramid.threadlocal import get_current_registry environ = {'SERVER_NAME':'localhost', 'wsgi.url_scheme':'http'} environ.update(kw) request = DummyRequest(environ) reg = get_current_registry() request.registry = reg return request def _getTargetClass(self): from pyramid.urldispatch import RoutesMapper return RoutesMapper def _makeOne(self): klass = self._getTargetClass() return klass() def test_provides_IRoutesMapper(self): from pyramid.interfaces import IRoutesMapper from zope.interface.verify import verifyObject verifyObject(IRoutesMapper, self._makeOne()) def test_no_route_matches(self): mapper = self._makeOne() request = self._getRequest(PATH_INFO='/') result = mapper(request) self.assertEqual(result['match'], None) self.assertEqual(result['route'], None) def test_connect_name_exists_removes_old(self): mapper = self._makeOne() mapper.connect('foo', 'archives/:action/:article') mapper.connect('foo', 'archives/:action/:article2') self.assertEqual(len(mapper.routelist), 1) self.assertEqual(len(mapper.routes), 1) self.assertEqual(mapper.routes['foo'].pattern, 'archives/:action/:article2') self.assertEqual(mapper.routelist[0].pattern, 'archives/:action/:article2') def test_connect_static(self): mapper = self._makeOne() mapper.connect('foo', 'archives/:action/:article', static=True) self.assertEqual(len(mapper.routelist), 0) self.assertEqual(len(mapper.routes), 1) self.assertEqual(mapper.routes['foo'].pattern, 'archives/:action/:article') def test_connect_static_overridden(self): mapper = self._makeOne() mapper.connect('foo', 'archives/:action/:article', static=True) self.assertEqual(len(mapper.routelist), 0) self.assertEqual(len(mapper.routes), 1) self.assertEqual(mapper.routes['foo'].pattern, 'archives/:action/:article') mapper.connect('foo', 'archives/:action/:article2') self.assertEqual(len(mapper.routelist), 1) self.assertEqual(len(mapper.routes), 1) self.assertEqual(mapper.routes['foo'].pattern, 'archives/:action/:article2') 
self.assertEqual(mapper.routelist[0].pattern, 'archives/:action/:article2') def test___call__pathinfo_cant_be_decoded(self): from pyramid.exceptions import URLDecodeError mapper = self._makeOne() if PY3: # pragma: no cover path_info = b'\xff\xfe\xe6\x00'.decode('latin-1') else: path_info = b'\xff\xfe\xe6\x00' request = self._getRequest(PATH_INFO=path_info) self.assertRaises(URLDecodeError, mapper, request) def test___call__route_matches(self): mapper = self._makeOne() mapper.connect('foo', 'archives/:action/:article') request = self._getRequest(PATH_INFO='/archives/action1/article1') result = mapper(request) self.assertEqual(result['route'], mapper.routes['foo']) self.assertEqual(result['match']['action'], 'action1') self.assertEqual(result['match']['article'], 'article1') def test___call__route_matches_with_predicates(self): mapper = self._makeOne() mapper.connect('foo', 'archives/:action/:article', predicates=[lambda *arg: True]) request = self._getRequest(PATH_INFO='/archives/action1/article1') result = mapper(request) self.assertEqual(result['route'], mapper.routes['foo']) self.assertEqual(result['match']['action'], 'action1') self.assertEqual(result['match']['article'], 'article1') def test___call__route_fails_to_match_with_predicates(self): mapper = self._makeOne() mapper.connect('foo', 'archives/:action/article1', predicates=[lambda *arg: True, lambda *arg: False]) mapper.connect('bar', 'archives/:action/:article') request = self._getRequest(PATH_INFO='/archives/action1/article1') result = mapper(request) self.assertEqual(result['route'], mapper.routes['bar']) self.assertEqual(result['match']['action'], 'action1') self.assertEqual(result['match']['article'], 'article1') def test___call__custom_predicate_gets_info(self): mapper = self._makeOne() def pred(info, request): self.assertEqual(info['match'], {'action':'action1'}) self.assertEqual(info['route'], mapper.routes['foo']) return True mapper.connect('foo', 'archives/:action/article1', predicates=[pred]) request = self._getRequest(PATH_INFO='/archives/action1/article1') mapper(request) def test_cc_bug(self): # "unordered" as reported in IRC by author of # http://labs.creativecommons.org/2010/01/13/cc-engine-and-web-non-frameworks/ mapper = self._makeOne() mapper.connect('rdf', 'licenses/:license_code/:license_version/rdf') mapper.connect('juri', 'licenses/:license_code/:license_version/:jurisdiction') request = self._getRequest(PATH_INFO='/licenses/1/v2/rdf') result = mapper(request) self.assertEqual(result['route'], mapper.routes['rdf']) self.assertEqual(result['match']['license_code'], '1') self.assertEqual(result['match']['license_version'], 'v2') request = self._getRequest(PATH_INFO='/licenses/1/v2/usa') result = mapper(request) self.assertEqual(result['route'], mapper.routes['juri']) self.assertEqual(result['match']['license_code'], '1') self.assertEqual(result['match']['license_version'], 'v2') self.assertEqual(result['match']['jurisdiction'], 'usa') def test___call__root_route_matches(self): mapper = self._makeOne() mapper.connect('root', '') request = self._getRequest(PATH_INFO='/') result = mapper(request) self.assertEqual(result['route'], mapper.routes['root']) self.assertEqual(result['match'], {}) def test___call__root_route_matches2(self): mapper = self._makeOne() mapper.connect('root', '/') request = self._getRequest(PATH_INFO='/') result = mapper(request) self.assertEqual(result['route'], mapper.routes['root']) self.assertEqual(result['match'], {}) def test___call__root_route_when_path_info_empty(self): mapper = 
self._makeOne() mapper.connect('root', '/') request = self._getRequest(PATH_INFO='') result = mapper(request) self.assertEqual(result['route'], mapper.routes['root']) self.assertEqual(result['match'], {}) def test___call__root_route_when_path_info_notempty(self): mapper = self._makeOne() mapper.connect('root', '/') request = self._getRequest(PATH_INFO='/') result = mapper(request) self.assertEqual(result['route'], mapper.routes['root']) self.assertEqual(result['match'], {}) def test___call__no_path_info(self): mapper = self._makeOne() mapper.connect('root', '/') request = self._getRequest() result = mapper(request) self.assertEqual(result['route'], mapper.routes['root']) self.assertEqual(result['match'], {}) def test_has_routes(self): mapper = self._makeOne() self.assertEqual(mapper.has_routes(), False) mapper.connect('whatever', 'archives/:action/:article') self.assertEqual(mapper.has_routes(), True) def test_get_routes(self): from pyramid.urldispatch import Route mapper = self._makeOne() self.assertEqual(mapper.get_routes(), []) mapper.connect('whatever', 'archives/:action/:article') routes = mapper.get_routes() self.assertEqual(len(routes), 1) self.assertEqual(routes[0].__class__, Route) def test_get_route_matches(self): mapper = self._makeOne() mapper.connect('whatever', 'archives/:action/:article') result = mapper.get_route('whatever') self.assertEqual(result.pattern, 'archives/:action/:article') def test_get_route_misses(self): mapper = self._makeOne() result = mapper.get_route('whatever') self.assertEqual(result, None) def test_generate(self): mapper = self._makeOne() def generator(kw): return 123 route = DummyRoute(generator) mapper.routes['abc'] = route self.assertEqual(mapper.generate('abc', {}), 123) class TestCompileRoute(unittest.TestCase): def _callFUT(self, pattern): from pyramid.urldispatch import _compile_route return _compile_route(pattern) def test_no_star(self): matcher, generator = self._callFUT('/foo/:baz/biz/:buz/bar') self.assertEqual(matcher('/foo/baz/biz/buz/bar'), {'baz':'baz', 'buz':'buz'}) self.assertEqual(matcher('foo/baz/biz/buz/bar'), None) self.assertEqual(generator({'baz':1, 'buz':2}), '/foo/1/biz/2/bar') def test_with_star(self): matcher, generator = self._callFUT('/foo/:baz/biz/:buz/bar*traverse') self.assertEqual(matcher('/foo/baz/biz/buz/bar'), {'baz':'baz', 'buz':'buz', 'traverse':()}) self.assertEqual(matcher('/foo/baz/biz/buz/bar/everything/else/here'), {'baz':'baz', 'buz':'buz', 'traverse':('everything', 'else', 'here')}) self.assertEqual(matcher('foo/baz/biz/buz/bar'), None) self.assertEqual(generator( {'baz':1, 'buz':2, 'traverse':'/a/b'}), '/foo/1/biz/2/bar/a/b') def test_with_bracket_star(self): matcher, generator = self._callFUT( '/foo/{baz}/biz/{buz}/bar{remainder:.*}') self.assertEqual(matcher('/foo/baz/biz/buz/bar'), {'baz':'baz', 'buz':'buz', 'remainder':''}) self.assertEqual(matcher('/foo/baz/biz/buz/bar/everything/else/here'), {'baz':'baz', 'buz':'buz', 'remainder':'/everything/else/here'}) self.assertEqual(matcher('foo/baz/biz/buz/bar'), None) self.assertEqual(generator( {'baz':1, 'buz':2, 'remainder':'/a/b'}), '/foo/1/biz/2/bar/a/b') def test_no_beginning_slash(self): matcher, generator = self._callFUT('foo/:baz/biz/:buz/bar') self.assertEqual(matcher('/foo/baz/biz/buz/bar'), {'baz':'baz', 'buz':'buz'}) self.assertEqual(matcher('foo/baz/biz/buz/bar'), None) self.assertEqual(generator({'baz':1, 'buz':2}), '/foo/1/biz/2/bar') def test_custom_regex(self): matcher, generator = self._callFUT('foo/{baz}/biz/{buz:[^/\.]+}.{bar}') 
self.assertEqual(matcher('/foo/baz/biz/buz.bar'), {'baz':'baz', 'buz':'buz', 'bar':'bar'}) self.assertEqual(matcher('foo/baz/biz/buz/bar'), None) self.assertEqual(generator({'baz':1, 'buz':2, 'bar': 'html'}), '/foo/1/biz/2.html') def test_custom_regex_with_colons(self): matcher, generator = self._callFUT('foo/{baz}/biz/{buz:(?:[^/\.]+)}.{bar}') self.assertEqual(matcher('/foo/baz/biz/buz.bar'), {'baz':'baz', 'buz':'buz', 'bar':'bar'}) self.assertEqual(matcher('foo/baz/biz/buz/bar'), None) self.assertEqual(generator({'baz':1, 'buz':2, 'bar': 'html'}), '/foo/1/biz/2.html') def test_mixed_newstyle_oldstyle_pattern_defaults_to_newstyle(self): # pattern: '\\/foo\\/(?P<baz>abc)\\/biz\\/(?P<buz>[^/]+)\\/bar$' # note presence of :abc in pattern (oldstyle match) matcher, generator = self._callFUT('foo/{baz:abc}/biz/{buz}/bar') self.assertEqual(matcher('/foo/abc/biz/buz/bar'), {'baz':'abc', 'buz':'buz'}) self.assertEqual(generator({'baz':1, 'buz':2}), '/foo/1/biz/2/bar') def test_custom_regex_with_embedded_squigglies(self): matcher, generator = self._callFUT('/{buz:\d{4}}') self.assertEqual(matcher('/2001'), {'buz':'2001'}) self.assertEqual(matcher('/200'), None) self.assertEqual(generator({'buz':2001}), '/2001') def test_custom_regex_with_embedded_squigglies2(self): matcher, generator = self._callFUT('/{buz:\d{3,4}}') self.assertEqual(matcher('/2001'), {'buz':'2001'}) self.assertEqual(matcher('/200'), {'buz':'200'}) self.assertEqual(matcher('/20'), None) self.assertEqual(generator({'buz':2001}), '/2001') def test_custom_regex_with_embedded_squigglies3(self): matcher, generator = self._callFUT( '/{buz:(\d{2}|\d{4})-[a-zA-Z]{3,4}-\d{2}}') self.assertEqual(matcher('/2001-Nov-15'), {'buz':'2001-Nov-15'}) self.assertEqual(matcher('/99-June-10'), {'buz':'99-June-10'}) self.assertEqual(matcher('/2-Nov-15'), None) self.assertEqual(matcher('/200-Nov-15'), None) self.assertEqual(matcher('/2001-No-15'), None) self.assertEqual(generator({'buz':'2001-Nov-15'}), '/2001-Nov-15') self.assertEqual(generator({'buz':'99-June-10'}), '/99-June-10') def test_pattern_with_high_order_literal(self): pattern = text_(b'/La Pe\xc3\xb1a/{x}', 'utf-8') matcher, generator = self._callFUT(pattern) self.assertEqual(matcher(text_(b'/La Pe\xc3\xb1a/x', 'utf-8')), {'x':'x'}) self.assertEqual(generator({'x':'1'}), '/La%20Pe%C3%B1a/1') def test_pattern_generate_with_high_order_dynamic(self): pattern = '/{x}' _, generator = self._callFUT(pattern) self.assertEqual( generator({'x':text_(b'La Pe\xc3\xb1a', 'utf-8')}), '/La%20Pe%C3%B1a') def test_docs_sample_generate(self): # sample from urldispatch.rst pattern = text_(b'/La Pe\xc3\xb1a/{city}', 'utf-8') _, generator = self._callFUT(pattern) self.assertEqual( generator({'city':text_(b'Qu\xc3\xa9bec', 'utf-8')}), '/La%20Pe%C3%B1a/Qu%C3%A9bec') def test_generate_with_mixedtype_values(self): pattern = '/{city}/{state}' _, generator = self._callFUT(pattern) result = generator( {'city': text_(b'Qu\xc3\xa9bec', 'utf-8'), 'state': b'La Pe\xc3\xb1a'} ) self.assertEqual(result, '/Qu%C3%A9bec/La%20Pe%C3%B1a') # should be a native string self.assertEqual(type(result), str) def test_highorder_pattern_utf8(self): pattern = b'/La Pe\xc3\xb1a/{city}' self.assertRaises(ValueError, self._callFUT, pattern) def test_generate_with_string_remainder_and_unicode_replacement(self): pattern = text_(b'/abc*remainder', 'utf-8') _, generator = self._callFUT(pattern) result = generator( {'remainder': text_(b'/Qu\xc3\xa9bec/La Pe\xc3\xb1a', 'utf-8')} ) self.assertEqual(result, '/abc/Qu%C3%A9bec/La%20Pe%C3%B1a') # should 
be a native string self.assertEqual(type(result), str) def test_generate_with_string_remainder_and_nonstring_replacement(self): pattern = text_(b'/abc/*remainder', 'utf-8') _, generator = self._callFUT(pattern) result = generator( {'remainder': None} ) self.assertEqual(result, '/abc/None') # should be a native string self.assertEqual(type(result), str) class TestCompileRouteFunctional(unittest.TestCase): def matches(self, pattern, path, expected): from pyramid.urldispatch import _compile_route matcher = _compile_route(pattern)[0] result = matcher(path) self.assertEqual(result, expected) def generates(self, pattern, dict, result): from pyramid.urldispatch import _compile_route self.assertEqual(_compile_route(pattern)[1](dict), result) def test_matcher_functional_notdynamic(self): self.matches('/', '', None) self.matches('', '', None) self.matches('/', '/foo', None) self.matches('/foo/', '/foo', None) self.matches('', '/', {}) self.matches('/', '/', {}) def test_matcher_functional_newstyle(self): self.matches('/{x}', '', None) self.matches('/{x}', '/', None) self.matches('/abc/{def}', '/abc/', None) self.matches('/{x}', '/a', {'x':'a'}) self.matches('zzz/{x}', '/zzz/abc', {'x':'abc'}) self.matches('zzz/{x}*traverse', '/zzz/abc', {'x':'abc', 'traverse':()}) self.matches('zzz/{x}*traverse', '/zzz/abc/def/g', {'x':'abc', 'traverse':('def', 'g')}) self.matches('*traverse', '/zzz/abc', {'traverse':('zzz', 'abc')}) self.matches('*traverse', '/zzz/ abc', {'traverse':('zzz', ' abc')}) #'/La%20Pe%C3%B1a' self.matches('{x}', text_(b'/La Pe\xc3\xb1a', 'utf-8'), {'x':text_(b'La Pe\xc3\xb1a', 'utf-8')}) # '/La%20Pe%C3%B1a/x' self.matches('*traverse', text_(b'/La Pe\xc3\xb1a/x'), {'traverse':(text_(b'La Pe\xc3\xb1a'), 'x')}) self.matches('/foo/{id}.html', '/foo/bar.html', {'id':'bar'}) self.matches('/{num:[0-9]+}/*traverse', '/555/abc/def', {'num':'555', 'traverse':('abc', 'def')}) self.matches('/{num:[0-9]*}/*traverse', '/555/abc/def', {'num':'555', 'traverse':('abc', 'def')}) self.matches('zzz/{_}', '/zzz/abc', {'_':'abc'}) self.matches('zzz/{_abc}', '/zzz/abc', {'_abc':'abc'}) self.matches('zzz/{abc_def}', '/zzz/abc', {'abc_def':'abc'}) def test_matcher_functional_oldstyle(self): self.matches('/:x', '', None) self.matches('/:x', '/', None) self.matches('/abc/:def', '/abc/', None) self.matches('/:x', '/a', {'x':'a'}) self.matches('zzz/:x', '/zzz/abc', {'x':'abc'}) self.matches('zzz/:x*traverse', '/zzz/abc', {'x':'abc', 'traverse':()}) self.matches('zzz/:x*traverse', '/zzz/abc/def/g', {'x':'abc', 'traverse':('def', 'g')}) self.matches('*traverse', '/zzz/abc', {'traverse':('zzz', 'abc')}) self.matches('*traverse', '/zzz/ abc', {'traverse':('zzz', ' abc')}) #'/La%20Pe%C3%B1a' # pattern, path, expected self.matches(':x', text_(b'/La Pe\xc3\xb1a', 'utf-8'), {'x':text_(b'La Pe\xc3\xb1a', 'utf-8')}) # '/La%20Pe%C3%B1a/x' self.matches('*traverse', text_(b'/La Pe\xc3\xb1a/x', 'utf-8'), {'traverse':(text_(b'La Pe\xc3\xb1a', 'utf-8'), 'x')}) self.matches('/foo/:id.html', '/foo/bar.html', {'id':'bar'}) self.matches('/foo/:id_html', '/foo/bar_html', {'id_html':'bar_html'}) self.matches('zzz/:_', '/zzz/abc', {'_':'abc'}) self.matches('zzz/:_abc', '/zzz/abc', {'_abc':'abc'}) self.matches('zzz/:abc_def', '/zzz/abc', {'abc_def':'abc'}) def test_generator_functional_notdynamic(self): self.generates('', {}, '/') self.generates('/', {}, '/') def test_generator_functional_newstyle(self): self.generates('/{x}', {'x':''}, '/') self.generates('/{x}', {'x':'a'}, '/a') self.generates('zzz/{x}', {'x':'abc'}, '/zzz/abc') 
self.generates('zzz/{x}*traverse', {'x':'abc', 'traverse':''}, '/zzz/abc') self.generates('zzz/{x}*traverse', {'x':'abc', 'traverse':'/def/g'}, '/zzz/abc/def/g') self.generates('/{x}', {'x':text_(b'/La Pe\xc3\xb1a', 'utf-8')}, '//La%20Pe%C3%B1a') self.generates('/{x}*y', {'x':text_(b'/La Pe\xc3\xb1a', 'utf-8'), 'y':'/rest/of/path'}, '//La%20Pe%C3%B1a/rest/of/path') self.generates('*traverse', {'traverse':('a', text_(b'La Pe\xf1a'))}, '/a/La%20Pe%C3%B1a') self.generates('/foo/{id}.html', {'id':'bar'}, '/foo/bar.html') self.generates('/foo/{_}', {'_':'20'}, '/foo/20') self.generates('/foo/{_abc}', {'_abc':'20'}, '/foo/20') self.generates('/foo/{abc_def}', {'abc_def':'20'}, '/foo/20') def test_generator_functional_oldstyle(self): self.generates('/:x', {'x':''}, '/') self.generates('/:x', {'x':'a'}, '/a') self.generates('zzz/:x', {'x':'abc'}, '/zzz/abc') self.generates('zzz/:x*traverse', {'x':'abc', 'traverse':''}, '/zzz/abc') self.generates('zzz/:x*traverse', {'x':'abc', 'traverse':'/def/g'}, '/zzz/abc/def/g') self.generates('/:x', {'x':text_(b'/La Pe\xc3\xb1a', 'utf-8')}, '//La%20Pe%C3%B1a') self.generates('/:x*y', {'x':text_(b'/La Pe\xc3\xb1a', 'utf-8'), 'y':'/rest/of/path'}, '//La%20Pe%C3%B1a/rest/of/path') self.generates('*traverse', {'traverse':('a', text_(b'La Pe\xf1a'))}, '/a/La%20Pe%C3%B1a') self.generates('/foo/:id.html', {'id':'bar'}, '/foo/bar.html') self.generates('/foo/:_', {'_':'20'}, '/foo/20') self.generates('/foo/:_abc', {'_abc':'20'}, '/foo/20') self.generates('/foo/:abc_def', {'abc_def':'20'}, '/foo/20') class DummyContext(object): """ """ class DummyRequest(object): def __init__(self, environ): self.environ = environ class DummyRoute(object): def __init__(self, generator): self.generate = generator
danielpronych/pyramid-doxygen
pyramid/tests/test_urldispatch.py
Python
bsd-2-clause
23,774
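# --- Illustrative sketch (not part of the test file above) ---
# TestCompileRoute above shows that pyramid.urldispatch._compile_route (a
# module-private helper) returns a (matcher, generator) pair: the matcher maps
# a path to a dict of route variables or None, and the generator rebuilds a
# path from such a dict. A condensed version of the matches()/generates()
# helpers used in TestCompileRouteFunctional; the pattern below is made up for
# illustration.
from pyramid.urldispatch import _compile_route

matcher, generator = _compile_route('/archives/{action}/{article}')
assert matcher('/archives/edit/42') == {'action': 'edit', 'article': '42'}
assert matcher('/somewhere/else') is None
assert generator({'action': 'edit', 'article': '42'}) == '/archives/edit/42'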
__all__ = ['ttypes', 'constants', 'SpartsFooService', 'SpartsBarService']
djipko/sparts
sparts/gen/sparts_examples/__init__.py
Python
bsd-3-clause
74
# -*- coding: utf-8 -*- from __future__ import absolute_import import logging logging.getLogger().setLevel(logging.DEBUG) from vilya.libs.mlock import MLock from vilya.models.project import CodeDoubanProject from vilya.models.ticket import Ticket from vilya.models.lru_counter import ( ProjectOwnLRUCounter, ProjectWatchLRUCounter) from vilya.models.npull import Pull2 # mc keys, ('{self.id}', '{self.from_sha}', '{self.to_sha}') MC_KEY_PULL_MERGE_BASE = 'PullRequest:%s:%s:%s:merge_base' MC_KEY_PULL_IS_MERGABLE = 'PullRequest:%s:%s:%s:is_auto_mergable' MC_KEY_PULL_ID_BY_PID_AND_TID = 'PullRequest:%s:%s:pull_id' PullRequest = Pull2 def add_pull(ticket, pullreq, user): from dispatches import dispatch from vilya.libs.text import get_mentions_from_text from vilya.libs.signals import pullrequest_signal from vilya.models.user import get_author_by_email from vilya.models.user import User from vilya.models.trello.core import process_trello_notify reporter = user.username commits = pullreq.commits # TODO: refactory is! auto number how to? shas = [p.sha for p in commits] ticket_len = Ticket.get_count_by_proj_id(ticket.project_id) if ticket_len == 0: # 检查是否创建过新PR,若未创建过则以旧PR号为基准累加 max_ticket_id = PullRequest.get_max_ticket_id(ticket.project_id) if max_ticket_id >= 0: ticket = Ticket.add(ticket.project_id, ticket.title, ticket.description, ticket.author, max_ticket_id + 1) else: ticket = Ticket.add(ticket.project_id, ticket.title, ticket.description, ticket.author) else: ticket = Ticket.add(ticket.project_id, ticket.title, ticket.description, ticket.author) pullreq = pullreq.insert(ticket.ticket_number) if shas: ticket.add_commits(','.join(shas), reporter) noti_receivers = [committer.name for committer in CodeDoubanProject.get_committers_by_project(pullreq.to_proj.id)] # noqa noti_receivers = noti_receivers + [pullreq.to_proj.owner.name] get_commit_author = lambda u: get_author_by_email(u.email.encode('utf-8')) commit_authors = {get_commit_author(c.author) for c in commits} commit_authors = {a for a in commit_authors if a} noti_receivers.extend(commit_authors) # diffs, author_by_file, authors = pullreq.get_diffs(with_blame=True) # FIXME: diffs没用到? 
# diffs = pullreq.get_diffs() # blame代码变更的原作者, 也加到at users at_users = get_mentions_from_text(ticket.description) # at_users.extend(authors) at_users.append(pullreq.to_proj.owner_id) at_users = set(at_users) # FIXME: invited_users is used on page /hub/my_pull_requests/ invited_users = [User(u).add_invited_pull_request(ticket.id) for u in at_users] ProjectOwnLRUCounter(user.username).use(pullreq.to_proj.id) ProjectWatchLRUCounter(user.username).use(pullreq.to_proj.id) # TODO: 重构Feed之后取消这个信号的发送 pullrequest_signal.send(user.username, extra_receivers=noti_receivers, pullreq=pullreq, comment=ticket.description, ticket_id=ticket.ticket_id, ticket=ticket, status="unmerge", new_version=True) dispatch('pullreq', data=dict(sender=user.username, content=ticket.description, ticket=ticket, status='unmerge', new_version=True, extra_receivers=noti_receivers)) dispatch('pr_actions', data=dict(type='pr_opened', hooks=pullreq.to_proj.hooks, author=user, title=ticket.title, body=ticket.description, ticket=ticket, pullreq=pullreq)) # FIXME: move to dispatch process_trello_notify(user, ticket) return pullreq # TODO: user merge_pull_before() and merge_pull_after() # TODO: remove argu: request def merge_pull(ticket, pullreq, user, message, request): from dispatches import dispatch from queues_handler import sphinx_builds_add from vilya.libs.signals import pullrequest_signal project = pullreq.to_proj # before # check user permission if not project.has_push_perm(user.name): return 'You do not have merge permission' # check if pull merged if pullreq.merged: return 'This pull was already merged' with MLock.merge_pull(proj_id=pullreq.to_proj.id) as err: if err: return "Merging by someone else ..." # if up-to-date, just close it if pullreq.is_up_to_date(): content = u"Closed due to up-to-date merge" comment = ticket.add_comment(content, user.name) close_pull(ticket, pullreq, user, content, comment, request) return "Closed due to up-to-date merge" # do merge_commit_sha = pullreq.merge(user, message) if merge_commit_sha is None: return "Merge failed" # after ticket_id = ticket.ticket_id # close ticket ticket.close(user.name) # build docs sphinx_builds_add(project.name) # delete tmp branch pullreq.remove_branch_if_temp() # TODO: 重构Feed后取消这个信号的发送 pullrequest_signal.send(user.username, comment='', pullreq=pullreq, ticket_id=ticket_id, ticket=ticket, status="merged", new_version=True) dispatch('pullreq', data=dict(sender=user.username, comment=None, ticket=ticket, status='merged', new_version=True) ) # remove argu: request dispatch('pr_actions', data=dict(type='pr_merge', hooks=project.hooks, request=request, commit_message=message, author=user, ticket=ticket, pullreq=pullreq) ) return None def close_pull(ticket, pullreq, user, content, comment, request): from dispatches import dispatch from vilya.libs.signals import pullrequest_signal project = ticket.project author = user.name ticket.close(author) pullreq.remove_branch_if_temp() # TODO: 重构Feed后取消发送这个信号 pullrequest_signal.send(author, comment=content, pullreq=pullreq, ticket_id=ticket.ticket_id, ticket=ticket, status="closed", new_version=True) dispatch('pullreq', data={ 'sender': author, 'comment': comment, 'ticket': ticket, 'status': 'closed'}) dispatch('pr_actions', data=dict( type='pr_closed', hooks=project.hooks, request=request, author=user, ticket=ticket, pullreq=pullreq, content=content))
douban/code
vilya/models/pull.py
Python
bsd-3-clause
7,853
# *- encoding: utf-8 -*-
"""
nistats version, required package versions, and utilities for checking
"""
# Author: Bertrand Thirion
# License: simplified BSD

# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z   # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN   # Alpha release
# X.YbN   # Beta release
# X.YrcN  # Release Candidate
# X.Y     # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.1.0.dev0'

_NISTATS_INSTALL_MSG = 'See %s for installation information.' % (
    'http://nistats.github.io/introduction.html#installation')

# This is a tuple to preserve order, so that dependencies are checked
# in some meaningful order (more => less 'core'). We avoid using
# collections.OrderedDict to preserve Python 2.6 compatibility.
REQUIRED_MODULE_METADATA = (
    ('numpy', {
        'min_version': '1.6.1',
        'required_at_installation': True,
        'install_info': _NISTATS_INSTALL_MSG}),
    ('scipy', {
        'min_version': '0.9.0',
        'required_at_installation': True,
        'install_info': _NISTATS_INSTALL_MSG}),
    ('nilearn', {
        'min_version': '0.1.0',
        'required_at_installation': True,
        'install_info': _NISTATS_INSTALL_MSG}),
    ('nibabel', {
        'min_version': '1.1.0',
        'required_at_installation': False}),
    ('pandas', {
        'min_version': '0.12.0',
        'required_at_installation': True,
        'install_info': _NISTATS_INSTALL_MSG}),
    )

OPTIONAL_MATPLOTLIB_MIN_VERSION = '1.1.1'


def _import_module_with_version_check(
        module_name, minimum_version, install_info=None):
    """Check that module is installed with a recent enough version
    """
    from distutils.version import LooseVersion

    try:
        module = __import__(module_name)
    except ImportError as exc:
        user_friendly_info = ('Module "{0}" could not be found. {1}').format(
            module_name,
            install_info or 'Please install it properly to use nistats.')
        exc.args += (user_friendly_info,)
        raise

    # Avoid choking on modules with no __version__ attribute
    module_version = getattr(module, '__version__', '0.0.0')

    version_too_old = (not LooseVersion(module_version) >=
                       LooseVersion(minimum_version))

    if version_too_old:
        message = (
            'A {module_name} version of at least {minimum_version} '
            'is required to use nistats. {module_version} was found. '
            'Please upgrade {module_name}').format(
                module_name=module_name,
                minimum_version=minimum_version,
                module_version=module_version)
        raise ImportError(message)

    return module


def _check_module_dependencies(is_nistats_installing=False):
    """Throw an exception if nistats dependencies are not installed.

    Parameters
    ----------
    is_nistats_installing: boolean
        if True, only error on missing packages that cannot be
        auto-installed.
        if False, error on any missing package.

    Throws
    -------
    ImportError
    """
    for (module_name, module_metadata) in REQUIRED_MODULE_METADATA:
        if not (is_nistats_installing and
                not module_metadata['required_at_installation']):
            # Skip check only when installing and it's a module that
            # will be auto-installed.
            _import_module_with_version_check(
                module_name=module_name,
                minimum_version=module_metadata['min_version'],
                install_info=module_metadata.get('install_info'))
salma1601/process-asl
procasl/externals/nistats/version.py
Python
bsd-3-clause
3,756
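# --- Illustrative sketch (not part of the module above) ---
# version.py above centralizes nistats' dependency metadata and two helpers:
# _import_module_with_version_check() imports a module and raises ImportError
# with an install/upgrade hint when it is missing or older than the minimum,
# and _check_module_dependencies() runs that check over REQUIRED_MODULE_METADATA.
# A condensed sketch of how they are meant to be called; the import path below
# follows the vendored location in this repo (procasl/externals/nistats/).
from procasl.externals.nistats import version

# Raises ImportError with a friendly message if numpy is absent or too old.
numpy = version._import_module_with_version_check('numpy', '1.6.1')

# During installation, pass is_nistats_installing=True so that packages the
# installer can pull in automatically are not treated as hard failures.
version._check_module_dependencies(is_nistats_installing=False)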
import sys import numpy as np import pandas as pd from pvlib import iam, modelchain, pvsystem, temperature, inverter from pvlib.modelchain import ModelChain from pvlib.pvsystem import PVSystem from pvlib.tracking import SingleAxisTracker from pvlib.location import Location from pvlib._deprecation import pvlibDeprecationWarning from .conftest import assert_series_equal, assert_frame_equal import pytest from .conftest import fail_on_pvlib_version @pytest.fixture(scope='function') def sapm_dc_snl_ac_system(sapm_module_params, cec_inverter_parameters, sapm_temperature_cs5p_220m): module = 'Canadian_Solar_CS5P_220M___2009_' module_parameters = sapm_module_params.copy() temp_model_params = sapm_temperature_cs5p_220m.copy() system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module=module, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=cec_inverter_parameters) return system @pytest.fixture def cec_dc_snl_ac_system(cec_module_cs5p_220m, cec_inverter_parameters, sapm_temperature_cs5p_220m): module_parameters = cec_module_cs5p_220m.copy() module_parameters['b'] = 0.05 module_parameters['EgRef'] = 1.121 module_parameters['dEgdT'] = -0.0002677 temp_model_params = sapm_temperature_cs5p_220m.copy() system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module=module_parameters['Name'], module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=cec_inverter_parameters) return system @pytest.fixture def cec_dc_snl_ac_arrays(cec_module_cs5p_220m, cec_inverter_parameters, sapm_temperature_cs5p_220m): module_parameters = cec_module_cs5p_220m.copy() module_parameters['b'] = 0.05 module_parameters['EgRef'] = 1.121 module_parameters['dEgdT'] = -0.0002677 temp_model_params = sapm_temperature_cs5p_220m.copy() array_one = pvsystem.Array( mount=pvsystem.FixedMount(surface_tilt=32.2, surface_azimuth=180), module=module_parameters['Name'], module_parameters=module_parameters.copy(), temperature_model_parameters=temp_model_params.copy() ) array_two = pvsystem.Array( mount=pvsystem.FixedMount(surface_tilt=42.2, surface_azimuth=220), module=module_parameters['Name'], module_parameters=module_parameters.copy(), temperature_model_parameters=temp_model_params.copy() ) system = PVSystem( arrays=[array_one, array_two], inverter_parameters=cec_inverter_parameters ) return system @pytest.fixture def cec_dc_native_snl_ac_system(cec_module_cs5p_220m, cec_inverter_parameters, sapm_temperature_cs5p_220m): module_parameters = cec_module_cs5p_220m.copy() temp_model_params = sapm_temperature_cs5p_220m.copy() system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module=module_parameters['Name'], module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=cec_inverter_parameters) return system @pytest.fixture def pvsyst_dc_snl_ac_system(pvsyst_module_params, cec_inverter_parameters, sapm_temperature_cs5p_220m): module = 'PVsyst test module' module_parameters = pvsyst_module_params module_parameters['b'] = 0.05 temp_model_params = sapm_temperature_cs5p_220m.copy() system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module=module, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=cec_inverter_parameters) return system @pytest.fixture def pvsyst_dc_snl_ac_arrays(pvsyst_module_params, cec_inverter_parameters, sapm_temperature_cs5p_220m): module = 'PVsyst test module' module_parameters = pvsyst_module_params 
module_parameters['b'] = 0.05 temp_model_params = sapm_temperature_cs5p_220m.copy() array_one = pvsystem.Array( mount=pvsystem.FixedMount(surface_tilt=32.2, surface_azimuth=180), module=module, module_parameters=module_parameters.copy(), temperature_model_parameters=temp_model_params.copy() ) array_two = pvsystem.Array( mount=pvsystem.FixedMount(surface_tilt=42.2, surface_azimuth=220), module=module, module_parameters=module_parameters.copy(), temperature_model_parameters=temp_model_params.copy() ) system = PVSystem( arrays=[array_one, array_two], inverter_parameters=cec_inverter_parameters ) return system @pytest.fixture def cec_dc_adr_ac_system(sam_data, cec_module_cs5p_220m, sapm_temperature_cs5p_220m): module_parameters = cec_module_cs5p_220m.copy() module_parameters['b'] = 0.05 module_parameters['EgRef'] = 1.121 module_parameters['dEgdT'] = -0.0002677 temp_model_params = sapm_temperature_cs5p_220m.copy() inverters = sam_data['adrinverter'] inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy() system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module=module_parameters['Name'], module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=inverter) return system @pytest.fixture def pvwatts_dc_snl_ac_system(cec_inverter_parameters): module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003} system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module_parameters=module_parameters, inverter_parameters=cec_inverter_parameters) return system @pytest.fixture(scope="function") def pvwatts_dc_pvwatts_ac_system(sapm_temperature_cs5p_220m): module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003} temp_model_params = sapm_temperature_cs5p_220m.copy() inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95} system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=inverter_parameters) return system @pytest.fixture(scope="function") def pvwatts_dc_pvwatts_ac_system_arrays(sapm_temperature_cs5p_220m): module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003} temp_model_params = sapm_temperature_cs5p_220m.copy() inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95} array_one = pvsystem.Array( mount=pvsystem.FixedMount(surface_tilt=32.2, surface_azimuth=180), module_parameters=module_parameters.copy(), temperature_model_parameters=temp_model_params.copy() ) array_two = pvsystem.Array( mount=pvsystem.FixedMount(surface_tilt=42.2, surface_azimuth=220), module_parameters=module_parameters.copy(), temperature_model_parameters=temp_model_params.copy() ) system = PVSystem( arrays=[array_one, array_two], inverter_parameters=inverter_parameters) return system @pytest.fixture(scope="function") def pvwatts_dc_pvwatts_ac_faiman_temp_system(): module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003} temp_model_params = {'u0': 25.0, 'u1': 6.84} inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95} system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=inverter_parameters) return system @pytest.fixture(scope="function") def pvwatts_dc_pvwatts_ac_pvsyst_temp_system(): module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003} temp_model_params = {'u_c': 29.0, 'u_v': 0.0, 'module_efficiency': 0.1, 'alpha_absorption': 0.9} inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95} system = PVSystem(surface_tilt=32.2, surface_azimuth=180, 
module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=inverter_parameters) return system @pytest.fixture(scope="function") def pvwatts_dc_pvwatts_ac_fuentes_temp_system(): module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003} temp_model_params = {'noct_installed': 45} inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95} system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=inverter_parameters) return system @pytest.fixture(scope="function") def pvwatts_dc_pvwatts_ac_noct_sam_temp_system(): module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003} temp_model_params = {'noct': 45, 'module_efficiency': 0.2} inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95} system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=inverter_parameters) return system @pytest.fixture(scope="function") def system_no_aoi(cec_module_cs5p_220m, sapm_temperature_cs5p_220m, cec_inverter_parameters): module_parameters = cec_module_cs5p_220m.copy() module_parameters['EgRef'] = 1.121 module_parameters['dEgdT'] = -0.0002677 temp_model_params = sapm_temperature_cs5p_220m.copy() inverter_parameters = cec_inverter_parameters.copy() system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=inverter_parameters) return system @pytest.fixture def system_no_temp(cec_module_cs5p_220m, cec_inverter_parameters): module_parameters = cec_module_cs5p_220m.copy() module_parameters['EgRef'] = 1.121 module_parameters['dEgdT'] = -0.0002677 inverter_parameters = cec_inverter_parameters.copy() system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module_parameters=module_parameters, inverter_parameters=inverter_parameters) return system @pytest.fixture def location(): return Location(32.2, -111, altitude=700) @pytest.fixture def weather(): times = pd.date_range('20160101 1200-0700', periods=2, freq='6H') weather = pd.DataFrame({'ghi': [500, 0], 'dni': [800, 0], 'dhi': [100, 0]}, index=times) return weather @pytest.fixture def total_irrad(weather): return pd.DataFrame({'poa_global': [800., 500.], 'poa_direct': [500., 300.], 'poa_diffuse': [300., 200.]}, index=weather.index) @pytest.fixture(scope='function') def sapm_dc_snl_ac_system_Array(sapm_module_params, cec_inverter_parameters, sapm_temperature_cs5p_220m): module = 'Canadian_Solar_CS5P_220M___2009_' module_parameters = sapm_module_params.copy() temp_model_params = sapm_temperature_cs5p_220m.copy() array_one = pvsystem.Array(mount=pvsystem.FixedMount(surface_tilt=32, surface_azimuth=180), albedo=0.2, module=module, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, modules_per_string=1, strings=1) array_two = pvsystem.Array(mount=pvsystem.FixedMount(surface_tilt=15, surface_azimuth=180), albedo=0.2, module=module, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, modules_per_string=1, strings=1) return PVSystem(arrays=[array_one, array_two], inverter_parameters=cec_inverter_parameters) @pytest.fixture(scope='function') def sapm_dc_snl_ac_system_same_arrays(sapm_module_params, cec_inverter_parameters, sapm_temperature_cs5p_220m): """A system with two identical arrays.""" module = 'Canadian_Solar_CS5P_220M___2009_' module_parameters = 
sapm_module_params.copy() temp_model_params = sapm_temperature_cs5p_220m.copy() array_one = pvsystem.Array(mount=pvsystem.FixedMount(surface_tilt=32.2, surface_azimuth=180), module=module, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, modules_per_string=1, strings=1) array_two = pvsystem.Array(mount=pvsystem.FixedMount(surface_tilt=32.2, surface_azimuth=180), module=module, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, modules_per_string=1, strings=1) return PVSystem(arrays=[array_one, array_two], inverter_parameters=cec_inverter_parameters) def test_ModelChain_creation(sapm_dc_snl_ac_system, location): ModelChain(sapm_dc_snl_ac_system, location) def test_with_sapm(sapm_dc_snl_ac_system, location, weather): mc = ModelChain.with_sapm(sapm_dc_snl_ac_system, location) assert mc.dc_model == mc.sapm mc.run_model(weather) def test_with_pvwatts(pvwatts_dc_pvwatts_ac_system, location, weather): mc = ModelChain.with_pvwatts(pvwatts_dc_pvwatts_ac_system, location) assert mc.dc_model == mc.pvwatts_dc assert mc.temperature_model == mc.sapm_temp mc.run_model(weather) def test_run_model_with_irradiance(sapm_dc_snl_ac_system, location): mc = ModelChain(sapm_dc_snl_ac_system, location) times = pd.date_range('20160101 1200-0700', periods=2, freq='6H') irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150}, index=times) ac = mc.run_model(irradiance).results.ac expected = pd.Series(np.array([187.80746494643176, -0.02]), index=times) assert_series_equal(ac, expected) @pytest.fixture(scope='function') def multi_array_sapm_dc_snl_ac_system( sapm_temperature_cs5p_220m, sapm_module_params, cec_inverter_parameters): module_parameters = sapm_module_params temp_model_parameters = sapm_temperature_cs5p_220m.copy() inverter_parameters = cec_inverter_parameters array_one = pvsystem.Array( mount=pvsystem.FixedMount(surface_tilt=32.2, surface_azimuth=180), module_parameters=module_parameters, temperature_model_parameters=temp_model_parameters ) array_two = pvsystem.Array( mount=pvsystem.FixedMount(surface_tilt=32.2, surface_azimuth=220), module_parameters=module_parameters, temperature_model_parameters=temp_model_parameters ) two_array_system = PVSystem( arrays=[array_one, array_two], inverter_parameters=inverter_parameters ) array_one_system = PVSystem( arrays=[array_one], inverter_parameters=inverter_parameters ) array_two_system = PVSystem( arrays=[array_two], inverter_parameters=inverter_parameters ) return {'two_array_system': two_array_system, 'array_one_system': array_one_system, 'array_two_system': array_two_system} def test_run_model_from_irradiance_arrays_no_loss( multi_array_sapm_dc_snl_ac_system, location): mc_both = ModelChain( multi_array_sapm_dc_snl_ac_system['two_array_system'], location, aoi_model='no_loss', spectral_model='no_loss', losses_model='no_loss' ) mc_one = ModelChain( multi_array_sapm_dc_snl_ac_system['array_one_system'], location, aoi_model='no_loss', spectral_model='no_loss', losses_model='no_loss' ) mc_two = ModelChain( multi_array_sapm_dc_snl_ac_system['array_two_system'], location, aoi_model='no_loss', spectral_model='no_loss', losses_model='no_loss' ) times = pd.date_range('20160101 1200-0700', periods=2, freq='6H') irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150}, index=times) mc_one.run_model(irradiance) mc_two.run_model(irradiance) mc_both.run_model(irradiance) assert_frame_equal( mc_both.results.dc[0], mc_one.results.dc ) assert_frame_equal( mc_both.results.dc[1], mc_two.results.dc ) 
@pytest.mark.parametrize("input_type", [tuple, list]) def test_run_model_from_irradiance_arrays_no_loss_input_type( multi_array_sapm_dc_snl_ac_system, location, input_type): mc_both = ModelChain( multi_array_sapm_dc_snl_ac_system['two_array_system'], location, aoi_model='no_loss', spectral_model='no_loss', losses_model='no_loss' ) mc_one = ModelChain( multi_array_sapm_dc_snl_ac_system['array_one_system'], location, aoi_model='no_loss', spectral_model='no_loss', losses_model='no_loss' ) mc_two = ModelChain( multi_array_sapm_dc_snl_ac_system['array_two_system'], location, aoi_model='no_loss', spectral_model='no_loss', losses_model='no_loss' ) times = pd.date_range('20160101 1200-0700', periods=2, freq='6H') irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150}, index=times) mc_one.run_model(irradiance) mc_two.run_model(irradiance) mc_both.run_model(input_type((irradiance, irradiance))) assert_frame_equal( mc_both.results.dc[0], mc_one.results.dc ) assert_frame_equal( mc_both.results.dc[1], mc_two.results.dc ) @pytest.mark.parametrize('inverter', ['adr']) def test_ModelChain_invalid_inverter_params_arrays( inverter, sapm_dc_snl_ac_system_same_arrays, location, adr_inverter_parameters): inverter_params = {'adr': adr_inverter_parameters} sapm_dc_snl_ac_system_same_arrays.inverter_parameters = \ inverter_params[inverter] with pytest.raises(ValueError, match=r'adr inverter function cannot'): ModelChain(sapm_dc_snl_ac_system_same_arrays, location) @pytest.mark.parametrize("input_type", [tuple, list]) def test_prepare_inputs_multi_weather( sapm_dc_snl_ac_system_Array, location, input_type): times = pd.date_range(start='20160101 1200-0700', end='20160101 1800-0700', freq='6H') mc = ModelChain(sapm_dc_snl_ac_system_Array, location) weather = pd.DataFrame({'ghi': 1, 'dhi': 1, 'dni': 1}, index=times) mc.prepare_inputs(input_type((weather, weather))) num_arrays = sapm_dc_snl_ac_system_Array.num_arrays assert len(mc.results.total_irrad) == num_arrays def test_prepare_inputs_no_irradiance(sapm_dc_snl_ac_system, location): mc = ModelChain(sapm_dc_snl_ac_system, location) weather = pd.DataFrame() with pytest.raises(ValueError): mc.prepare_inputs(weather) def test_prepare_inputs_arrays_one_missing_irradiance( sapm_dc_snl_ac_system_Array, location): """If any of the input DataFrames is missing a column then a ValueError is raised.""" mc = ModelChain(sapm_dc_snl_ac_system_Array, location) weather = pd.DataFrame( {'ghi': [1], 'dhi': [1], 'dni': [1]} ) weather_incomplete = pd.DataFrame( {'ghi': [1], 'dhi': [1]} ) with pytest.raises(ValueError, match=r"Incomplete input data\. .*"): mc.prepare_inputs((weather, weather_incomplete)) with pytest.raises(ValueError, match=r"Incomplete input data\. .*"): mc.prepare_inputs((weather_incomplete, weather)) @pytest.mark.parametrize("input_type", [tuple, list]) def test_prepare_inputs_weather_wrong_length( sapm_dc_snl_ac_system_Array, location, input_type): mc = ModelChain(sapm_dc_snl_ac_system_Array, location) weather = pd.DataFrame({'ghi': [1], 'dhi': [1], 'dni': [1]}) with pytest.raises(ValueError, match="Input must be same length as number of Arrays " r"in system\. Expected 2, got 1\."): mc.prepare_inputs(input_type((weather,))) with pytest.raises(ValueError, match="Input must be same length as number of Arrays " r"in system\. 
Expected 2, got 3\."): mc.prepare_inputs(input_type((weather, weather, weather))) def test_ModelChain_times_error_arrays(sapm_dc_snl_ac_system_Array, location): """ModelChain.times is assigned a single index given multiple weather DataFrames. """ error_str = r"Input DataFrames must have same index\." mc = ModelChain(sapm_dc_snl_ac_system_Array, location) irradiance = {'ghi': [1, 2], 'dhi': [1, 2], 'dni': [1, 2]} times_one = pd.date_range(start='1/1/2020', freq='6H', periods=2) times_two = pd.date_range(start='1/1/2020 00:15', freq='6H', periods=2) weather_one = pd.DataFrame(irradiance, index=times_one) weather_two = pd.DataFrame(irradiance, index=times_two) with pytest.raises(ValueError, match=error_str): mc.prepare_inputs((weather_one, weather_two)) # test with overlapping, but differently sized indices. times_three = pd.date_range(start='1/1/2020', freq='6H', periods=3) irradiance_three = irradiance irradiance_three['ghi'].append(3) irradiance_three['dhi'].append(3) irradiance_three['dni'].append(3) weather_three = pd.DataFrame(irradiance_three, index=times_three) with pytest.raises(ValueError, match=error_str): mc.prepare_inputs((weather_one, weather_three)) def test_ModelChain_times_arrays(sapm_dc_snl_ac_system_Array, location): """ModelChain.times is assigned a single index given multiple weather DataFrames. """ mc = ModelChain(sapm_dc_snl_ac_system_Array, location) irradiance_one = {'ghi': [1, 2], 'dhi': [1, 2], 'dni': [1, 2]} irradiance_two = {'ghi': [2, 1], 'dhi': [2, 1], 'dni': [2, 1]} times = pd.date_range(start='1/1/2020', freq='6H', periods=2) weather_one = pd.DataFrame(irradiance_one, index=times) weather_two = pd.DataFrame(irradiance_two, index=times) mc.prepare_inputs((weather_one, weather_two)) assert mc.results.times.equals(times) mc = ModelChain(sapm_dc_snl_ac_system_Array, location) mc.prepare_inputs(weather_one) assert mc.results.times.equals(times) @pytest.mark.parametrize("missing", ['dhi', 'ghi', 'dni']) def test_prepare_inputs_missing_irrad_component( sapm_dc_snl_ac_system, location, missing): mc = ModelChain(sapm_dc_snl_ac_system, location) weather = pd.DataFrame({'dhi': [1, 2], 'dni': [1, 2], 'ghi': [1, 2]}) weather.drop(columns=missing, inplace=True) with pytest.raises(ValueError): mc.prepare_inputs(weather) @pytest.mark.parametrize('ac_model', ['sandia', 'pvwatts']) @pytest.mark.parametrize("input_type", [tuple, list]) def test_run_model_arrays_weather(sapm_dc_snl_ac_system_same_arrays, pvwatts_dc_pvwatts_ac_system_arrays, location, ac_model, input_type): system = {'sandia': sapm_dc_snl_ac_system_same_arrays, 'pvwatts': pvwatts_dc_pvwatts_ac_system_arrays} mc = ModelChain(system[ac_model], location, aoi_model='no_loss', spectral_model='no_loss') times = pd.date_range('20200101 1200-0700', periods=2, freq='2H') weather_one = pd.DataFrame({'dni': [900, 800], 'ghi': [600, 500], 'dhi': [150, 100]}, index=times) weather_two = pd.DataFrame({'dni': [500, 400], 'ghi': [300, 200], 'dhi': [75, 65]}, index=times) mc.run_model(input_type((weather_one, weather_two))) assert (mc.results.dc[0] != mc.results.dc[1]).all().all() assert not mc.results.ac.empty def test_run_model_perez(sapm_dc_snl_ac_system, location): mc = ModelChain(sapm_dc_snl_ac_system, location, transposition_model='perez') times = pd.date_range('20160101 1200-0700', periods=2, freq='6H') irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150}, index=times) ac = mc.run_model(irradiance).results.ac expected = pd.Series(np.array([187.94295642, -2.00000000e-02]), index=times) assert_series_equal(ac, 
expected) def test_run_model_gueymard_perez(sapm_dc_snl_ac_system, location): mc = ModelChain(sapm_dc_snl_ac_system, location, airmass_model='gueymard1993', transposition_model='perez') times = pd.date_range('20160101 1200-0700', periods=2, freq='6H') irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150}, index=times) ac = mc.run_model(irradiance).results.ac expected = pd.Series(np.array([187.94317405, -2.00000000e-02]), index=times) assert_series_equal(ac, expected) def test_run_model_with_weather_sapm_temp(sapm_dc_snl_ac_system, location, weather, mocker): # test with sapm cell temperature model weather['wind_speed'] = 5 weather['temp_air'] = 10 mc = ModelChain(sapm_dc_snl_ac_system, location) mc.temperature_model = 'sapm' m_sapm = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature') mc.run_model(weather) assert m_sapm.call_count == 1 # assert_called_once_with cannot be used with series, so need to use # assert_series_equal on call_args assert_series_equal(m_sapm.call_args[0][1], weather['temp_air']) # temp assert_series_equal(m_sapm.call_args[0][2], weather['wind_speed']) # wind assert m_sapm.call_args[1]['model'] == 'sapm' assert not mc.results.ac.empty def test_run_model_with_weather_pvsyst_temp(sapm_dc_snl_ac_system, location, weather, mocker): # test with pvsyst cell temperature model weather['wind_speed'] = 5 weather['temp_air'] = 10 sapm_dc_snl_ac_system.arrays[0].racking_model = 'freestanding' sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters = \ temperature._temperature_model_params('pvsyst', 'freestanding') mc = ModelChain(sapm_dc_snl_ac_system, location) mc.temperature_model = 'pvsyst' m_pvsyst = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature') mc.run_model(weather) assert m_pvsyst.call_count == 1 assert_series_equal(m_pvsyst.call_args[0][1], weather['temp_air']) assert_series_equal(m_pvsyst.call_args[0][2], weather['wind_speed']) assert m_pvsyst.call_args[1]['model'] == 'pvsyst' assert not mc.results.ac.empty def test_run_model_with_weather_faiman_temp(sapm_dc_snl_ac_system, location, weather, mocker): # test with faiman cell temperature model weather['wind_speed'] = 5 weather['temp_air'] = 10 sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters = { 'u0': 25.0, 'u1': 6.84 } mc = ModelChain(sapm_dc_snl_ac_system, location) mc.temperature_model = 'faiman' m_faiman = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature') mc.run_model(weather) assert m_faiman.call_count == 1 assert_series_equal(m_faiman.call_args[0][1], weather['temp_air']) assert_series_equal(m_faiman.call_args[0][2], weather['wind_speed']) assert m_faiman.call_args[1]['model'] == 'faiman' assert not mc.results.ac.empty def test_run_model_with_weather_fuentes_temp(sapm_dc_snl_ac_system, location, weather, mocker): weather['wind_speed'] = 5 weather['temp_air'] = 10 sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters = { 'noct_installed': 45, 'surface_tilt': 30, } mc = ModelChain(sapm_dc_snl_ac_system, location) mc.temperature_model = 'fuentes' m_fuentes = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature') mc.run_model(weather) assert m_fuentes.call_count == 1 assert_series_equal(m_fuentes.call_args[0][1], weather['temp_air']) assert_series_equal(m_fuentes.call_args[0][2], weather['wind_speed']) assert m_fuentes.call_args[1]['model'] == 'fuentes' assert not mc.results.ac.empty def test_run_model_with_weather_noct_sam_temp(sapm_dc_snl_ac_system, location, weather, mocker): weather['wind_speed'] = 5 weather['temp_air'] = 10 
sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters = { 'noct': 45, 'module_efficiency': 0.2 } mc = ModelChain(sapm_dc_snl_ac_system, location) mc.temperature_model = 'noct_sam' m_noct_sam = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature') mc.run_model(weather) assert m_noct_sam.call_count == 1 assert_series_equal(m_noct_sam.call_args[0][1], weather['temp_air']) assert_series_equal(m_noct_sam.call_args[0][2], weather['wind_speed']) # check that effective_irradiance was used assert m_noct_sam.call_args[1] == { 'effective_irradiance': mc.results.effective_irradiance, 'model': 'noct_sam'} def test_run_model_tracker(sapm_dc_snl_ac_system, location, weather, mocker): with pytest.warns(pvlibDeprecationWarning): system = SingleAxisTracker( module_parameters=sapm_dc_snl_ac_system.arrays[0].module_parameters, # noqa: E501 temperature_model_parameters=( sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters ), inverter_parameters=sapm_dc_snl_ac_system.inverter_parameters) mocker.spy(system, 'singleaxis') mc = ModelChain(system, location) mc.run_model(weather) assert system.singleaxis.call_count == 1 assert (mc.results.tracking.columns == ['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt']).all() assert mc.results.ac[0] > 0 assert np.isnan(mc.results.ac[1]) assert isinstance(mc.results.dc, pd.DataFrame) def test_run_model_tracker_list( sapm_dc_snl_ac_system, location, weather, mocker): with pytest.warns(pvlibDeprecationWarning): system = SingleAxisTracker( module_parameters=sapm_dc_snl_ac_system.arrays[0].module_parameters, # noqa: E501 temperature_model_parameters=( sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters ), inverter_parameters=sapm_dc_snl_ac_system.inverter_parameters) mocker.spy(system, 'singleaxis') mc = ModelChain(system, location) mc.run_model([weather]) assert system.singleaxis.call_count == 1 assert (mc.results.tracking.columns == ['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt']).all() assert mc.results.ac[0] > 0 assert np.isnan(mc.results.ac[1]) assert isinstance(mc.results.dc, tuple) assert len(mc.results.dc) == 1 def test__assign_total_irrad(sapm_dc_snl_ac_system, location, weather, total_irrad): data = pd.concat([weather, total_irrad], axis=1) mc = ModelChain(sapm_dc_snl_ac_system, location) mc._assign_total_irrad(data) assert_frame_equal(mc.results.total_irrad, total_irrad) def test_prepare_inputs_from_poa(sapm_dc_snl_ac_system, location, weather, total_irrad): data = pd.concat([weather, total_irrad], axis=1) mc = ModelChain(sapm_dc_snl_ac_system, location) mc.prepare_inputs_from_poa(data) weather_expected = weather.copy() weather_expected['temp_air'] = 20 weather_expected['wind_speed'] = 0 # order as expected weather_expected = weather_expected[ ['ghi', 'dhi', 'dni', 'wind_speed', 'temp_air']] # weather attribute assert_frame_equal(mc.results.weather, weather_expected) # total_irrad attribute assert_frame_equal(mc.results.total_irrad, total_irrad) assert not pd.isnull(mc.results.solar_position.index[0]) @pytest.mark.parametrize("input_type", [tuple, list]) def test_prepare_inputs_from_poa_multi_data( sapm_dc_snl_ac_system_Array, location, total_irrad, weather, input_type): mc = ModelChain(sapm_dc_snl_ac_system_Array, location) poa = pd.concat([weather, total_irrad], axis=1) mc.prepare_inputs_from_poa(input_type((poa, poa))) num_arrays = sapm_dc_snl_ac_system_Array.num_arrays assert len(mc.results.total_irrad) == num_arrays @pytest.mark.parametrize("input_type", [tuple, list]) def 
test_prepare_inputs_from_poa_wrong_number_arrays( sapm_dc_snl_ac_system_Array, location, total_irrad, weather, input_type): len_error = r"Input must be same length as number of Arrays in system\. " \ r"Expected 2, got [0-9]+\." type_error = r"Input must be a tuple of length 2, got .*\." mc = ModelChain(sapm_dc_snl_ac_system_Array, location) poa = pd.concat([weather, total_irrad], axis=1) with pytest.raises(TypeError, match=type_error): mc.prepare_inputs_from_poa(poa) with pytest.raises(ValueError, match=len_error): mc.prepare_inputs_from_poa(input_type((poa,))) with pytest.raises(ValueError, match=len_error): mc.prepare_inputs_from_poa(input_type((poa, poa, poa))) def test_prepare_inputs_from_poa_arrays_different_indices( sapm_dc_snl_ac_system_Array, location, total_irrad, weather): error_str = r"Input DataFrames must have same index\." mc = ModelChain(sapm_dc_snl_ac_system_Array, location) poa = pd.concat([weather, total_irrad], axis=1) with pytest.raises(ValueError, match=error_str): mc.prepare_inputs_from_poa((poa, poa.shift(periods=1, freq='6H'))) def test_prepare_inputs_from_poa_arrays_missing_column( sapm_dc_snl_ac_system_Array, location, weather, total_irrad): mc = ModelChain(sapm_dc_snl_ac_system_Array, location) poa = pd.concat([weather, total_irrad], axis=1) with pytest.raises(ValueError, match=r"Incomplete input data\. " r"Data needs to contain .*\. " r"Detected data in element 1 " r"contains: .*"): mc.prepare_inputs_from_poa((poa, poa.drop(columns='poa_global'))) def test__prepare_temperature(sapm_dc_snl_ac_system, location, weather, total_irrad): data = weather.copy() data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') # prepare_temperature expects mc.total_irrad and mc.results.weather # to be set mc._assign_weather(data) mc._assign_total_irrad(data) mc._prepare_temperature(data) expected = pd.Series([48.928025, 38.080016], index=data.index) assert_series_equal(mc.results.cell_temperature, expected) data['module_temperature'] = [40., 30.] mc._prepare_temperature(data) expected = pd.Series([42.4, 31.5], index=data.index) assert_series_equal(mc.results.cell_temperature, expected) data['cell_temperature'] = [50., 35.] mc._prepare_temperature(data) assert_series_equal(mc.results.cell_temperature, data['cell_temperature']) def test__prepare_temperature_len1_weather_tuple( sapm_dc_snl_ac_system, location, weather, total_irrad): # GH 1192 weather['module_temperature'] = [40., 30.] 
data = weather.copy() mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') mc.run_model([data]) expected = pd.Series([42.617244212941394, 30.0], index=data.index) assert_series_equal(mc.results.cell_temperature[0], expected) data = weather.copy().rename( columns={ "ghi": "poa_global", "dhi": "poa_diffuse", "dni": "poa_direct"} ) mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') mc.run_model_from_poa([data]) expected = pd.Series([41.5, 30.0], index=data.index) assert_series_equal(mc.results.cell_temperature[0], expected) data = weather.copy()[["module_temperature", "ghi"]].rename( columns={"ghi": "effective_irradiance"} ) mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') mc.run_model_from_effective_irradiance([data]) expected = pd.Series([41.5, 30.0], index=data.index) assert_series_equal(mc.results.cell_temperature[0], expected) def test__prepare_temperature_arrays_weather(sapm_dc_snl_ac_system_same_arrays, location, weather, total_irrad): data = weather.copy() data[['poa_global', 'poa_direct', 'poa_diffuse']] = total_irrad data_two = data.copy() mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location, aoi_model='no_loss', spectral_model='no_loss') # prepare_temperature expects mc.results.total_irrad and mc.results.weather # to be set mc._assign_weather((data, data_two)) mc._assign_total_irrad((data, data_two)) mc._prepare_temperature((data, data_two)) expected = pd.Series([48.928025, 38.080016], index=data.index) assert_series_equal(mc.results.cell_temperature[0], expected) assert_series_equal(mc.results.cell_temperature[1], expected) data['module_temperature'] = [40., 30.] mc._prepare_temperature((data, data_two)) expected = pd.Series([42.4, 31.5], index=data.index) assert (mc.results.cell_temperature[1] != expected).all() assert_series_equal(mc.results.cell_temperature[0], expected) data['cell_temperature'] = [50., 35.] mc._prepare_temperature((data, data_two)) assert_series_equal( mc.results.cell_temperature[0], data['cell_temperature']) data_two['module_temperature'] = [40., 30.] 
mc._prepare_temperature((data, data_two)) assert_series_equal(mc.results.cell_temperature[1], expected) assert_series_equal( mc.results.cell_temperature[0], data['cell_temperature']) data_two['cell_temperature'] = [10.0, 20.0] mc._prepare_temperature((data, data_two)) assert_series_equal( mc.results.cell_temperature[1], data_two['cell_temperature']) assert_series_equal( mc.results.cell_temperature[0], data['cell_temperature']) @pytest.mark.parametrize('temp_params,temp_model', [({'a': -3.47, 'b': -.0594, 'deltaT': 3}, ModelChain.sapm_temp), ({'u_c': 29.0, 'u_v': 0}, ModelChain.pvsyst_temp), ({'u0': 25.0, 'u1': 6.84}, ModelChain.faiman_temp), ({'noct_installed': 45}, ModelChain.fuentes_temp), ({'noct': 45, 'module_efficiency': 0.2}, ModelChain.noct_sam_temp)]) def test_temperature_models_arrays_multi_weather( temp_params, temp_model, sapm_dc_snl_ac_system_same_arrays, location, weather, total_irrad): for array in sapm_dc_snl_ac_system_same_arrays.arrays: array.temperature_model_parameters = temp_params # set air temp so it does not default to the same value for both arrays weather['temp_air'] = 25 weather_one = weather weather_two = weather.copy() * 0.5 mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location, aoi_model='no_loss', spectral_model='no_loss') mc.prepare_inputs((weather_one, weather_two)) temp_model(mc) assert (mc.results.cell_temperature[0] != mc.results.cell_temperature[1]).all() def test_run_model_solar_position_weather( pvwatts_dc_pvwatts_ac_system, location, weather, mocker): mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') weather['pressure'] = 90000 weather['temp_air'] = 25 m = mocker.spy(location, 'get_solarposition') mc.run_model(weather) # assert_called_once_with cannot be used with series, so need to use # assert_series_equal on call_args assert_series_equal(m.call_args[1]['temperature'], weather['temp_air']) assert_series_equal(m.call_args[1]['pressure'], weather['pressure']) def test_run_model_from_poa(sapm_dc_snl_ac_system, location, total_irrad): mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') ac = mc.run_model_from_poa(total_irrad).results.ac expected = pd.Series(np.array([149.280238, 96.678385]), index=total_irrad.index) assert_series_equal(ac, expected) @pytest.mark.parametrize("input_type", [tuple, list]) def test_run_model_from_poa_arrays(sapm_dc_snl_ac_system_Array, location, weather, total_irrad, input_type): data = weather.copy() data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad mc = ModelChain(sapm_dc_snl_ac_system_Array, location, aoi_model='no_loss', spectral_model='no_loss') mc.run_model_from_poa(input_type((data, data))) # arrays have different orientation, but should give same dc power # because we are the same passing POA irradiance and air # temperature. 
assert_frame_equal(mc.results.dc[0], mc.results.dc[1]) def test_run_model_from_poa_arrays_solar_position_weather( sapm_dc_snl_ac_system_Array, location, weather, total_irrad, mocker): data = weather.copy() data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad data['pressure'] = 90000 data['temp_air'] = 25 data2 = data.copy() data2['pressure'] = 95000 data2['temp_air'] = 30 mc = ModelChain(sapm_dc_snl_ac_system_Array, location, aoi_model='no_loss', spectral_model='no_loss') m = mocker.spy(location, 'get_solarposition') mc.run_model_from_poa((data, data2)) # mc uses only the first weather data for solar position corrections assert_series_equal(m.call_args[1]['temperature'], data['temp_air']) assert_series_equal(m.call_args[1]['pressure'], data['pressure']) def test_run_model_from_poa_tracking(sapm_dc_snl_ac_system, location, total_irrad): with pytest.warns(pvlibDeprecationWarning): system = SingleAxisTracker( module_parameters=sapm_dc_snl_ac_system.arrays[0].module_parameters, # noqa: E501 temperature_model_parameters=( sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters ), inverter_parameters=sapm_dc_snl_ac_system.inverter_parameters) mc = ModelChain(system, location, aoi_model='no_loss', spectral_model='no_loss') ac = mc.run_model_from_poa(total_irrad).results.ac assert (mc.results.tracking.columns == ['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt']).all() expected = pd.Series(np.array([149.280238, 96.678385]), index=total_irrad.index) assert_series_equal(ac, expected) @pytest.mark.parametrize("input_type", [lambda x: x[0], tuple, list]) def test_run_model_from_effective_irradiance(sapm_dc_snl_ac_system, location, weather, total_irrad, input_type): data = weather.copy() data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad data['effective_irradiance'] = data['poa_global'] mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') ac = mc.run_model_from_effective_irradiance(input_type((data,))).results.ac expected = pd.Series(np.array([149.280238, 96.678385]), index=data.index) assert_series_equal(ac, expected) @pytest.mark.parametrize("input_type", [tuple, list]) def test_run_model_from_effective_irradiance_multi_array( sapm_dc_snl_ac_system_Array, location, weather, total_irrad, input_type): data = weather.copy() data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad data['effective_irradiance'] = data['poa_global'] mc = ModelChain(sapm_dc_snl_ac_system_Array, location, aoi_model='no_loss', spectral_model='no_loss') mc.run_model_from_effective_irradiance(input_type((data, data))) # arrays have different orientation, but should give same dc power # because we are the same passing POA irradiance and air # temperature. 
assert_frame_equal(mc.results.dc[0], mc.results.dc[1]) @pytest.mark.parametrize("input_type", [lambda x: x[0], tuple, list]) def test_run_model_from_effective_irradiance_no_poa_global( sapm_dc_snl_ac_system, location, weather, total_irrad, input_type): data = weather.copy() data['effective_irradiance'] = total_irrad['poa_global'] mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') ac = mc.run_model_from_effective_irradiance(input_type((data,))).results.ac expected = pd.Series(np.array([149.280238, 96.678385]), index=data.index) assert_series_equal(ac, expected) def test_run_model_from_effective_irradiance_poa_global_differs( sapm_dc_snl_ac_system, location, weather, total_irrad): data = weather.copy() data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad data['effective_irradiance'] = data['poa_global'] * 0.8 mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') ac = mc.run_model_from_effective_irradiance(data).results.ac expected = pd.Series(np.array([118.302801, 76.099841]), index=data.index) assert_series_equal(ac, expected) @pytest.mark.parametrize("input_type", [tuple, list]) def test_run_model_from_effective_irradiance_arrays_error( sapm_dc_snl_ac_system_Array, location, weather, total_irrad, input_type): data = weather.copy() data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad data['effetive_irradiance'] = data['poa_global'] mc = ModelChain(sapm_dc_snl_ac_system_Array, location) len_error = r"Input must be same length as number of Arrays in system\. " \ r"Expected 2, got [0-9]+\." type_error = r"Input must be a tuple of length 2, got DataFrame\." with pytest.raises(TypeError, match=type_error): mc.run_model_from_effective_irradiance(data) with pytest.raises(ValueError, match=len_error): mc.run_model_from_effective_irradiance(input_type((data,))) with pytest.raises(ValueError, match=len_error): mc.run_model_from_effective_irradiance(input_type((data, data, data))) with pytest.raises(ValueError, match=r"Input DataFrames must have same index\."): mc.run_model_from_effective_irradiance( (data, data.shift(periods=1, freq='6H')) ) @pytest.mark.parametrize("input_type", [tuple, list]) def test_run_model_from_effective_irradiance_arrays( sapm_dc_snl_ac_system_Array, location, weather, total_irrad, input_type): data = weather.copy() data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad data['effective_irradiance'] = data['poa_global'] data['cell_temperature'] = 40 mc = ModelChain(sapm_dc_snl_ac_system_Array, location) mc.run_model_from_effective_irradiance(input_type((data, data))) # arrays have different orientation, but should give same dc power # because we are the same passing effective irradiance and cell # temperature. 
assert_frame_equal(mc.results.dc[0], mc.results.dc[1]) # test that unequal inputs create unequal results data_two = data.copy() data_two['effective_irradiance'] = data['poa_global'] * 0.5 mc.run_model_from_effective_irradiance(input_type((data, data_two))) assert (mc.results.dc[0] != mc.results.dc[1]).all().all() def test_run_model_from_effective_irradiance_minimal_input( sapm_dc_snl_ac_system, sapm_dc_snl_ac_system_Array, location, total_irrad): data = pd.DataFrame({'effective_irradiance': total_irrad['poa_global'], 'cell_temperature': 40}, index=total_irrad.index) mc = ModelChain(sapm_dc_snl_ac_system, location) mc.run_model_from_effective_irradiance(data) # make sure, for a single Array, the result is the correct type and value assert_series_equal(mc.results.cell_temperature, data['cell_temperature']) assert not mc.results.dc.empty assert not mc.results.ac.empty # test with multiple arrays mc = ModelChain(sapm_dc_snl_ac_system_Array, location) mc.run_model_from_effective_irradiance((data, data)) assert_frame_equal(mc.results.dc[0], mc.results.dc[1]) assert not mc.results.ac.empty def test_run_model_singleton_weather_single_array(cec_dc_snl_ac_system, location, weather): mc = ModelChain(cec_dc_snl_ac_system, location, aoi_model="no_loss", spectral_model="no_loss") mc.run_model([weather]) assert isinstance(mc.results.weather, tuple) assert isinstance(mc.results.total_irrad, tuple) assert isinstance(mc.results.aoi, tuple) assert isinstance(mc.results.aoi_modifier, tuple) assert isinstance(mc.results.spectral_modifier, tuple) assert isinstance(mc.results.effective_irradiance, tuple) assert isinstance(mc.results.dc, tuple) assert isinstance(mc.results.cell_temperature, tuple) assert len(mc.results.cell_temperature) == 1 assert isinstance(mc.results.cell_temperature[0], pd.Series) def test_run_model_from_poa_singleton_weather_single_array( sapm_dc_snl_ac_system, location, total_irrad): mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') ac = mc.run_model_from_poa([total_irrad]).results.ac expected = pd.Series(np.array([149.280238, 96.678385]), index=total_irrad.index) assert isinstance(mc.results.weather, tuple) assert isinstance(mc.results.cell_temperature, tuple) assert len(mc.results.cell_temperature) == 1 assert isinstance(mc.results.cell_temperature[0], pd.Series) assert_series_equal(ac, expected) def test_run_model_from_effective_irradiance_weather_single_array( sapm_dc_snl_ac_system, location, weather, total_irrad): data = weather.copy() data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad data['effective_irradiance'] = data['poa_global'] mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss') ac = mc.run_model_from_effective_irradiance([data]).results.ac expected = pd.Series(np.array([149.280238, 96.678385]), index=data.index) assert isinstance(mc.results.weather, tuple) assert isinstance(mc.results.cell_temperature, tuple) assert len(mc.results.cell_temperature) == 1 assert isinstance(mc.results.cell_temperature[0], pd.Series) assert isinstance(mc.results.dc, tuple) assert len(mc.results.dc) == 1 assert isinstance(mc.results.dc[0], pd.DataFrame) assert_series_equal(ac, expected) def poadc(mc): mc.results.dc = mc.results.total_irrad['poa_global'] * 0.2 mc.results.dc.name = None # assert_series_equal will fail without this @pytest.mark.parametrize('dc_model', [ 'sapm', 'cec', 'desoto', 'pvsyst', 'singlediode', 'pvwatts_dc']) def test_infer_dc_model(sapm_dc_snl_ac_system, cec_dc_snl_ac_system, 
pvsyst_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_system, location, dc_model, weather, mocker): dc_systems = {'sapm': sapm_dc_snl_ac_system, 'cec': cec_dc_snl_ac_system, 'desoto': cec_dc_snl_ac_system, 'pvsyst': pvsyst_dc_snl_ac_system, 'singlediode': cec_dc_snl_ac_system, 'pvwatts_dc': pvwatts_dc_pvwatts_ac_system} dc_model_function = {'sapm': 'sapm', 'cec': 'calcparams_cec', 'desoto': 'calcparams_desoto', 'pvsyst': 'calcparams_pvsyst', 'singlediode': 'calcparams_desoto', 'pvwatts_dc': 'pvwatts_dc'} temp_model_function = {'sapm': 'sapm', 'cec': 'sapm', 'desoto': 'sapm', 'pvsyst': 'pvsyst', 'singlediode': 'sapm', 'pvwatts_dc': 'sapm'} temp_model_params = {'sapm': {'a': -3.40641, 'b': -0.0842075, 'deltaT': 3}, 'pvsyst': {'u_c': 29.0, 'u_v': 0}} system = dc_systems[dc_model] for array in system.arrays: array.temperature_model_parameters = temp_model_params[ temp_model_function[dc_model]] # remove Adjust from model parameters for desoto, singlediode if dc_model in ['desoto', 'singlediode']: for array in system.arrays: array.module_parameters.pop('Adjust') m = mocker.spy(pvsystem, dc_model_function[dc_model]) mc = ModelChain(system, location, aoi_model='no_loss', spectral_model='no_loss', temperature_model=temp_model_function[dc_model]) mc.run_model(weather) assert m.call_count == 1 assert isinstance(mc.results.dc, (pd.Series, pd.DataFrame)) def test_infer_dc_model_incomplete(multi_array_sapm_dc_snl_ac_system, location): match = 'Could not infer DC model from the module_parameters attributes ' system = multi_array_sapm_dc_snl_ac_system['two_array_system'] system.arrays[0].module_parameters.pop('A0') with pytest.raises(ValueError, match=match): ModelChain(system, location) @pytest.mark.parametrize('dc_model', ['cec', 'desoto', 'pvsyst']) def test_singlediode_dc_arrays(location, dc_model, cec_dc_snl_ac_arrays, pvsyst_dc_snl_ac_arrays, weather): systems = {'cec': cec_dc_snl_ac_arrays, 'pvsyst': pvsyst_dc_snl_ac_arrays, 'desoto': cec_dc_snl_ac_arrays} temp_sapm = {'a': -3.40641, 'b': -0.0842075, 'deltaT': 3} temp_pvsyst = {'u_c': 29.0, 'u_v': 0} temp_model_params = {'cec': temp_sapm, 'desoto': temp_sapm, 'pvsyst': temp_pvsyst} temp_model = {'cec': 'sapm', 'desoto': 'sapm', 'pvsyst': 'pvsyst'} system = systems[dc_model] for array in system.arrays: array.temperature_model_parameters = temp_model_params[dc_model] if dc_model == 'desoto': for array in system.arrays: array.module_parameters.pop('Adjust') mc = ModelChain(system, location, aoi_model='no_loss', spectral_model='no_loss', temperature_model=temp_model[dc_model]) mc.run_model(weather) assert isinstance(mc.results.dc, tuple) assert len(mc.results.dc) == system.num_arrays for dc in mc.results.dc: assert isinstance(dc, (pd.Series, pd.DataFrame)) @pytest.mark.parametrize('dc_model', ['sapm', 'cec', 'cec_native']) def test_infer_spectral_model(location, sapm_dc_snl_ac_system, cec_dc_snl_ac_system, cec_dc_native_snl_ac_system, dc_model): dc_systems = {'sapm': sapm_dc_snl_ac_system, 'cec': cec_dc_snl_ac_system, 'cec_native': cec_dc_native_snl_ac_system} system = dc_systems[dc_model] mc = ModelChain(system, location, aoi_model='physical') assert isinstance(mc, ModelChain) @pytest.mark.parametrize('temp_model', [ 'sapm_temp', 'faiman_temp', 'pvsyst_temp', 'fuentes_temp', 'noct_sam_temp']) def test_infer_temp_model(location, sapm_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_pvsyst_temp_system, pvwatts_dc_pvwatts_ac_faiman_temp_system, pvwatts_dc_pvwatts_ac_fuentes_temp_system, pvwatts_dc_pvwatts_ac_noct_sam_temp_system, temp_model): dc_systems = {'sapm_temp': 
sapm_dc_snl_ac_system, 'pvsyst_temp': pvwatts_dc_pvwatts_ac_pvsyst_temp_system, 'faiman_temp': pvwatts_dc_pvwatts_ac_faiman_temp_system, 'fuentes_temp': pvwatts_dc_pvwatts_ac_fuentes_temp_system, 'noct_sam_temp': pvwatts_dc_pvwatts_ac_noct_sam_temp_system} system = dc_systems[temp_model] mc = ModelChain(system, location, aoi_model='physical', spectral_model='no_loss') assert temp_model == mc.temperature_model.__name__ assert isinstance(mc, ModelChain) def test_infer_temp_model_invalid(location, sapm_dc_snl_ac_system): sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters.pop('a') with pytest.raises(ValueError): ModelChain(sapm_dc_snl_ac_system, location, aoi_model='physical', spectral_model='no_loss') def test_temperature_model_inconsistent(location, sapm_dc_snl_ac_system): with pytest.raises(ValueError): ModelChain(sapm_dc_snl_ac_system, location, aoi_model='physical', spectral_model='no_loss', temperature_model='pvsyst') def test_dc_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather, mocker): m = mocker.spy(sys.modules[__name__], 'poadc') mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model=poadc, aoi_model='no_loss', spectral_model='no_loss') mc.run_model(weather) assert m.call_count == 1 assert isinstance(mc.results.ac, (pd.Series, pd.DataFrame)) assert not mc.results.ac.empty def test_pvwatts_dc_multiple_strings(pvwatts_dc_pvwatts_ac_system, location, weather, mocker): system = pvwatts_dc_pvwatts_ac_system m = mocker.spy(system, 'scale_voltage_current_power') mc1 = ModelChain(system, location, aoi_model='no_loss', spectral_model='no_loss') mc1.run_model(weather) assert m.call_count == 1 system.arrays[0].modules_per_string = 2 mc2 = ModelChain(system, location, aoi_model='no_loss', spectral_model='no_loss') mc2.run_model(weather) assert isinstance(mc2.results.ac, (pd.Series, pd.DataFrame)) assert not mc2.results.ac.empty expected = pd.Series(data=[2., np.nan], index=mc2.results.dc.index, name='p_mp') assert_series_equal(mc2.results.dc / mc1.results.dc, expected) def acdc(mc): mc.results.ac = mc.results.dc @pytest.mark.parametrize('inverter_model', ['sandia', 'adr', 'pvwatts', 'sandia_multi', 'pvwatts_multi']) def test_ac_models(sapm_dc_snl_ac_system, cec_dc_adr_ac_system, pvwatts_dc_pvwatts_ac_system, cec_dc_snl_ac_arrays, pvwatts_dc_pvwatts_ac_system_arrays, location, inverter_model, weather, mocker): ac_systems = {'sandia': sapm_dc_snl_ac_system, 'sandia_multi': cec_dc_snl_ac_arrays, 'adr': cec_dc_adr_ac_system, 'pvwatts': pvwatts_dc_pvwatts_ac_system, 'pvwatts_multi': pvwatts_dc_pvwatts_ac_system_arrays} inverter_to_ac_model = { 'sandia': 'sandia', 'sandia_multi': 'sandia', 'adr': 'adr', 'pvwatts': 'pvwatts', 'pvwatts_multi': 'pvwatts'} ac_model = inverter_to_ac_model[inverter_model] system = ac_systems[inverter_model] mc_inferred = ModelChain(system, location, aoi_model='no_loss', spectral_model='no_loss') mc = ModelChain(system, location, ac_model=ac_model, aoi_model='no_loss', spectral_model='no_loss') # tests ModelChain.infer_ac_model assert mc_inferred.ac_model.__name__ == mc.ac_model.__name__ m = mocker.spy(inverter, inverter_model) mc.run_model(weather) assert m.call_count == 1 assert isinstance(mc.results.ac, pd.Series) assert not mc.results.ac.empty assert mc.results.ac[1] < 1 def test_ac_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather, mocker): m = mocker.spy(sys.modules[__name__], 'acdc') mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, ac_model=acdc, aoi_model='no_loss', spectral_model='no_loss') 
mc.run_model(weather) assert m.call_count == 1 assert_series_equal(mc.results.ac, mc.results.dc) assert not mc.results.ac.empty def test_ac_model_not_a_model(pvwatts_dc_pvwatts_ac_system, location, weather): exc_text = 'not a valid AC power model' with pytest.raises(ValueError, match=exc_text): ModelChain(pvwatts_dc_pvwatts_ac_system, location, ac_model='not_a_model', aoi_model='no_loss', spectral_model='no_loss') def test_infer_ac_model_invalid_params(location): # only the keys are relevant here, using arbitrary values module_parameters = {'pdc0': 1, 'gamma_pdc': 1} system = pvsystem.PVSystem( arrays=[pvsystem.Array( mount=pvsystem.FixedMount(0, 180), module_parameters=module_parameters )], inverter_parameters={'foo': 1, 'bar': 2} ) with pytest.raises(ValueError, match='could not infer AC model'): ModelChain(system, location) def constant_aoi_loss(mc): mc.results.aoi_modifier = 0.9 @pytest.mark.parametrize('aoi_model', [ 'sapm', 'ashrae', 'physical', 'martin_ruiz' ]) def test_aoi_models(sapm_dc_snl_ac_system, location, aoi_model, weather, mocker): mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm', aoi_model=aoi_model, spectral_model='no_loss') m = mocker.spy(sapm_dc_snl_ac_system, 'get_iam') mc.run_model(weather=weather) assert m.call_count == 1 assert isinstance(mc.results.ac, pd.Series) assert not mc.results.ac.empty assert mc.results.ac[0] > 150 and mc.results.ac[0] < 200 assert mc.results.ac[1] < 1 @pytest.mark.parametrize('aoi_model', [ 'sapm', 'ashrae', 'physical', 'martin_ruiz' ]) def test_aoi_models_singleon_weather_single_array( sapm_dc_snl_ac_system, location, aoi_model, weather): mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm', aoi_model=aoi_model, spectral_model='no_loss') mc.run_model(weather=[weather]) assert isinstance(mc.results.aoi_modifier, tuple) assert len(mc.results.aoi_modifier) == 1 assert isinstance(mc.results.ac, pd.Series) assert not mc.results.ac.empty assert mc.results.ac[0] > 150 and mc.results.ac[0] < 200 assert mc.results.ac[1] < 1 def test_aoi_model_no_loss(sapm_dc_snl_ac_system, location, weather): mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm', aoi_model='no_loss', spectral_model='no_loss') mc.run_model(weather) assert mc.results.aoi_modifier == 1.0 assert not mc.results.ac.empty assert mc.results.ac[0] > 150 and mc.results.ac[0] < 200 assert mc.results.ac[1] < 1 def test_aoi_model_user_func(sapm_dc_snl_ac_system, location, weather, mocker): m = mocker.spy(sys.modules[__name__], 'constant_aoi_loss') mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm', aoi_model=constant_aoi_loss, spectral_model='no_loss') mc.run_model(weather) assert m.call_count == 1 assert mc.results.aoi_modifier == 0.9 assert not mc.results.ac.empty assert mc.results.ac[0] > 140 and mc.results.ac[0] < 200 assert mc.results.ac[1] < 1 @pytest.mark.parametrize('aoi_model', [ 'sapm', 'ashrae', 'physical', 'martin_ruiz' ]) def test_infer_aoi_model(location, system_no_aoi, aoi_model): for k in iam._IAM_MODEL_PARAMS[aoi_model]: system_no_aoi.arrays[0].module_parameters.update({k: 1.0}) mc = ModelChain(system_no_aoi, location, spectral_model='no_loss') assert isinstance(mc, ModelChain) def test_infer_aoi_model_invalid(location, system_no_aoi): exc_text = 'could not infer AOI model' with pytest.raises(ValueError, match=exc_text): ModelChain(system_no_aoi, location, spectral_model='no_loss') def constant_spectral_loss(mc): mc.results.spectral_modifier = 0.9 @pytest.mark.parametrize('spectral_model', [ 'sapm', 'first_solar', 
'no_loss', constant_spectral_loss ]) def test_spectral_models(sapm_dc_snl_ac_system, location, spectral_model, weather): # add pw to weather dataframe weather['precipitable_water'] = [0.3, 0.5] mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm', aoi_model='no_loss', spectral_model=spectral_model) spectral_modifier = mc.run_model(weather).results.spectral_modifier assert isinstance(spectral_modifier, (pd.Series, float, int)) @pytest.mark.parametrize('spectral_model', [ 'sapm', 'first_solar', 'no_loss', constant_spectral_loss ]) def test_spectral_models_singleton_weather_single_array( sapm_dc_snl_ac_system, location, spectral_model, weather): # add pw to weather dataframe weather['precipitable_water'] = [0.3, 0.5] mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm', aoi_model='no_loss', spectral_model=spectral_model) spectral_modifier = mc.run_model([weather]).results.spectral_modifier assert isinstance(spectral_modifier, tuple) assert len(spectral_modifier) == 1 assert isinstance(spectral_modifier[0], (pd.Series, float, int)) def constant_losses(mc): mc.results.losses = 0.9 mc.results.dc *= mc.results.losses def dc_constant_losses(mc): mc.results.dc['p_mp'] *= 0.9 def test_dc_ohmic_model_ohms_from_percent(cec_dc_snl_ac_system, cec_dc_snl_ac_arrays, location, weather, mocker): m = mocker.spy(pvsystem, 'dc_ohms_from_percent') system = cec_dc_snl_ac_system for array in system.arrays: array.array_losses_parameters = dict(dc_ohmic_percent=3) mc = ModelChain(system, location, aoi_model='no_loss', spectral_model='no_loss', dc_ohmic_model='dc_ohms_from_percent') mc.run_model(weather) assert m.call_count == 1 assert isinstance(mc.results.dc_ohmic_losses, pd.Series) system = cec_dc_snl_ac_arrays for array in system.arrays: array.array_losses_parameters = dict(dc_ohmic_percent=3) mc = ModelChain(system, location, aoi_model='no_loss', spectral_model='no_loss', dc_ohmic_model='dc_ohms_from_percent') mc.run_model(weather) assert m.call_count == 3 assert len(mc.results.dc_ohmic_losses) == len(mc.system.arrays) assert isinstance(mc.results.dc_ohmic_losses, tuple) def test_dc_ohmic_model_no_dc_ohmic_loss(cec_dc_snl_ac_system, location, weather, mocker): m = mocker.spy(modelchain.ModelChain, 'no_dc_ohmic_loss') mc = ModelChain(cec_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss', dc_ohmic_model='no_loss') mc.run_model(weather) assert mc.dc_ohmic_model == mc.no_dc_ohmic_loss assert m.call_count == 1 assert mc.results.dc_ohmic_losses is None def test_dc_ohmic_ext_def(cec_dc_snl_ac_system, location, weather, mocker): m = mocker.spy(sys.modules[__name__], 'dc_constant_losses') mc = ModelChain(cec_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss', dc_ohmic_model=dc_constant_losses) mc.run_model(weather) assert m.call_count == 1 assert isinstance(mc.results.ac, (pd.Series, pd.DataFrame)) assert not mc.results.ac.empty def test_dc_ohmic_not_a_model(cec_dc_snl_ac_system, location, weather, mocker): exc_text = 'not_a_dc_model is not a valid losses model' with pytest.raises(ValueError, match=exc_text): ModelChain(cec_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss', dc_ohmic_model='not_a_dc_model') def test_losses_models_pvwatts(pvwatts_dc_pvwatts_ac_system, location, weather, mocker): age = 1 pvwatts_dc_pvwatts_ac_system.losses_parameters = dict(age=age) m = mocker.spy(pvsystem, 'pvwatts_losses') mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts', aoi_model='no_loss', spectral_model='no_loss', 
losses_model='pvwatts') mc.run_model(weather) assert m.call_count == 1 m.assert_called_with(age=age) assert isinstance(mc.results.ac, (pd.Series, pd.DataFrame)) assert not mc.results.ac.empty # check that we're applying correction to dc # GH 696 dc_with_loss = mc.results.dc mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts', aoi_model='no_loss', spectral_model='no_loss', losses_model='no_loss') mc.run_model(weather) assert not np.allclose(mc.results.dc, dc_with_loss, equal_nan=True) def test_losses_models_pvwatts_arrays(multi_array_sapm_dc_snl_ac_system, location, weather): age = 1 system_both = multi_array_sapm_dc_snl_ac_system['two_array_system'] system_both.losses_parameters = dict(age=age) mc = ModelChain(system_both, location, aoi_model='no_loss', spectral_model='no_loss', losses_model='pvwatts') mc.run_model(weather) dc_with_loss = mc.results.dc mc = ModelChain(system_both, location, aoi_model='no_loss', spectral_model='no_loss', losses_model='no_loss') mc.run_model(weather) assert not np.allclose(mc.results.dc[0], dc_with_loss[0], equal_nan=True) assert not np.allclose(mc.results.dc[1], dc_with_loss[1], equal_nan=True) assert not mc.results.ac.empty def test_losses_models_ext_def(pvwatts_dc_pvwatts_ac_system, location, weather, mocker): m = mocker.spy(sys.modules[__name__], 'constant_losses') mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts', aoi_model='no_loss', spectral_model='no_loss', losses_model=constant_losses) mc.run_model(weather) assert m.call_count == 1 assert isinstance(mc.results.ac, (pd.Series, pd.DataFrame)) assert mc.results.losses == 0.9 assert not mc.results.ac.empty def test_losses_models_no_loss(pvwatts_dc_pvwatts_ac_system, location, weather, mocker): m = mocker.spy(pvsystem, 'pvwatts_losses') mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts', aoi_model='no_loss', spectral_model='no_loss', losses_model='no_loss') assert mc.losses_model == mc.no_extra_losses mc.run_model(weather) assert m.call_count == 0 assert mc.results.losses == 1 def test_invalid_dc_model_params(sapm_dc_snl_ac_system, cec_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_system, location): kwargs = {'dc_model': 'sapm', 'ac_model': 'sandia', 'aoi_model': 'no_loss', 'spectral_model': 'no_loss', 'temperature_model': 'sapm', 'losses_model': 'no_loss'} for array in sapm_dc_snl_ac_system.arrays: array.module_parameters.pop('A0') # remove a parameter with pytest.raises(ValueError): ModelChain(sapm_dc_snl_ac_system, location, **kwargs) kwargs['dc_model'] = 'singlediode' for array in cec_dc_snl_ac_system.arrays: array.module_parameters.pop('a_ref') # remove a parameter with pytest.raises(ValueError): ModelChain(cec_dc_snl_ac_system, location, **kwargs) kwargs['dc_model'] = 'pvwatts' kwargs['ac_model'] = 'pvwatts' for array in pvwatts_dc_pvwatts_ac_system.arrays: array.module_parameters.pop('pdc0') match = 'one or more Arrays are missing one or more required parameters' with pytest.raises(ValueError, match=match): ModelChain(pvwatts_dc_pvwatts_ac_system, location, **kwargs) @pytest.mark.parametrize('model', [ 'dc_model', 'ac_model', 'aoi_model', 'spectral_model', 'temperature_model', 'losses_model' ]) def test_invalid_models(model, sapm_dc_snl_ac_system, location): kwargs = {'dc_model': 'pvwatts', 'ac_model': 'pvwatts', 'aoi_model': 'no_loss', 'spectral_model': 'no_loss', 'temperature_model': 'sapm', 'losses_model': 'no_loss'} kwargs[model] = 'invalid' with pytest.raises(ValueError): ModelChain(sapm_dc_snl_ac_system, location, **kwargs) 
def test_bad_get_orientation(): with pytest.raises(ValueError): modelchain.get_orientation('bad value') # tests for PVSystem with multiple Arrays def test_with_sapm_pvsystem_arrays(sapm_dc_snl_ac_system_Array, location, weather): mc = ModelChain.with_sapm(sapm_dc_snl_ac_system_Array, location, ac_model='sandia') assert mc.dc_model == mc.sapm assert mc.ac_model == mc.sandia_inverter mc.run_model(weather) assert mc.results def test_ModelChain_no_extra_kwargs(sapm_dc_snl_ac_system, location): with pytest.raises(TypeError, match="arbitrary_kwarg"): ModelChain(sapm_dc_snl_ac_system, location, arbitrary_kwarg='value') @fail_on_pvlib_version('0.10') def test_ModelChain_attributes_deprecated_10(sapm_dc_snl_ac_system, location): match = 'Use ModelChain.results' mc = ModelChain(sapm_dc_snl_ac_system, location) with pytest.warns(pvlibDeprecationWarning, match=match): mc.aoi with pytest.warns(pvlibDeprecationWarning, match=match): mc.aoi = 5 def test_basic_chain_alt_az(sam_data, cec_inverter_parameters, sapm_temperature_cs5p_220m): times = pd.date_range(start='20160101 1200-0700', end='20160101 1800-0700', freq='6H') latitude = 32.2 longitude = -111 surface_tilt = 0 surface_azimuth = 0 modules = sam_data['sandiamod'] module_parameters = modules['Canadian_Solar_CS5P_220M___2009_'] temp_model_params = sapm_temperature_cs5p_220m.copy() dc, ac = modelchain.basic_chain(times, latitude, longitude, surface_tilt, surface_azimuth, module_parameters, temp_model_params, cec_inverter_parameters) expected = pd.Series(np.array([111.621405, -2.00000000e-02]), index=times) assert_series_equal(ac, expected) def test_basic_chain_altitude_pressure(sam_data, cec_inverter_parameters, sapm_temperature_cs5p_220m): times = pd.date_range(start='20160101 1200-0700', end='20160101 1800-0700', freq='6H') latitude = 32.2 longitude = -111 altitude = 700 surface_tilt = 0 surface_azimuth = 0 modules = sam_data['sandiamod'] module_parameters = modules['Canadian_Solar_CS5P_220M___2009_'] temp_model_params = sapm_temperature_cs5p_220m.copy() dc, ac = modelchain.basic_chain(times, latitude, longitude, surface_tilt, surface_azimuth, module_parameters, temp_model_params, cec_inverter_parameters, pressure=93194) expected = pd.Series(np.array([113.190045, -2.00000000e-02]), index=times) assert_series_equal(ac, expected) dc, ac = modelchain.basic_chain(times, latitude, longitude, surface_tilt, surface_azimuth, module_parameters, temp_model_params, cec_inverter_parameters, altitude=altitude) expected = pd.Series(np.array([113.189814, -2.00000000e-02]), index=times) assert_series_equal(ac, expected) def test_complete_irradiance_clean_run(sapm_dc_snl_ac_system, location): """The DataFrame should not change if all columns are passed""" mc = ModelChain(sapm_dc_snl_ac_system, location) times = pd.date_range('2010-07-05 9:00:00', periods=2, freq='H') i = pd.DataFrame( {'dni': [2, 3], 'dhi': [4, 6], 'ghi': [9, 5]}, index=times) mc.complete_irradiance(i) assert_series_equal(mc.results.weather['dni'], pd.Series([2, 3], index=times, name='dni')) assert_series_equal(mc.results.weather['dhi'], pd.Series([4, 6], index=times, name='dhi')) assert_series_equal(mc.results.weather['ghi'], pd.Series([9, 5], index=times, name='ghi')) def test_complete_irradiance(sapm_dc_snl_ac_system, location): """Check calculations""" mc = ModelChain(sapm_dc_snl_ac_system, location) times = pd.date_range('2010-07-05 7:00:00-0700', periods=2, freq='H') i = pd.DataFrame({'dni': [49.756966, 62.153947], 'ghi': [372.103976116, 497.087579068], 'dhi': [356.543700, 465.44400]}, 
index=times) with pytest.warns(UserWarning): mc.complete_irradiance(i[['ghi', 'dni']]) assert_series_equal(mc.results.weather['dhi'], pd.Series([356.543700, 465.44400], index=times, name='dhi')) with pytest.warns(UserWarning): mc.complete_irradiance(i[['dhi', 'dni']]) assert_series_equal(mc.results.weather['ghi'], pd.Series([372.103976116, 497.087579068], index=times, name='ghi')) mc.complete_irradiance(i[['dhi', 'ghi']]) assert_series_equal(mc.results.weather['dni'], pd.Series([49.756966, 62.153947], index=times, name='dni')) @pytest.mark.filterwarnings("ignore:This function is not safe at the moment") @pytest.mark.parametrize("input_type", [tuple, list]) def test_complete_irradiance_arrays( sapm_dc_snl_ac_system_same_arrays, location, input_type): """ModelChain.complete_irradiance can accept a tuple of weather DataFrames.""" times = pd.date_range(start='2020-01-01 0700-0700', periods=2, freq='H') weather = pd.DataFrame({'dni': [2, 3], 'dhi': [4, 6], 'ghi': [9, 5]}, index=times) mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location) with pytest.raises(ValueError, match=r"Input DataFrames must have same index\."): mc.complete_irradiance(input_type((weather, weather[1:]))) mc.complete_irradiance(input_type((weather, weather))) for mc_weather in mc.results.weather: assert_series_equal(mc_weather['dni'], pd.Series([2, 3], index=times, name='dni')) assert_series_equal(mc_weather['dhi'], pd.Series([4, 6], index=times, name='dhi')) assert_series_equal(mc_weather['ghi'], pd.Series([9, 5], index=times, name='ghi')) mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location) mc.complete_irradiance(input_type((weather[['ghi', 'dhi']], weather[['dhi', 'dni']]))) assert 'dni' in mc.results.weather[0].columns assert 'ghi' in mc.results.weather[1].columns mc.complete_irradiance(input_type((weather, weather[['ghi', 'dni']]))) assert_series_equal(mc.results.weather[0]['dhi'], pd.Series([4, 6], index=times, name='dhi')) assert_series_equal(mc.results.weather[0]['ghi'], pd.Series([9, 5], index=times, name='ghi')) assert_series_equal(mc.results.weather[0]['dni'], pd.Series([2, 3], index=times, name='dni')) assert 'dhi' in mc.results.weather[1].columns @pytest.mark.parametrize("input_type", [tuple, list]) def test_complete_irradiance_arrays_wrong_length( sapm_dc_snl_ac_system_same_arrays, location, input_type): mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location) times = pd.date_range(start='2020-01-01 0700-0700', periods=2, freq='H') weather = pd.DataFrame({'dni': [2, 3], 'dhi': [4, 6], 'ghi': [9, 5]}, index=times) error_str = "Input must be same length as number " \ r"of Arrays in system\. Expected 2, got [0-9]+\." with pytest.raises(ValueError, match=error_str): mc.complete_irradiance(input_type((weather,))) with pytest.raises(ValueError, match=error_str): mc.complete_irradiance(input_type((weather, weather, weather))) def test_unknown_attribute(sapm_dc_snl_ac_system, location): mc = ModelChain(sapm_dc_snl_ac_system, location) with pytest.raises(AttributeError): mc.unknown_attribute def test_inconsistent_array_params(location, sapm_module_params, cec_module_params): module_error = ".* selected for the DC model but one or more Arrays are " \ "missing one or more required parameters" temperature_error = "could not infer temperature model from " \ r"system\.temperature_model_parameters\. Check " \ r"that all Arrays in system\.arrays have " \ r"parameters for the same temperature model\. 
" \ r"Common temperature model parameters: .*" different_module_system = pvsystem.PVSystem( arrays=[ pvsystem.Array( mount=pvsystem.FixedMount(0, 180), module_parameters=sapm_module_params), pvsystem.Array( mount=pvsystem.FixedMount(0, 180), module_parameters=cec_module_params), pvsystem.Array( mount=pvsystem.FixedMount(0, 180), module_parameters=cec_module_params)] ) with pytest.raises(ValueError, match=module_error): ModelChain(different_module_system, location, dc_model='cec') different_temp_system = pvsystem.PVSystem( arrays=[ pvsystem.Array( mount=pvsystem.FixedMount(0, 180), module_parameters=cec_module_params, temperature_model_parameters={'a': 1, 'b': 1, 'deltaT': 1}), pvsystem.Array( mount=pvsystem.FixedMount(0, 180), module_parameters=cec_module_params, temperature_model_parameters={'a': 2, 'b': 2, 'deltaT': 2}), pvsystem.Array( mount=pvsystem.FixedMount(0, 180), module_parameters=cec_module_params, temperature_model_parameters={'b': 3, 'deltaT': 3})] ) with pytest.raises(ValueError, match=temperature_error): ModelChain(different_temp_system, location, ac_model='sandia', aoi_model='no_loss', spectral_model='no_loss', temperature_model='sapm') def test_modelchain__common_keys(): dictionary = {'a': 1, 'b': 1} series = pd.Series(dictionary) assert {'a', 'b'} == modelchain._common_keys( {'a': 1, 'b': 1} ) assert {'a', 'b'} == modelchain._common_keys( pd.Series({'a': 1, 'b': 1}) ) assert {'a', 'b'} == modelchain._common_keys( (dictionary, series) ) no_a = dictionary.copy() del no_a['a'] assert {'b'} == modelchain._common_keys( (dictionary, no_a) ) assert {'b'} == modelchain._common_keys( (series, pd.Series(no_a)) ) assert {'b'} == modelchain._common_keys( (series, no_a) ) def test__irrad_for_celltemp(): total_irrad = pd.DataFrame(index=[0, 1], columns=['poa_global'], data=[10., 20.]) empty = total_irrad.drop('poa_global', axis=1) effect_irrad = pd.Series(index=total_irrad.index, data=[5., 8.]) # test with single array inputs poa = modelchain._irrad_for_celltemp(total_irrad, effect_irrad) assert_series_equal(poa, total_irrad['poa_global']) poa = modelchain._irrad_for_celltemp(empty, effect_irrad) assert_series_equal(poa, effect_irrad) # test with tuples poa = modelchain._irrad_for_celltemp( (total_irrad, total_irrad), (effect_irrad, effect_irrad)) assert len(poa) == 2 assert_series_equal(poa[0], total_irrad['poa_global']) assert_series_equal(poa[1], total_irrad['poa_global']) poa = modelchain._irrad_for_celltemp( (empty, empty), (effect_irrad, effect_irrad)) assert len(poa) == 2 assert_series_equal(poa[0], effect_irrad) assert_series_equal(poa[1], effect_irrad)
mikofski/pvlib-python
pvlib/tests/test_modelchain.py
Python
bsd-3-clause
88076
"""Utilities and extensions for use with `argparse`.""" import os import argparse def directory(arg): """ An argument type (for use with the ``type=`` argument to `argparse.ArgumentParser.add_argument` which determines if the argument is an existing directory (and returns the absolute path). """ if not isinstance(arg, str) and os.path.isdir(arg): raise argparse.ArgumentTypeError( "{} is not a directory or does not exist (the directory must " "be created first)".format(arg)) return os.path.abspath(arg) def readable_directory(arg): """ An argument type (for use with the ``type=`` argument to `argparse.ArgumentParser.add_argument` which determines if the argument is a directory that exists and is readable (and returns the absolute path). """ arg = directory(arg) if not os.access(arg, os.R_OK): raise argparse.ArgumentTypeError( "{} exists but is not readable with its current " "permissions".format(arg)) return arg def writeable_directory(arg): """ An argument type (for use with the ``type=`` argument to `argparse.ArgumentParser.add_argument` which determines if the argument is a directory that exists and is writeable (and returns the absolute path). """ arg = directory(arg) if not os.access(arg, os.W_OK): raise argparse.ArgumentTypeError( "{} exists but is not writeable with its current " "permissions".format(arg)) return arg
stargaser/astropy
astropy/utils/argparse.py
Python
bsd-3-clause
1549
from __future__ import absolute_import

from sentry.api.serializers.rest_framework import OriginField
from rest_framework import serializers
from sentry.testutils import TestCase


class DummySerializer(serializers.Serializer):
    origin_field = OriginField()


class OriginFieldTest(TestCase):
    def test_valid_origin(self):
        urls = ["https://www.foo.com", "*"]
        for url in urls:
            serializer = DummySerializer(data={"origin_field": url})
            assert serializer.is_valid()

    def test_invalid_origin(self):
        url = "https://www.foo.com:88"
        serializer = DummySerializer(data={"origin_field": url})
        assert serializer.is_valid() is False
mvaled/sentry
tests/sentry/api/serializers/rest_framework/test_origin.py
Python
bsd-3-clause
697
"""Compare the speed of exact one-norm calculation vs. its estimation. """ import numpy as np from .common import Benchmark, safe_import with safe_import(): import scipy.sparse import scipy.special # import cycle workaround for some versions import scipy.sparse.linalg class BenchmarkOneNormEst(Benchmark): params = [ [2, 3, 5, 10, 30, 100, 300, 500, 1000, 1e4, 1e5, 1e6], ['exact', 'onenormest'] ] param_names = ['n', 'solver'] def setup(self, n, solver): rng = np.random.default_rng(1234) nrepeats = 100 shape = (int(n), int(n)) if solver == 'exact' and n >= 300: # skip: slow, and not useful to benchmark raise NotImplementedError() if n <= 1000: # Sample the matrices. self.matrices = [] for i in range(nrepeats): M = rng.standard_normal(shape) self.matrices.append(M) else: max_nnz = 100000 nrepeats = 1 self.matrices = [] for i in range(nrepeats): M = scipy.sparse.rand(shape[0], shape[1], min(max_nnz/(shape[0]*shape[1]), 1e-5), random_state=rng) self.matrices.append(M) def time_onenormest(self, n, solver): if solver == 'exact': # Get the exact values of one-norms of squares. for M in self.matrices: M.dot(M) scipy.sparse.linalg.matfuncs._onenorm(M) elif solver == 'onenormest': # Get the estimates of one-norms of squares. for M in self.matrices: scipy.sparse.linalg.matfuncs._onenormest_matrix_power(M, 2) # Retain old benchmark results (remove this if changing the benchmark) time_onenormest.version = "f7b31b4bf5caa50d435465e78dab6e133f3c263a52c4523eec785446185fdb6f"
grlee77/scipy
benchmarks/benchmarks/sparse_linalg_onenormest.py
Python
bsd-3-clause
1881
from datetime import datetime

from django.conf import settings
from django.template.loader import render_to_string
from django.utils.timezone import now

from remo.base.tasks import send_remo_mail
from remo.base.utils import get_date
from remo.reports.models import NGReport


def count_user_ng_reports(user, current_streak=False, longest_streak=False,
                          period=0):
    """Return the number of reports of a user over a period of time.

    If current_streak is True return the current streak of a user.
    Arg period expects weeks, e.g. 2 means 2 * 7 = 14 days.
    """
    end_period = now()
    start_period = datetime(2011, 1, 1)

    if current_streak:
        start_period = user.userprofile.current_streak_start
    elif longest_streak:
        start_period = user.userprofile.longest_streak_start
        end_period = user.userprofile.longest_streak_end
    elif period > 0:
        start_period = get_date(-(period * 7))

    query = user.ng_reports.filter(report_date__range=(start_period,
                                                       end_period))
    return query.count()


def get_last_report(user):
    """Return user's last report in the past."""
    today = now().date()
    try:
        reports = user.ng_reports.filter(report_date__lte=today)
        return reports.latest('report_date')
    except NGReport.DoesNotExist:
        return None


def send_report_notification(reps, weeks):
    """Send notification to inactive reps."""
    rep_subject = '[Reminder] Please share your recent activities'
    rep_mail_body = 'emails/reps_ng_report_notification.txt'
    mentor_subject = ('[Report] Mentee without report for the last %d weeks'
                      % weeks)
    mentor_mail_body = 'emails/mentor_ng_report_notification.txt'

    for rep in reps:
        mentor = rep.userprofile.mentor
        ctx_data = {'mentor': mentor,
                    'user': rep,
                    'SITE_URL': settings.SITE_URL,
                    'weeks': weeks}

        rep_message = render_to_string(rep_mail_body, ctx_data)
        mentor_message = render_to_string(mentor_mail_body, ctx_data)

        if mentor:
            send_remo_mail(rep_subject, [rep.email], message=rep_message,
                           headers={'Reply-To': mentor.email})
            send_remo_mail(mentor_subject, [mentor.email],
                           message=mentor_message,
                           headers={'Reply-To': rep.email})
        else:
            send_remo_mail(rep_subject, [rep.email], message=rep_message)
chirilo/remo
remo/reports/utils.py
Python
bsd-3-clause
2,561
from __future__ import absolute_import

import json
import uuid
import copy

from IPython.display import display, publish_display_data

from . import utils


class VegaBase(object):
    """A custom Vega-Lite display object."""

    JS_TEMPLATE = "static/vega.js"
    render_type = ''  # vega or vega-lite

    def __init__(self, spec, data=None, opt=None):
        """Initialize the visualization object."""
        spec = spec
        self.opt = opt or {}
        self.spec = self._prepare_spec(spec, data)

    def _prepare_spec(self, spec, data):
        return spec

    def _generate_js(self, id, **kwds):
        template = utils.get_content(self.JS_TEMPLATE)
        payload = template.format(
            id=id,
            spec=json.dumps(self.spec, **kwds),
            opt=json.dumps(self.opt, **kwds),
            type=self.render_type
        )
        return payload

    def _repr_mimebundle_(self, include=None, exclude=None):
        """Display the visualization in the Jupyter notebook."""
        id = uuid.uuid4()
        return (
            {'application/javascript': self._generate_js(id)},
            {'jupyter-vega': '#{0}'.format(id)},
        )

    def display(self):
        """Render the visualization."""
        display(self)
uwdata/ipython-vega
vega/base.py
Python
bsd-3-clause
1,259
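A minimal sketch of how a concrete renderer might subclass VegaBase above; the subclass name and spec are illustrative assumptions, and the real package ships its own subclasses:

class VegaLite(VegaBase):
    # Hypothetical concrete subclass: selects the vega-lite renderer.
    render_type = 'vega-lite'

spec = {"mark": "point",
        "data": {"values": [{"x": 1, "y": 2}]},
        "encoding": {"x": {"field": "x"}, "y": {"field": "y"}}}
VegaLite(spec).display()  # renders in a Jupyter notebook with the jupyter-vega frontend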
from datetime import datetime import io import os from pathlib import Path import dateutil.parser import numpy as np import pytest from pandas.errors import EmptyDataError import pandas.util._test_decorators as td import pandas as pd import pandas._testing as tm # https://github.com/cython/cython/issues/1720 @pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning") class TestSAS7BDAT: @pytest.fixture(autouse=True) def setup_method(self, datapath): self.dirpath = datapath("io", "sas", "data") self.data = [] self.test_ix = [list(range(1, 16)), [16]] for j in 1, 2: fname = os.path.join(self.dirpath, f"test_sas7bdat_{j}.csv") df = pd.read_csv(fname) epoch = datetime(1960, 1, 1) t1 = pd.to_timedelta(df["Column4"], unit="d") df["Column4"] = epoch + t1 t2 = pd.to_timedelta(df["Column12"], unit="d") df["Column12"] = epoch + t2 for k in range(df.shape[1]): col = df.iloc[:, k] if col.dtype == np.int64: df.iloc[:, k] = df.iloc[:, k].astype(np.float64) self.data.append(df) @pytest.mark.slow def test_from_file(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = os.path.join(self.dirpath, f"test{k}.sas7bdat") df = pd.read_sas(fname, encoding="utf-8") tm.assert_frame_equal(df, df0) @pytest.mark.slow def test_from_buffer(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = os.path.join(self.dirpath, f"test{k}.sas7bdat") with open(fname, "rb") as f: byts = f.read() buf = io.BytesIO(byts) with pd.read_sas( buf, format="sas7bdat", iterator=True, encoding="utf-8" ) as rdr: df = rdr.read() tm.assert_frame_equal(df, df0, check_exact=False) @pytest.mark.slow def test_from_iterator(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = os.path.join(self.dirpath, f"test{k}.sas7bdat") with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr: df = rdr.read(2) tm.assert_frame_equal(df, df0.iloc[0:2, :]) df = rdr.read(3) tm.assert_frame_equal(df, df0.iloc[2:5, :]) @pytest.mark.slow def test_path_pathlib(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = Path(os.path.join(self.dirpath, f"test{k}.sas7bdat")) df = pd.read_sas(fname, encoding="utf-8") tm.assert_frame_equal(df, df0) @td.skip_if_no("py.path") @pytest.mark.slow def test_path_localpath(self): from py.path import local as LocalPath for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = LocalPath(os.path.join(self.dirpath, f"test{k}.sas7bdat")) df = pd.read_sas(fname, encoding="utf-8") tm.assert_frame_equal(df, df0) @pytest.mark.slow def test_iterator_loop(self): # github #13654 for j in 0, 1: for k in self.test_ix[j]: for chunksize in (3, 5, 10, 11): fname = os.path.join(self.dirpath, f"test{k}.sas7bdat") with pd.read_sas( fname, chunksize=chunksize, encoding="utf-8" ) as rdr: y = 0 for x in rdr: y += x.shape[0] assert y == rdr.row_count def test_iterator_read_too_much(self): # github #14734 k = self.test_ix[0][0] fname = os.path.join(self.dirpath, f"test{k}.sas7bdat") with pd.read_sas( fname, format="sas7bdat", iterator=True, encoding="utf-8" ) as rdr: d1 = rdr.read(rdr.row_count + 20) with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr: d2 = rdr.read(rdr.row_count + 20) tm.assert_frame_equal(d1, d2) def test_encoding_options(datapath): fname = datapath("io", "sas", "data", "test1.sas7bdat") df1 = pd.read_sas(fname) df2 = pd.read_sas(fname, encoding="utf-8") for col in df1.columns: try: df1[col] = df1[col].str.decode("utf-8") except AttributeError: pass tm.assert_frame_equal(df1, df2) from pandas.io.sas.sas7bdat import 
SAS7BDATReader rdr = SAS7BDATReader(fname, convert_header_text=False) df3 = rdr.read() rdr.close() for x, y in zip(df1.columns, df3.columns): assert x == y.decode() def test_productsales(datapath): fname = datapath("io", "sas", "data", "productsales.sas7bdat") df = pd.read_sas(fname, encoding="utf-8") fname = datapath("io", "sas", "data", "productsales.csv") df0 = pd.read_csv(fname, parse_dates=["MONTH"]) vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"] df0[vn] = df0[vn].astype(np.float64) tm.assert_frame_equal(df, df0) def test_12659(datapath): fname = datapath("io", "sas", "data", "test_12659.sas7bdat") df = pd.read_sas(fname) fname = datapath("io", "sas", "data", "test_12659.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0) def test_airline(datapath): fname = datapath("io", "sas", "data", "airline.sas7bdat") df = pd.read_sas(fname) fname = datapath("io", "sas", "data", "airline.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0, check_exact=False) def test_date_time(datapath): # Support of different SAS date/datetime formats (PR #15871) fname = datapath("io", "sas", "data", "datetime.sas7bdat") df = pd.read_sas(fname) fname = datapath("io", "sas", "data", "datetime.csv") df0 = pd.read_csv( fname, parse_dates=["Date1", "Date2", "DateTime", "DateTimeHi", "Taiw"] ) # GH 19732: Timestamps imported from sas will incur floating point errors df.iloc[:, 3] = df.iloc[:, 3].dt.round("us") tm.assert_frame_equal(df, df0) def test_compact_numerical_values(datapath): # Regression test for #21616 fname = datapath("io", "sas", "data", "cars.sas7bdat") df = pd.read_sas(fname, encoding="latin-1") # The two columns CYL and WGT in cars.sas7bdat have column # width < 8 and only contain integral values. # Test that pandas doesn't corrupt the numbers by adding # decimals. result = df["WGT"] expected = df["WGT"].round() tm.assert_series_equal(result, expected, check_exact=True) result = df["CYL"] expected = df["CYL"].round() tm.assert_series_equal(result, expected, check_exact=True) def test_many_columns(datapath): # Test for looking for column information in more places (PR #22628) fname = datapath("io", "sas", "data", "many_columns.sas7bdat") df = pd.read_sas(fname, encoding="latin-1") fname = datapath("io", "sas", "data", "many_columns.csv") df0 = pd.read_csv(fname, encoding="latin-1") tm.assert_frame_equal(df, df0) def test_inconsistent_number_of_rows(datapath): # Regression test for issue #16615. 
(PR #22628) fname = datapath("io", "sas", "data", "load_log.sas7bdat") df = pd.read_sas(fname, encoding="latin-1") assert len(df) == 2097 def test_zero_variables(datapath): # Check if the SAS file has zero variables (PR #18184) fname = datapath("io", "sas", "data", "zero_variables.sas7bdat") with pytest.raises(EmptyDataError, match="No columns to parse from file"): pd.read_sas(fname) def test_corrupt_read(datapath): # We don't really care about the exact failure, the important thing is # that the resource should be cleaned up afterwards (BUG #35566) fname = datapath("io", "sas", "data", "corrupt.sas7bdat") msg = "'SAS7BDATReader' object has no attribute 'row_count'" with pytest.raises(AttributeError, match=msg): pd.read_sas(fname) def round_datetime_to_ms(ts): if isinstance(ts, datetime): return ts.replace(microsecond=int(round(ts.microsecond, -3) / 1000) * 1000) elif isinstance(ts, str): _ts = dateutil.parser.parse(timestr=ts) return _ts.replace(microsecond=int(round(_ts.microsecond, -3) / 1000) * 1000) else: return ts def test_max_sas_date(datapath): # GH 20927 # NB. max datetime in SAS dataset is 31DEC9999:23:59:59.999 # but this is read as 29DEC9999:23:59:59.998993 by a buggy # sas7bdat module fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") df = pd.read_sas(fname, encoding="iso-8859-1") # SAS likes to left pad strings with spaces - lstrip before comparing df = df.applymap(lambda x: x.lstrip() if isinstance(x, str) else x) # GH 19732: Timestamps imported from sas will incur floating point errors try: df["dt_as_dt"] = df["dt_as_dt"].dt.round("us") except pd._libs.tslibs.np_datetime.OutOfBoundsDatetime: df = df.applymap(round_datetime_to_ms) except AttributeError: df["dt_as_dt"] = df["dt_as_dt"].apply(round_datetime_to_ms) # if there are any date/times > pandas.Timestamp.max then ALL in that chunk # are returned as datetime.datetime expected = pd.DataFrame( { "text": ["max", "normal"], "dt_as_float": [253717747199.999, 1880323199.999], "dt_as_dt": [ datetime(9999, 12, 29, 23, 59, 59, 999000), datetime(2019, 8, 1, 23, 59, 59, 999000), ], "date_as_float": [2936547.0, 21762.0], "date_as_date": [datetime(9999, 12, 29), datetime(2019, 8, 1)], }, columns=["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"], ) tm.assert_frame_equal(df, expected) def test_max_sas_date_iterator(datapath): # GH 20927 # when called as an iterator, only those chunks with a date > pd.Timestamp.max # are returned as datetime.datetime, if this happens that whole chunk is returned # as datetime.datetime col_order = ["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"] fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") results = [] for df in pd.read_sas(fname, encoding="iso-8859-1", chunksize=1): # SAS likes to left pad strings with spaces - lstrip before comparing df = df.applymap(lambda x: x.lstrip() if isinstance(x, str) else x) # GH 19732: Timestamps imported from sas will incur floating point errors try: df["dt_as_dt"] = df["dt_as_dt"].dt.round("us") except pd._libs.tslibs.np_datetime.OutOfBoundsDatetime: df = df.applymap(round_datetime_to_ms) except AttributeError: df["dt_as_dt"] = df["dt_as_dt"].apply(round_datetime_to_ms) df.reset_index(inplace=True, drop=True) results.append(df) expected = [ pd.DataFrame( { "text": ["max"], "dt_as_float": [253717747199.999], "dt_as_dt": [datetime(9999, 12, 29, 23, 59, 59, 999000)], "date_as_float": [2936547.0], "date_as_date": [datetime(9999, 12, 29)], }, columns=col_order, ), pd.DataFrame( { "text": ["normal"], 
"dt_as_float": [1880323199.999], "dt_as_dt": [np.datetime64("2019-08-01 23:59:59.999")], "date_as_float": [21762.0], "date_as_date": [np.datetime64("2019-08-01")], }, columns=col_order, ), ] for result, expected in zip(results, expected): tm.assert_frame_equal(result, expected) def test_null_date(datapath): fname = datapath("io", "sas", "data", "dates_null.sas7bdat") df = pd.read_sas(fname, encoding="utf-8") expected = pd.DataFrame( { "datecol": [ datetime(9999, 12, 29), pd.NaT, ], "datetimecol": [ datetime(9999, 12, 29, 23, 59, 59, 998993), pd.NaT, ], }, ) tm.assert_frame_equal(df, expected)
rs2/pandas
pandas/tests/io/sas/test_sas7bdat.py
Python
bsd-3-clause
12,576
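The chunked-reading pattern exercised by test_iterator_loop above reduces to the following sketch; the file path is a placeholder:

import pandas as pd

# Stream a SAS file in chunks instead of loading it all at once.
with pd.read_sas("example.sas7bdat", chunksize=1000, encoding="utf-8") as reader:
    total_rows = 0
    for chunk in reader:
        total_rows += chunk.shape[0]
print(total_rows)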
""" Unit tests for Cantera's Cython-based Python module. This script gathers all the tests defined 'cantera.test' module, runs them, and prints a report. Extra command line arguments can be used to run subsets of the test suite, e.g.: all tests from 'test_thermo.py' and 'test_kinetics.py': python runCythonTests.py thermo kinetics all tests from the 'test_reactor.TesTIdealGasReactor' class: python runCythonTests.py reactor.TestIdealGasReactor a single test: python runCythonTests.py onedim.TestDiffusionFlame.test_mixture_averaged """ from __future__ import print_function import sys import os cantera_root = os.path.relpath(__file__).split(os.sep)[:-1] + ['..', '..'] py_version = 'python3' if sys.version_info[0] == 3 else 'python2' module_path = os.path.abspath(os.sep.join(cantera_root + ['build', py_version])) if 'PYTHONPATH' in os.environ: os.environ['PYTHONPATH'] = module_path + os.path.pathsep + os.environ['PYTHONPATH'] else: os.environ['PYTHONPATH'] = module_path sys.path.insert(0, module_path) os.chdir(os.sep.join(cantera_root + ['test', 'work'])) from cantera.test.utilities import unittest import cantera import cantera.test class TestResult(unittest.TextTestResult): def __init__(self, *args, **kwargs): unittest.TextTestResult.__init__(self, *args, **kwargs) self.outName = 'python%d-results.txt' % sys.version_info[0] with open(self.outName, 'w') as f: pass # just create an empty output file def reformat(self, test_string): name, cls = test_string.split() cls = cls.replace('(cantera.test.', '').replace(')','') return '%s.%s' % (cls, name) def addSuccess(self, test): with open(self.outName, 'a') as f: f.write('PASS: %s\n' % self.reformat(str(test))) unittest.TextTestResult.addSuccess(self, test) def addFailure(self, test, err): with open(self.outName, 'a') as f: f.write('FAIL: %s\n' % self.reformat(str(test))) unittest.TextTestResult.addFailure(self, test, err) def addError(self, test, err): with open(self.outName, 'a') as f: f.write('ERROR: %s\n' % self.reformat(str(test))) unittest.TextTestResult.addFailure(self, test, err) if __name__ == '__main__': print('\n* INFO: using Cantera module found at this location:') print('* ', repr(cantera.__file__), '\n') sys.stdout.flush() loader = unittest.TestLoader() runner = unittest.TextTestRunner(verbosity=2, resultclass=TestResult) suite = unittest.TestSuite() subsets = [] for name in sys.argv[1:]: subsets.append('cantera.test.test_' + name) if not subsets: subsets.append('cantera.test') suite = loader.loadTestsFromNames(subsets) results = runner.run(suite) sys.exit(len(results.errors) + len(results.failures))
Heathckliff/cantera
test/python/runCythonTests.py
Python
bsd-3-clause
2,874
from django.core.management.base import NoArgsCommand
from time import sleep, time

from django_bitcoin.utils import bitcoind
from django_bitcoin.models import BitcoinAddress
from django_bitcoin.models import Wallet

from django.conf import settings

from decimal import Decimal


class Command(NoArgsCommand):
    help = """fix balances """

    def handle_noargs(self, **options):
        print "starting..."
        for w in Wallet.objects.all():
            w.last_balance = w.total_balance()
            w.save()
texib/bitcoin-zoo
django_bitcoin/management/commands/FixLastBalancesConcurrency.py
Python
mit
518
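A hedged sketch of running the management command above programmatically; the command name is taken from the file name in the path, and invoking it through manage.py on the command line behaves the same way:

from django.core.management import call_command

# Recompute and store each wallet's cached balance.
call_command("FixLastBalancesConcurrency")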
from . import sale_currency
from . import pricelist
from . import multico
sidzan/netforce
netforce_sale/netforce_sale/migrations/__init__.py
Python
mit
74
# The MIT License (MIT) # # Copyright (c) 2013 Numenta, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import hashlib import os import json import requests DEFAULT_BASE_URL = "http://api.cept.at" DEFAULT_VERSION = "v1" DEFAULT_CACHE_DIR = "/tmp/pycept" DEFAULT_VERBOSITY = 0 class Cept(object): """ Main class for the Cept API. """ def __init__(self, app_id, app_key, base_url=DEFAULT_BASE_URL, version=DEFAULT_VERSION, cache_dir=DEFAULT_CACHE_DIR, verbosity=DEFAULT_VERBOSITY): self.app_id = app_id self.app_key = app_key self.api_url = "%s/%s" % (base_url, version) # Create the cache directory if necessary. if not os.path.exists(cache_dir): os.mkdir(cache_dir) self.cache_dir = cache_dir self.verbosity = verbosity def getBitmap(self, term): urlParams = self._buildUrlParams() urlParams['term'] = term url = "%s/term2bitmap" % (self.api_url,) # Create a cache location for each term, where it will either be read in from # or cached within if we have to go to the CEPT API to get the SDR. cache_file = os.path.join(self.cache_dir, term + '.json') # Get it from the cache if it's there. if os.path.exists(cache_file): cached_sdr = json.loads(open(cache_file).read()) # Get it from CEPT API if it's not cached. else: if self.verbosity > 0: print '\tfetching %s from CEPT API' % term response = requests.get(url, params=urlParams) cached_sdr = json.loads(response.content)['bitmap'] # attach the sparsity for reference total = float(cached_sdr['width']) * float(cached_sdr['height']) on = len(cached_sdr['positions']) sparsity = round((on / total) * 100) cached_sdr['sparsity'] = sparsity # write to cache with open(cache_file, 'w') as f: f.write(json.dumps(cached_sdr)) return cached_sdr def getSdr(self, term): return self._bitmapToSdr(self.getBitmap(term)) def bitmapToTerms(self, width, height, onBits): if len(onBits) is 0: raise(Exception("Cannot convert empty bitmap to term!")) response = self.bitmapToTermsRaw(width, height, onBits) similar = [] for term in response['similarterms']: similar.append( {'term': term['term'], 'rank': term['rank']} ) return similar def bitmapToTermsRaw(self, width, height, onBits): urlParams = self._buildUrlParams() data = json.dumps({'width': width, 'height': height, 'positions': onBits}) cache_path = 'bitmap-' + hashlib.sha224(data).hexdigest() + '.json' cache_file = os.path.join(self.cache_dir, cache_path) # Get it from the cache if it's there. 
if os.path.exists(cache_file): return json.loads(open(cache_file).read()) else: url = "%s/bitmap2terms" % (self.api_url) headers = {'Content-Type': 'application/json'} response = requests.post(url, params=urlParams, headers=headers, data=data) with open(cache_file, 'w') as f: f.write(response.content) return json.loads(response.content) def _buildUrlParams(self): return { 'app_id': self.app_id, 'app_key': self.app_key } def _bitmapToSdr(self, bitmap): width = bitmap['width'] height = bitmap['height'] total = width * height positions = bitmap['positions'] sdr = "" if len(positions) is 0: nextOn = None else: nextOn = positions.pop(0) for sdrIndex in range(0, total): if nextOn is None or nextOn != sdrIndex: sdr += "0" else: sdr += "1" if len(positions) is 0: nextOn = None else: nextOn = positions.pop(0) return sdr
ilblackdragon/nupic-hackathon-2014
pycept/pycept/cept.py
Python
mit
4,743
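A minimal usage sketch for the Cept client above; the app id and key values are placeholders:

cept = Cept("my-app-id", "my-app-key")      # placeholder credentials
bitmap = cept.getBitmap("python")           # fetches (or reads the cached) SDR bitmap
print(bitmap["sparsity"])                   # percent of bits that are on
terms = cept.bitmapToTerms(bitmap["width"], bitmap["height"],
                           bitmap["positions"])
print(terms[:3])                            # most similar terms, by rank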
#!/usr/bin/env python

class MergedCell(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""
    def __init__(self):
        """
        Attributes:
          swaggerTypes (dict): The key is attribute name and the value is attribute type.
          attributeMap (dict): The key is attribute name and the value is json key in definition.
        """
        self.swaggerTypes = {
            'EndColumn': 'int',
            'EndRow': 'int',
            'StartColumn': 'int',
            'StartRow': 'int',
            'link': 'Link'
        }

        self.attributeMap = {
            'EndColumn': 'EndColumn', 'EndRow': 'EndRow',
            'StartColumn': 'StartColumn', 'StartRow': 'StartRow',
            'link': 'link'}

        self.EndColumn = None  # int
        self.EndRow = None  # int
        self.StartColumn = None  # int
        self.StartRow = None  # int
        self.link = None  # Link
aspose-cells/Aspose.Cells-for-Cloud
SDKs/Aspose.Cells-Cloud-SDK-for-Python/asposecellscloud/models/MergedCell.py
Python
mit
967
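A short sketch of filling in the generated model above; the values are arbitrary:

mc = MergedCell()
mc.StartRow, mc.EndRow = 0, 0        # merge a single row...
mc.StartColumn, mc.EndColumn = 0, 3  # ...spanning four columns
print(mc.attributeMap)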
# # e32calendar.py # # Copyright (c) 2006-2009 Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import _calendar def revdict(d): return dict([(d[k],k) for k in d.keys()]) # maps replicationmap={"open":_calendar.rep_open, "private":_calendar.rep_private, "restricted":_calendar.rep_restricted} _replicationreversemap=revdict(replicationmap) entrytypemap={"appointment":_calendar.entry_type_appt, "event":_calendar.entry_type_event, "anniversary":_calendar.entry_type_anniv, "todo":_calendar.entry_type_todo, "reminder":_calendar.entry_type_reminder} _entrytypereversemap=revdict(entrytypemap) # Calendar database class class CalendarDb(object): def __init__(self,dbfile=None,mode=None): if dbfile is None: self._db=_calendar.open() else: dbfile = unicode(dbfile) if len(dbfile)==0: raise RuntimeError, "invalid filename" if len(dbfile)<2 or dbfile[1]!=':': dbfile = u"c:" + dbfile if len(dbfile)==2: raise RuntimeError, "invalid filename" if mode is None: self._db=_calendar.open(dbfile) else: self._db=_calendar.open(dbfile,mode) def __iter__(self): entry_ids=list() for id in self._db: entry_ids.append(id) return iter(entry_ids) def __len__(self): return self._db.entry_count() def __delitem__(self,key): self._db.delete_entry(key) def __getitem__(self,key): _entry = self._db.get_entry(key) if _entry.type()==_calendar.entry_type_appt: return CalendarDb.AppointmentEntry(_entry,self) elif _entry.type()==_calendar.entry_type_event: return CalendarDb.EventEntry(_entry,self) elif _entry.type()==_calendar.entry_type_anniv: return CalendarDb.AnniversaryEntry(_entry,self) elif _entry.type()==_calendar.entry_type_reminder: return CalendarDb.ReminderEntry(_entry,self) elif _entry.type()==_calendar.entry_type_todo: return CalendarDb.TodoEntry(_entry,self) def add_appointment(self): return CalendarDb.AppointmentEntry(self._db.add_entry(_calendar.entry_type_appt),self,locked='as_new_entry') def add_event(self): return CalendarDb.EventEntry(self._db.add_entry(_calendar.entry_type_event),self,locked='as_new_entry') def add_anniversary(self): return CalendarDb.AnniversaryEntry(self._db.add_entry(_calendar.entry_type_anniv),self,locked='as_new_entry') def add_reminder(self): return CalendarDb.ReminderEntry(self._db.add_entry(_calendar.entry_type_reminder),self,locked='as_new_entry') def add_todo(self): return CalendarDb.TodoEntry(self._db.add_entry(_calendar.entry_type_todo),self,locked='as_new_entry') def _create_filter(self,appointments,events,anniversaries,todos,reminders): filter=0 if appointments: filter|=_calendar.appts_inc_filter if events: filter|=_calendar.events_inc_filter if anniversaries: filter|=_calendar.annivs_inc_filter if todos: filter|=_calendar.todos_inc_filter if reminders: filter|=_calendar.reminders_inc_filter return filter def monthly_instances(self,month,appointments=0,events=0,anniversaries=0,todos=0,reminders=0): return self._db.monthly_instances(month,self._create_filter(appointments,events,anniversaries,todos,reminders)) def 
daily_instances(self,day,appointments=0,events=0,anniversaries=0,todos=0,reminders=0): return self._db.daily_instances(day,self._create_filter(appointments,events,anniversaries,todos,reminders)) def find_instances(self,start_date,end_date,search_string=u'',appointments=0,events=0,anniversaries=0,todos=0,reminders=0): return self._db.find_instances(start_date,end_date,unicode(search_string),self._create_filter(appointments,events,anniversaries,todos,reminders)) def export_vcalendars(self,entry_ids): return self._db.export_vcals(entry_ids) def import_vcalendars(self,vcalendar_string): return list(self._db.import_vcals(vcalendar_string)) def compact(self): raise RuntimeError, "compacting no more supported" def add_todo_list(self,name=None): raise RuntimeError, "todo lists no more supported" # Entry class class Entry(object): # PRIVATE functions def __init__(self,_entry,db,locked=0): self._entry=_entry self._db=db self._locked=locked self._available=1 def __del__(self): if self._locked: import warnings warnings.warn("entry still locked in destructor", RuntimeWarning) def _set_content(self,content): self._fetch_entry() self._entry.set_content(unicode(content)) self._autocommit() def _content(self): return self._entry.content() def _set_description(self,description): self._fetch_entry() self._entry.set_description(unicode(description)) self._autocommit() def _description(self): return self._entry.description() def _get_type(self): return _entrytypereversemap[self._entry.type()] def _unique_id(self): return self._entry.unique_id() def _set_location(self,location): self._fetch_entry() self._entry.set_location(unicode(location)) self._autocommit() def _location(self): return self._entry.location() def _last_modified(self): return self._entry.last_modified() def _set_priority(self,priority): self._fetch_entry() self._entry.set_priority(priority) self._autocommit() def _priority(self): return self._entry.priority() def _set_alarm(self,alarm_datetime): self._fetch_entry() if alarm_datetime is None: self._entry.cancel_alarm() else: self._entry.set_alarm(alarm_datetime) self._autocommit() def _get_alarm(self): return self._entry.alarm_datetime() def _set_replication(self, status): self._fetch_entry() self._entry.set_replication(replicationmap[status]) self._autocommit() def _replication(self): if _replicationreversemap.has_key(self._entry.replication()): return _replicationreversemap[self._entry.replication()] return "unknown" def _cross_out(self,value): self._fetch_entry() import time if value: self._entry.set_crossed_out(1,time.time()) else: self._entry.set_crossed_out(0,time.time()) self._autocommit() def _is_crossed_out(self): if self._entry.crossed_out_date() is None: return 0 else: return 1 def _start_datetime(self): return self._entry.start_datetime() def _end_datetime(self): return self._entry.end_datetime() def _originating_entry(self): return self._entry.originating_entry() def _autocommit(self): if not self._locked: self._entry.commit() self._available=0 #A workarround caused by platform bug: #1665914 modifying calendar event with long event content field fails. #This function will reload entry . Once the bug is fixed, function _fetch_entry(self) and it's calls should #be deleted. The same goes for flag self._available. 
def _fetch_entry(self): if self._available==0 and self._locked==0: self._entry=self._db[self.id]._entry # PUBLIC functions def begin(self): self._fetch_entry() if self._locked: raise RuntimeError('entry already open') self._locked=1 def commit(self): if not self._locked: raise RuntimeError('entry not open') self._entry.commit() self._locked=0 def rollback(self): if not self._locked: raise RuntimeError('entry not open') if self._locked == 'as_new_entry': # clear the content of new uncommited entry by creating a new _entry. self._entry=self._db._db.add_entry(self._entry.type()) else: # clear the content of old committed entry by fetching the last committed data from the database. self._entry=self._db._db.get_entry(self._entry.unique_id()) self._locked=0 def as_vcalendar(self): return self._db.export_vcalendars((self.id,)) def set_repeat(self,repeat): self._fetch_entry() if not repeat: repeat={"type":"no_repeat"} self._entry.set_repeat_data(repeat) self._autocommit() def get_repeat(self): repeat=self._entry.repeat_data() if repeat["type"]=="no_repeat": return None return self._entry.repeat_data() def set_time(self,start=None,end=None): self._fetch_entry() if start is None: start=end if end is None: end=start if end is None and start is None: if self._entry.type()==_calendar.entry_type_todo: self._entry.make_undated() # TODO: THIS DOES NOTHING??? return None else: raise RuntimeError,"only todos can be made undated" self._entry.set_start_and_end_datetime(start,end) self._autocommit() # PROPERTIES content=property(_content,_set_content) description=property(_description,_set_description) type=property(_get_type) location=property(_location,_set_location) last_modified=property(_last_modified) priority=property(_priority,_set_priority) id=property(_unique_id) crossed_out=property(_is_crossed_out,_cross_out) alarm=property(_get_alarm,_set_alarm) replication=property(_replication,_set_replication) end_time=property(_end_datetime) originating=property(_originating_entry) start_time=property(_start_datetime) # AppointmentEntry class class AppointmentEntry(Entry): def __init__(self,_entry,db,locked=0): CalendarDb.Entry.__init__(self,_entry,db,locked) def __str__(self): return '<AppointmentEntry #%d: "%s">'%(self.id,self.content) # EventEntry class class EventEntry(Entry): def __init__(self,_entry,db,locked=0): CalendarDb.Entry.__init__(self,_entry,db,locked) def __str__(self): return '<EventEntry #%d: "%s">'%(self.id,self.content) # AnniversaryEntry class class AnniversaryEntry(Entry): def __init__(self,_entry,db,locked=0): CalendarDb.Entry.__init__(self,_entry,db,locked) def __str__(self): return '<AnniversaryEntry #%d: "%s">'%(self.id,self.content) # ReminderEntry class class ReminderEntry(Entry): def __init__(self,_entry,db,locked=0): CalendarDb.Entry.__init__(self,_entry,db,locked) def __str__(self): return '<ReminderEntry #%d: "%s">'%(self.id,self.content) # TodoEntry class class TodoEntry(Entry): def __init__(self,_entry,db,locked=0): CalendarDb.Entry.__init__(self,_entry,db,locked) def __str__(self): return '<TodoEntry #%d: "%s">'%(self.id,self.content) def _get_cross_out_time(self): return self._entry.crossed_out_date() def _set_cross_out_time(self,cross_out_datetime): if cross_out_datetime==0: raise ValueError, "illegal datetime value" self._entry.set_crossed_out(1,cross_out_datetime) self._autocommit() cross_out_time=property(_get_cross_out_time,_set_cross_out_time) def _set_todo_list(self,list_id): raise RuntimeError, "todo lists no more supported" def _todo_list_id(self): raise 
RuntimeError, "todo lists no more supported" todo_list=property(_todo_list_id,_set_todo_list) # Todo list handling def _todo_lists(self): raise RuntimeError, "todo lists no more supported" todo_lists=property(_todo_lists) # Module methods def open(dbfile=None,mode=None): return CalendarDb(dbfile,mode)
pymo/pymo
symbian/PythonForS60_1.9.6/module-repo/dev-modules/e32calendar/e32calendar.py
Python
mit
13,582
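A minimal sketch of using the wrapper above on a Python for S60 device (Python 2 syntax, matching the module); the entry content is illustrative and the epoch-seconds time arguments are an assumption:

import time
import e32calendar

db = e32calendar.open()                # default calendar database
print len(db)                          # number of existing entries
appt = db.add_appointment()            # new entry, created in a locked state
appt.content = u"Team meeting"
appt.set_time(time.time(), time.time() + 3600)  # assumed: epoch seconds
appt.commit()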
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
sys.path.append('..')

from tamil.txt2unicode import auto2unicode, tscii2unicode

BUF_SIZE = 100

infile = 'sample_encode_documents/tscii.sample1.txt'
outfile = 'sample_encode_documents/tscii.sample1.unicode.txt'

inf = open(infile)
outf = open(outfile, 'w')


def converte2unicode(lines):
    for line in lines:
        uni = tscii2unicode(line)
        outf.write(uni)
# end of def converte2unicode(lines, outf):

tmp_lines = inf.readlines(BUF_SIZE)
while tmp_lines:
    # convert 100 lines at a time
    converte2unicode(tmp_lines)
    tmp_lines = inf.readlines(BUF_SIZE)
# end of while tmp_lines:

inf.close()
outf.close()

print("converted unicode stored in file", outfile)
atvKumar/open-tamil
examples/txt2unicode/demo_bigfiles_auto2unicode.py
Python
mit
738
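The conversion itself, without the buffered file streaming, is a single call; a minimal sketch using the same sample file referenced above:

from tamil.txt2unicode import tscii2unicode

tscii_text = open('sample_encode_documents/tscii.sample1.txt').read()
print(tscii2unicode(tscii_text))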
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals, print_function import warnings import os import numpy as np from monty.serialization import loadfn from pymatgen.analysis.bond_valence import BVAnalyzer from pymatgen.analysis.structure_matcher import StructureMatcher from pymatgen.core import Structure MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) bond_params = loadfn(os.path.join(MODULE_DIR, 'DLS_bond_params.yaml')) def is_ox(structure): comp = structure.composition for k in comp.keys(): try: k.oxi_state except AttributeError: return False return True class RLSVolumePredictor: """ Reference lattice scaling (RLS) scheme that predicts the volume of a structure based on a known crystal structure. """ def __init__(self, check_isostructural=True, radii_type="ionic-atomic", use_bv=True): """ Args: check_isostructural: Whether to test that the two structures are isostructural. This algo works best for isostructural compounds. Defaults to True. radii_type (str): Types of radii to use. You can specify "ionic" (only uses ionic radii), "atomic" (only uses atomic radii) or "ionic-atomic" (uses either ionic or atomic radii, with a preference for ionic where possible). use_bv (bool): Whether to use BVAnalyzer to determine oxidation states if not present. """ self.check_isostructural = check_isostructural self.radii_type = radii_type self.use_bv = use_bv def predict(self, structure, ref_structure): """ Given a structure, returns the predicted volume. Args: structure (Structure): structure w/unknown volume ref_structure (Structure): A reference structure with a similar structure but different species. Returns: a float value of the predicted volume """ if self.check_isostructural: m = StructureMatcher() mapping = m.get_best_electronegativity_anonymous_mapping( structure, ref_structure) if mapping is None: raise ValueError("Input structures do not match!") if "ionic" in self.radii_type: try: # Use BV analyzer to determine oxidation states only if the # oxidation states are not already specified in the structure # and use_bv is true. if (not is_ox(structure)) and self.use_bv: a = BVAnalyzer() structure = a.get_oxi_state_decorated_structure(structure) if (not is_ox(ref_structure)) and self.use_bv: a = BVAnalyzer() ref_structure = a.get_oxi_state_decorated_structure( ref_structure) comp = structure.composition ref_comp = ref_structure.composition # Check if all the associated ionic radii are available. if any([k.ionic_radius is None for k in list(comp.keys())]) or \ any([k.ionic_radius is None for k in list(ref_comp.keys())]): raise ValueError("Not all the ionic radii are available!") numerator = 0 denominator = 0 # Here, the 1/3 factor on the composition accounts for atomic # packing. We want the number per unit length. for k, v in comp.items(): numerator += k.ionic_radius * v ** (1 / 3) for k, v in ref_comp.items(): denominator += k.ionic_radius * v ** (1 / 3) return ref_structure.volume * (numerator / denominator) ** 3 except Exception as ex: warnings.warn("Exception occured. Will attempt atomic radii.") # If error occurs during use of ionic radii scheme, pass # and see if we can resolve it using atomic radii. pass if "atomic" in self.radii_type: comp = structure.composition ref_comp = ref_structure.composition # Here, the 1/3 factor on the composition accounts for atomic # packing. We want the number per unit length. 
numerator = 0 denominator = 0 for k, v in comp.items(): numerator += k.atomic_radius * v ** (1 / 3) for k, v in ref_comp.items(): denominator += k.atomic_radius * v ** (1 / 3) return ref_structure.volume * (numerator / denominator) ** 3 raise ValueError("Cannot find volume scaling based on radii choices " "specified!") def get_predicted_structure(self, structure, ref_structure): """ Given a structure, returns back the structure scaled to predicted volume. Args: structure (Structure): structure w/unknown volume ref_structure (Structure): A reference structure with a similar structure but different species. Returns: a Structure object with predicted volume """ new_structure = structure.copy() new_structure.scale_lattice(self.predict(structure, ref_structure)) return new_structure class DLSVolumePredictor: """ Data-mined lattice scaling (DLS) scheme that relies on data-mined bond lengths to predict the crystal volume of a given structure. """ def __init__(self, cutoff=4.0): """ Args: cutoff (float): cutoff radius added to site radius for finding site pairs. Necessary to increase only if your initial structure guess is extremely bad (atoms way too far apart). In all other instances, increasing cutoff gives same answer but takes more time. """ self.cutoff = cutoff def predict(self, structure, icsd_vol=False): """ Given a structure, returns the predicted volume. Args: structure (Structure) : a crystal structure with an unknown volume. icsd_vol (bool) : True if the input structure's volume comes from ICSD. Returns: a float value of the predicted volume. """ # Get standard deviation of electronnegativity in the structure. std_x = np.std([site.specie.X for site in structure]) # Sites that have atomic radii sub_sites = [] # Record the "DLS estimated radius" from bond_params. bp_dict = {} for sp in list(structure.composition.keys()): if sp.atomic_radius: sub_sites.extend([site for site in structure if site.specie == sp]) else: warnings.warn("VolumePredictor: no atomic radius data for " "{}".format(sp)) if sp.symbol not in bond_params: warnings.warn("VolumePredictor: bond parameters not found, " "used atomic radii for {}".format(sp)) else: r, k = bond_params[sp.symbol]["r"], bond_params[sp.symbol]["k"] bp_dict[sp] = float(r) + float(k) * std_x # Structure object that include only sites with known atomic radii. reduced_structure = Structure.from_sites(sub_sites) smallest_ratio = None for site1 in reduced_structure: sp1 = site1.specie neighbors = reduced_structure.get_neighbors(site1, sp1.atomic_radius + self.cutoff) for site2, dist in neighbors: sp2 = site2.specie if sp1 in bp_dict and sp2 in bp_dict: expected_dist = bp_dict[sp1] + bp_dict[sp2] else: expected_dist = sp1.atomic_radius + sp2.atomic_radius if not smallest_ratio or dist / expected_dist < smallest_ratio: smallest_ratio = dist / expected_dist if not smallest_ratio: raise ValueError("Could not find any bonds within the given cutoff " "in this structure.") volume_factor = (1 / smallest_ratio) ** 3 # icsd volume fudge factor if icsd_vol: volume_factor *= 1.05 return structure.volume * volume_factor def get_predicted_structure(self, structure, icsd_vol=False): """ Given a structure, returns back the structure scaled to predicted volume. Args: structure (Structure): structure w/unknown volume Returns: a Structure object with predicted volume """ new_structure = structure.copy() new_structure.scale_lattice(self.predict(structure, icsd_vol=icsd_vol)) return new_structure
czhengsci/pymatgen
pymatgen/analysis/structure_prediction/volume_predictor.py
Python
mit
9,318
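A hedged usage sketch for the DLSVolumePredictor class defined above; the "POSCAR" path is a placeholder input file:

from pymatgen.core import Structure
from pymatgen.analysis.structure_prediction.volume_predictor import DLSVolumePredictor

structure = Structure.from_file("POSCAR")   # placeholder structure file
predictor = DLSVolumePredictor()
print(predictor.predict(structure))         # predicted volume
scaled = predictor.get_predicted_structure(structure)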
# # Copyright (c) 2012,2013 Big Switch Networks, Inc. # # Licensed under the Eclipse Public License, Version 1.0 (the # "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.eclipse.org/legal/epl-v10.html # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # import re import utif # # VNSW -- any functions used to support vns # def init_vnsw(bs, modi): global sdnsh, mi sdnsh = bs mi = modi MAC_RE = re.compile(r'^(([A-Fa-f\d]){2}:?){5}[A-Fa-f\d]{2}$') DIGITS_RE = re.compile(r'^\d+$') # # -------------------------------------------------------------------------------- def create_default_vns(): """ In some situations, for example, when a blank cassandra db/store is having a running-config pushed. """ (error, created) = sdnsh.find_or_create_row('vns-definition', 'default') if error: return False return True # # -------------------------------------------------------------------------------- def associate_foreign_key_for_vns_interface(create_dict): """ Association of the foreign key 'rule' for vns-interfaces requires some substantial work, along with additional validation """ # determine the best candidate for the current rule if not 'interface' in create_dict or not 'vns' in create_dict: return create_dict rule = None items = create_dict['interface'].split('/') mac = None port = None if len(items) == 1: if items[0].startswith('Eth'): rule = items[0][3:] elif items[0].startswith('VEth'): rule = items[0][4:] else: rule = items[0] elif len(items) == 2 and MAC_RE.match(items[1]): mac = items[1] if items[0].startswith('VEth'): rule = items[0][4:] else: rule = items[0] elif len(items) == 2 and DIGITS_RE.match(items[1]): port = items[1] if items[0].startswith('Eth'): rule = items[0][3:] else: rule = items[0] else: # Assume this is some abstract port name port = items[1] rule = items[0] if not rule: return create_dict if rule and rule == 'default': if not create_default_vns(): return # try to look up the particular rule rule_id = sdnsh.unique_key_from_non_unique([create_dict['vns'], rule]) if_obj_type = 'vns-interface-rule' if_key = mi.pk(if_obj_type) try: row = sdnsh.get_table_from_store(if_obj_type, if_key, rule_id) errors = None except Exception, e: errors = sdnsh.rest_error_to_dict(e) print sdnsh.rest_error_dict_to_message(errors) if errors: return create_dict # # validate the rule referenced matches the expected # 'id' created here. 
# if len(row) == 0: return create_dict if_rule = row[0] # # Perform various validations to ensure the created interface # would makes sense to sdnplatform rule_is_number = False if DIGITS_RE.match(if_rule['id'].split('|')[1]): rule_is_number = True if mac and 'mac' in if_rule and mac != if_rule['mac']: sdnsh.warning('mac %s doesn\'t match mac for interface rule %s' % (mac, if_rule['mac'])) return create_dict # # If the rule is a mac rule, and the associated rule is a number, # make sure the 'Eth' vs 'VEth' lines up with the kind of rule # if ('mac' or 'ip-subnet' in if_rule or 'tags' in if_rule) and rule_is_number: if not items[0].startswith('VEth'): sdnsh.warning('interface name %s ought to start with VEth' % items[0]) return create_dict if ('switch' in if_rule or 'vlans' in if_rule) and rule_is_number: if not items[0].startswith('Eth'): sdnsh.warning('interface name %s must ought to start with Eth' % items[0]) return create_dict # # if ('switch' in if_rule and 'ports' in if_rule) and port: if not sdnsh.switch_port_match(port, if_rule['ports']): sdnsh.warning('port name %s does not match interface-rule ports %s' % (port, if_rule['ports'])) return create_dict # # associate the rule_id with the interface create_dict['rule'] = rule_id return create_dict # # -------------------------------------------------------------------------------- def port_ntoa(op, port): """ Pass in the op and the port number, and return a string for the pair, Both parameters are strings. (note the leading space) """ if not op in ['eq', 'neq']: return '' return '%s %s ' % (op, port) # # -------------------------------------------------------------------------------- def vns_acl_entry_to_text(acl): """ Return a short string for a specific acl entry. Used for both short acl display formats (vns-access-list-entry within a vns subconfig mode), and for show running config """ if acl['type'] in ['tcp', 'udp']: if not acl.get('src-ip') or not acl.get('src-ip-mask'): return '[broken src ip or mask (a) ]' if not acl.get('dst-ip') or not acl.get('dst-ip-mask'): return '[broken src ip or mask (b) ]' return "%s%s%s%s" % (utif.ip_and_neg_mask(acl['src-ip'], acl['src-ip-mask']), port_ntoa(acl.get('src-tp-port-op', ''), acl.get('src-tp-port', '')), utif.ip_and_neg_mask(acl['dst-ip'], acl['dst-ip-mask']), port_ntoa(acl.get('dst-tp-port-op', ''), acl.get('dst-tp-port', ''))) elif acl['type'] == 'ip' or DIGITS_RE.match(acl['type']): if not acl.get('src-ip') or not acl.get('src-ip-mask'): return '[broken src ip or mask (c)]' if not acl.get('dst-ip') or not acl.get('dst-ip-mask'): return '[broken src ip or mask (d)]' return "%s%s" % (utif.ip_and_neg_mask(acl['src-ip'], acl['src-ip-mask']), utif.ip_and_neg_mask(acl['dst-ip'], acl['dst-ip-mask'])) elif acl['type'] == 'icmp': if not acl.get('src-ip') or not acl.get('src-ip-mask'): return '[broken src ip or mask (e)]' if not acl.get('dst-ip') or not acl.get('dst-ip-mask'): return '[broken src ip or mask (f)]' return "%s%s%s" % (utif.ip_and_neg_mask(acl['src-ip'], acl['src-ip-mask']), utif.ip_and_neg_mask(acl['dst-ip'], acl['dst-ip-mask']), acl.get('icmp-type', "")) elif acl['type'] == 'mac': if 'vlan' in acl and acl['vlan'] != None and acl['vlan'] != '': if 'ether-type' in acl and\ acl['ether-type'] != None and acl['ether-type'] != '': return "%s %s %s vlan %s" % (acl.get('src-mac', 'any'), acl.get('dst-mac', 'any'), acl.get('ether-type'), acl['vlan']) else: return "%s %s vlan %s" % (acl.get('src-mac', 'any'), acl.get('dst-mac', 'any'), acl['vlan']) else: return "%s %s %s" % 
(acl.get('src-mac', 'any'), acl.get('dst-mac', 'any'), acl.get('ether-type', '')) else: return '[unrecognized acl format]' # # -------------------------------------------------------------------------------- def vns_acl_entries_to_brief(entries): for acl in entries: acl['acl-text'] = vns_acl_entry_to_text(acl)
mandeepdhami/netvirt-ctrl
cli/vnsw.py
Python
epl-1.0
8,594
#!/usr/bin/env python

from distutils.core import setup
from distutils.extension import Extension
from distutils.sysconfig import get_config_vars

from build_config import *

if local_compiler is not None:
    # Kludge: Force compiler of choice for building _rho_j_k.c.
    # _rho_j_k.c only contains a plain c-function with no python-
    # dependencies at all. Hence, just blatantly set simple
    # compiler, linker and flags.
    # (inspired by GPAW setup.py)
    config_vars = get_config_vars()
    for key in ['BASECFLAGS', 'CFLAGS', 'OPT', 'PY_CFLAGS',
                'CCSHARED', 'CFLAGSFORSHARED', 'LINKFORSHARED',
                'LIBS', 'SHLIBS']:
        config_vars[key] = ''
    config_vars['CC'] = local_compiler
    config_vars['LDSHARED'] = ' '.join([local_linker] + local_link_shared)

rho_j_k_d_ext = Extension('dsf._rho_j_k_d',
                          sources=['src/_rho_j_k.c'],
                          define_macros=[('RHOPREC', 'double')],
                          extra_compile_args=extra_compile_args,
                          extra_link_args=extra_link_args,
                          )

rho_j_k_s_ext = Extension('dsf._rho_j_k_s',
                          sources=['src/_rho_j_k.c'],
                          define_macros=[('RHOPREC', 'float')],
                          extra_compile_args=extra_compile_args,
                          extra_link_args=extra_link_args,
                          )

setup(name = 'python-dynsf',
      version = '0.2',
      description = 'Tool for calculating the dynamical structure factor',
      author = 'Mattias Slabanja',
      author_email = 'slabanja@chalmers.se',
      packages = ['dsf'],
      ext_modules = [rho_j_k_d_ext, rho_j_k_s_ext],
      scripts = ['dynsf'],
      data_files = [('share/man/man1', ['dynsf.1'])],
      requires = ['numpy'],
      license = "GPL2+",
      classifiers = ['Development Status :: 3 - Alpha',
                     'Intended Audience :: Education',
                     'Intended Audience :: Science/Research',
                     'License :: OSI Approved :: GNU General Public License (GPL)',
                     'Programming Language :: Python',
                     'Programming Language :: C',
                     'Topic :: Scientific/Engineering :: Chemistry',
                     'Topic :: Scientific/Engineering :: Physics'
                     ]
      )
slabanja/dynsf
setup.py
Python
gpl-2.0
2,461
# Copyright 2018 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

from .gnome import GnomeSessionClient


class XfceSessionClient(GnomeSessionClient):
    DBUS_NAME = 'org.xfce.SessionManager'
    DBUS_OBJECT_PATH = '/org/xfce/SessionManager'
    DBUS_MAIN_INTERFACE = 'org.xfce.Session.Manager'
    DBUS_CLIENT_INTERFACE = 'org.xfce.Session.Client'
ptitjes/quodlibet
quodlibet/session/xfce.py
Python
gpl-2.0
570
import os import re import logging from tempfile import mktemp from autotest.client.shared import error from virttest import virsh from virttest.libvirt_xml.nodedev_xml import NodedevXML from provider import libvirt_version _FC_HOST_PATH = "/sys/class/fc_host" def check_nodedev(dev_name, dev_parent=None): """ Check node device relevant values :params dev_name: name of the device :params dev_parent: parent name of the device, None is default """ host = dev_name.split("_")[1] fc_host_path = os.path.join(_FC_HOST_PATH, host) # Check if the /sys/class/fc_host/host$NUM exists if not os.access(fc_host_path, os.R_OK): logging.debug("Can't access %s", fc_host_path) return False dev_xml = NodedevXML.new_from_dumpxml(dev_name) if not dev_xml: logging.error("Can't dumpxml %s XML", dev_name) return False # Check device parent name if dev_parent != dev_xml.parent: logging.error("The parent name is different: %s is not %s", dev_parent, dev_xml.parent) return False wwnn_from_xml = dev_xml.wwnn wwpn_from_xml = dev_xml.wwpn fabric_wwn_from_xml = dev_xml.fabric_wwn fc_dict = {} name_list = ["node_name", "port_name", "fabric_name"] for name in name_list: fc_file = os.path.join(fc_host_path, name) fc_dict[name] = open(fc_file, "r").read().strip().split("0x")[1] # Check wwnn, wwpn and fabric_wwn if len(wwnn_from_xml) != 16 or \ len(wwpn_from_xml) != 16 or \ fc_dict["node_name"] != wwnn_from_xml or \ fc_dict["port_name"] != wwpn_from_xml or \ fc_dict["fabric_name"] != fabric_wwn_from_xml: logging.debug("The fc_dict is: %s", fc_dict) return False fc_type_from_xml = dev_xml.fc_type cap_type_from_xml = dev_xml.cap_type # Check capability type if cap_type_from_xml != "scsi_host" or fc_type_from_xml != "fc_host": logging.debug("The capability type isn't 'scsi_host' or 'fc_host'") return False return True def create_nodedev_from_xml(params): """ Create a device defined by an XML file on the node :params: the parameter dictionary """ scsi_host = params.get("nodedev_scsi_host") options = params.get("nodedev_options") status_error = params.get("status_error", "no") # libvirt acl polkit related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = 'testacl' vhba_xml = """ <device> <parent>%s</parent> <capability type='scsi_host'> <capability type='fc_host'> </capability> </capability> </device> """ % scsi_host logging.debug("Prepare the nodedev XML: %s", vhba_xml) vhba_file = mktemp() xml_object = open(vhba_file, 'w') xml_object.write(vhba_xml) xml_object.close() result = virsh.nodedev_create(vhba_file, options, uri=uri, debug=True, unprivileged_user=unprivileged_user) status = result.exit_status # Remove temprorary file os.unlink(vhba_file) # Check status_error if status_error == "yes": if status: logging.info("It's an expected %s", result.stderr) else: raise error.TestFail("%d not a expected command " "return value", status) elif status_error == "no": if status: raise error.TestFail(result.stderr) else: output = result.stdout logging.info(output) for scsi in output.split(): if scsi.startswith('scsi_host'): # Check node device if check_nodedev(scsi, scsi_host): return scsi else: raise error.TestFail("Can't find %s" % scsi) def destroy_nodedev(params): """ Destroy (stop) a device on the node :params: the parameter dictionary """ dev_name = params.get("nodedev_new_dev") options = params.get("nodedev_options") status_error = params.get("status_error", "no") # libvirt acl polkit related params uri = 
params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = 'testacl' result = virsh.nodedev_destroy(dev_name, options, uri=uri, debug=True, unprivileged_user=unprivileged_user) status = result.exit_status # Check status_error if status_error == "yes": if status: logging.info("It's an expected %s", result.stderr) else: raise error.TestFail("%d not a expected command " "return value", status) elif status_error == "no": if status: raise error.TestFail(result.stderr) else: # Check nodedev value if not check_nodedev(dev_name): logging.info(result.stdout) else: raise error.TestFail("The relevant directory still exists" "or mismatch with result") def find_devices_by_cap(cap_type="scsi_host"): """ Find device by capability :params cap_type: capability type """ result = virsh.nodedev_list(cap=cap_type) if result.exit_status: raise error.TestFail(result.stderr) scsi_hosts = result.stdout.strip().splitlines() return scsi_hosts def check_vport_ops_cap(scsi_hosts): """ Check vport operation capability :params scsi_hosts: list of the scsi_host """ vport_ops_list = [] for scsi_host in scsi_hosts: result = virsh.nodedev_dumpxml(scsi_host) if result.exit_status: raise error.TestFail(result.stderr) if re.search('vport_ops', result.stdout.strip()): vport_ops_list.append(scsi_host) logging.debug("The vport_ops list: %s", vport_ops_list) return vport_ops_list def check_port_connectivity(vport_ops_list): """ Check port connectivity :params vport_ops_list: list of the vport operation """ port_state_dict = {} port_linkup = [] port_linkdown = [] fc_path = "/sys/class/fc_host" for scsi_host in vport_ops_list: port_state = scsi_host.split('_')[1] + "/port_state" port_state_file = os.path.join(fc_path, port_state) logging.debug("The port_state file: %s", port_state_file) state = open(port_state_file).read().strip() logging.debug("The port state: %s", state) if state == "Online" or state == "Linkup": port_linkup.append(scsi_host) if state == "Offline" or state == "Linkdown": port_linkdown.append(scsi_host) port_state_dict["online"] = port_linkup port_state_dict["offline"] = port_linkdown return port_state_dict def run(test, params, env): """ Test create/destroy node device 1) Positive testing 1.1) create node device from XML file 1.2) destroy node device 2) Negative testing 2.1) create node device with noexist name of the parent HBA 2.2) create node device with offline port 2.3) create node device with invalid option 2.4) destroy noexist node device 2.5) destroy node device with invalid option 2.6) destroy node device without capable of vport operations """ # Run test case options = params.get("nodedev_options") dev_name = params.get("nodedev_dev_name") status_error = params.get("status_error", "no") no_vport_ops = params.get("nodedev_no_vport_ops", "no") port_state = params.get("nodedev_port_state", "offline") create_device = params.get("nodedev_create_device", "no") if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") # Find available HBAs scsi_hosts = find_devices_by_cap() # Find available vHBA vport_ops_list = check_vport_ops_cap(scsi_hosts) # No HBA or no vHBA supporting if not vport_ops_list: raise error.TestNAError("No HBAs to support vHBA on the host!") # Check ports connectivity port_state_dict = check_port_connectivity(vport_ops_list) # Get ports list of the online and offline 
port_online_list = port_state_dict["online"] port_offline_list = port_state_dict["offline"] # No online port is available if not port_online_list: raise error.TestNAError("No port is active!") if dev_name: # Negative testing for creating device params["nodedev_scsi_host"] = dev_name # Negative testing for destroying device params["nodedev_new_dev"] = dev_name elif port_state == "online" or options: # Pick up one online port for positive testing params["nodedev_scsi_host"] = port_online_list[0] # Negative testing with invalid option params["nodedev_new_dev"] = port_online_list[0] elif no_vport_ops == "yes": # Negative testing for not capable of vport operations if port_offline_list: params["nodedev_new_dev"] = port_offline_list[0] else: # Pick up one offline port for negative testing if port_offline_list: params["nodedev_scsi_host"] = port_offline_list[0] # positive and negative testing ######### if status_error == "no": try: # Create device from XML params["nodedev_new_dev"] = create_nodedev_from_xml(params) # Destroy the device destroy_nodedev(params) except error.TestFail, detail: raise error.TestFail("Failed to create/destroy node device.\n" "Detail: %s." % detail) if status_error == "yes": if create_device == "yes": # Create device from XML create_nodedev_from_xml(params) if create_device == "no": # Destroy the device destroy_nodedev(params)
waynesun09/tp-libvirt
libvirt/tests/src/virsh_cmd/nodedev/virsh_nodedev_create_destroy.py
Python
gpl-2.0
10,403
# Copyright (c) 2005 VMware, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Python translation from wrapLabel.{cc|h} by Gian Mario Tagliaretti

import gtk
import gobject
import pango


class WrapLabel(gtk.Label):
    __gtype_name__ = 'WrapLabel'

    def __init__(self, str=None):
        gtk.Label.__init__(self)

        self.__wrap_width = 0
        self.layout = self.get_layout()
        self.layout.set_wrap(pango.WRAP_WORD_CHAR)

        if str != None:
            self.set_text(str)

        self.set_alignment(0.0, 0.0)

    def do_size_request(self, requisition):
        layout = self.get_layout()
        width, height = layout.get_pixel_size()
        requisition.width = 0
        requisition.height = height

    def do_size_allocate(self, allocation):
        gtk.Label.do_size_allocate(self, allocation)
        self.__set_wrap_width(allocation.width)

    def set_text(self, str):
        gtk.Label.set_text(self, str)
        self.__set_wrap_width(self.__wrap_width)

    def set_markup(self, str):
        gtk.Label.set_markup(self, str)
        self.__set_wrap_width(self.__wrap_width)

    def __set_wrap_width(self, width):
        if width == 0:
            return
        layout = self.get_layout()
        layout.set_width(width * pango.SCALE)

        if self.__wrap_width != width:
            self.__wrap_width = width
            self.queue_resize()
kaiw/meld
meld/ui/wraplabel.py
Python
gpl-2.0
2,398
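A hedged usage sketch for the WrapLabel entry above (assumed, not part of meld): dropping the widget into a PyGTK window so long text re-wraps as the window is resized.

# Hedged sketch: assumes PyGTK 2 and that meld is importable as a package;
# the import path is taken from the entry's file path above.
import gtk
from meld.ui.wraplabel import WrapLabel

win = gtk.Window()
win.set_default_size(200, 100)
win.connect('destroy', gtk.main_quit)
win.add(WrapLabel('A long line of text that should wrap to the current '
                  'window width instead of forcing the window wider.'))
win.show_all()
gtk.main()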
# SETS up everything you need for tweepy. Just edit 4 lines below to match your api.
import tweepy

consumer_key = 'Yours'
consumer_secret = 'Yours'
access_key = 'Yours'
access_secret = 'Yours'

#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
junwoo091400/MyCODES
small_Projects(memory)/Twitter/Screen_name_to_CSV/credentials.py
Python
gpl-3.0
363
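A hedged usage sketch for the credentials module above (not part of the repo): importing the configured client and looking up a user, the way a sibling script in the same folder might.

# Hedged sketch: assumes credentials.py is on the import path and the four
# placeholder keys have been replaced with real Twitter API credentials.
from credentials import api

user = api.get_user('twitter')  # look up a user by screen name (placeholder)
print(user.screen_name, user.followers_count)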
#  Copyright (C) 2017,2018
#      Max Planck Institute for Polymer Research
#
#  This file is part of ESPResSo++.
#
#  ESPResSo++ is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  ESPResSo++ is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program. If not, see <http://www.gnu.org/licenses/>.

r"""
********************************
espressopp.analysis.SubregionTracking
********************************

Class to compute the number of (coarse-grained) particles that belong to a
specified particle list and that reside in a specified subregion of the
simulation box (when specifying a list of particles that reside in a certain
subregion at the beginning of the simulation, the routine can be used, for
example, to track how many of these particles still stay in the same region
after some simulation time).

Examples:

>>> subregiontracking_instance = espressopp.analysis.SubregionTracking(system, span=0.75, geometry=1, pidlist=tracklist, center=[Lx/2, Ly/2, Lz/2])
>>> # creates instance of the class for calculating number of particles that belong to particle id list tracklist and reside in a subregion which is centered in the simulation box and bounded within +-0.75 in x-direction from the center

>>> number_of_particles = subregiontracking_instance.compute()
>>> # computes the number of particles belonging to specified particle id list in specified subregion of the simulation box

.. function:: espressopp.analysis.SubregionTracking(self, system, span, geometry, center, pidlist)

    Constructs the SubregionTracking object.

    :param system: system object
    :param span: radius of the subregion to be considered
    :param geometry: geometry of the subregion. Can only be in ['spherical', 'bounded-x', 'bounded-y', 'bounded-z']
    :param center: center of the subregion
    :param pidlist: list of particle ids of coarse-grained particles that are counted in the specified subregion
    :type system: shared_ptr<System>
    :type span: real
    :type geometry: str in ['spherical', 'bounded-x', 'bounded-y', 'bounded-z']
    :type center: list of 3 reals (x,y,z coordinates of center)
    :type pidlist: list of ints

.. function:: espressopp.analysis.SubregionTracking.compute():

    Calculates the number of particles that are present in specified subregion
    and that belong to specified particle id list.

    :rtype: real
"""

from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_SubregionTracking


class SubregionTrackingLocal(ObservableLocal, analysis_SubregionTracking):
    'The (local) class for computing the number of particles that are present in a specified subregion of the system and that belong to a specified group of particles.'

    def __init__(self, system, span, geometry, center, pidlist):
        if geometry not in ['spherical', 'bounded-x', 'bounded-y', 'bounded-z']:
            raise ValueError('Error: Geometry must be in ["spherical", "bounded-x", "bounded-y", "bounded-z"]. Your input: {}'.format(geometry))
        if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            geometrydict = {'spherical': 0, 'bounded-x': 1, 'bounded-y': 2, 'bounded-z': 3}
            cxxinit(self, analysis_SubregionTracking, system, span, geometrydict[geometry])
            self.cxxclass.setCenter(self, center[0], center[1], center[2])
            for pid in pidlist:
                self.cxxclass.addPID(self, pid)


if pmi.isController:
    class SubregionTracking(Observable):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls='espressopp.analysis.SubregionTrackingLocal'
        )
kkreis/espressopp
src/analysis/SubregionTracking.py
Python
gpl-3.0
4,220
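A hedged usage sketch for the SubregionTracking entry above (not part of the module): SubregionTrackingLocal validates geometry as one of the listed strings, so a string-based form of the docstring example looks like this; system, tracklist and the box lengths Lx/Ly/Lz are assumed to come from the surrounding ESPResSo++ script.

# Hedged sketch: 'bounded-x' counts particles within +/- span of the center
# along x; `system`, `tracklist`, `Lx`, `Ly`, `Lz` are assumed to exist.
import espressopp  # assumes a working ESPResSo++ installation

tracker = espressopp.analysis.SubregionTracking(
    system, span=0.75, geometry='bounded-x',
    center=[Lx / 2, Ly / 2, Lz / 2], pidlist=tracklist)
print(tracker.compute())  # tracked particles still inside the subregion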
#!/usr/bin/env python # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import itertools, operator from functools import partial from future_builtins import map from collections import OrderedDict from PyQt4.Qt import ( QTableView, Qt, QAbstractItemView, QMenu, pyqtSignal, QFont, QModelIndex, QIcon, QItemSelection, QMimeData, QDrag, QStyle, QPoint, QUrl, QHeaderView, QStyleOptionHeader) from calibre.gui2.library.delegates import (RatingDelegate, PubDateDelegate, TextDelegate, DateDelegate, CompleteDelegate, CcTextDelegate, CcBoolDelegate, CcCommentsDelegate, CcDateDelegate, CcTemplateDelegate, CcEnumDelegate, CcNumberDelegate, LanguagesDelegate) from calibre.gui2.library.models import BooksModel, DeviceBooksModel from calibre.gui2.library.alternate_views import AlternateViews, setup_dnd_interface from calibre.utils.config import tweaks, prefs from calibre.gui2 import error_dialog, gprefs, FunctionDispatcher from calibre.gui2.library import DEFAULT_SORT from calibre.constants import filesystem_encoding from calibre import force_unicode class HeaderView(QHeaderView): # {{{ def __init__(self, *args): QHeaderView.__init__(self, *args) self.hover = -1 self.current_font = QFont(self.font()) self.current_font.setBold(True) self.current_font.setItalic(True) def event(self, e): if e.type() in (e.HoverMove, e.HoverEnter): self.hover = self.logicalIndexAt(e.pos()) elif e.type() in (e.Leave, e.HoverLeave): self.hover = -1 return QHeaderView.event(self, e) def paintSection(self, painter, rect, logical_index): opt = QStyleOptionHeader() self.initStyleOption(opt) opt.rect = rect opt.section = logical_index opt.orientation = self.orientation() opt.textAlignment = Qt.AlignHCenter | Qt.AlignVCenter model = self.parent().model() opt.text = model.headerData(logical_index, opt.orientation, Qt.DisplayRole).toString() if self.isSortIndicatorShown() and self.sortIndicatorSection() == logical_index: opt.sortIndicator = QStyleOptionHeader.SortDown if self.sortIndicatorOrder() == Qt.AscendingOrder else QStyleOptionHeader.SortUp opt.text = opt.fontMetrics.elidedText(opt.text, Qt.ElideRight, rect.width() - 4) if self.isEnabled(): opt.state |= QStyle.State_Enabled if self.window().isActiveWindow(): opt.state |= QStyle.State_Active if self.hover == logical_index: opt.state |= QStyle.State_MouseOver sm = self.selectionModel() if opt.orientation == Qt.Vertical: try: opt.icon = model.headerData(logical_index, opt.orientation, Qt.DecorationRole) opt.iconAlignment = Qt.AlignVCenter except (IndexError, ValueError, TypeError): pass if sm.isRowSelected(logical_index, QModelIndex()): opt.state |= QStyle.State_Sunken painter.save() if ( (opt.orientation == Qt.Horizontal and sm.currentIndex().column() == logical_index) or (opt.orientation == Qt.Vertical and sm.currentIndex().row() == logical_index)): painter.setFont(self.current_font) self.style().drawControl(QStyle.CE_Header, opt, painter, self) painter.restore() # }}} class PreserveViewState(object): # {{{ ''' Save the set of selected books at enter time. If at exit time there are no selected books, restore the previous selection, the previous current index and dont affect the scroll position. 
''' def __init__(self, view, preserve_hpos=True, preserve_vpos=True, require_selected_ids=True): self.view = view self.require_selected_ids = require_selected_ids self.preserve_hpos = preserve_hpos self.preserve_vpos = preserve_vpos self.init_vals() def init_vals(self): self.selected_ids = set() self.current_id = None self.vscroll = self.hscroll = 0 self.original_view = None def __enter__(self): self.init_vals() try: view = self.original_view = self.view.alternate_views.current_view self.selected_ids = self.view.get_selected_ids() self.current_id = self.view.current_id self.vscroll = view.verticalScrollBar().value() self.hscroll = view.horizontalScrollBar().value() except: import traceback traceback.print_exc() def __exit__(self, *args): if self.selected_ids or not self.require_selected_ids: if self.current_id is not None: self.view.current_id = self.current_id if self.selected_ids: self.view.select_rows(self.selected_ids, using_ids=True, scroll=False, change_current=self.current_id is None) view = self.original_view if self.view.alternate_views.current_view is view: if self.preserve_vpos: if hasattr(view, 'restore_vpos'): view.restore_vpos(self.vscroll) else: view.verticalScrollBar().setValue(self.vscroll) if self.preserve_hpos: if hasattr(view, 'restore_hpos'): view.restore_hpos(self.hscroll) else: view.horizontalScrollBar().setValue(self.hscroll) self.init_vals() @dynamic_property def state(self): def fget(self): self.__enter__() return {x:getattr(self, x) for x in ('selected_ids', 'current_id', 'vscroll', 'hscroll')} def fset(self, state): for k, v in state.iteritems(): setattr(self, k, v) self.__exit__() return property(fget=fget, fset=fset) # }}} @setup_dnd_interface class BooksView(QTableView): # {{{ files_dropped = pyqtSignal(object) add_column_signal = pyqtSignal() is_library_view = True def viewportEvent(self, event): if (event.type() == event.ToolTip and not gprefs['book_list_tooltips']): return False return QTableView.viewportEvent(self, event) def __init__(self, parent, modelcls=BooksModel, use_edit_metadata_dialog=True): QTableView.__init__(self, parent) self.gui = parent self.setProperty('highlight_current_item', 150) self.row_sizing_done = False self.alternate_views = AlternateViews(self) if not tweaks['horizontal_scrolling_per_column']: self.setHorizontalScrollMode(self.ScrollPerPixel) self.setEditTriggers(self.EditKeyPressed) if tweaks['doubleclick_on_library_view'] == 'edit_cell': self.setEditTriggers(self.DoubleClicked|self.editTriggers()) elif tweaks['doubleclick_on_library_view'] == 'open_viewer': self.setEditTriggers(self.SelectedClicked|self.editTriggers()) self.doubleClicked.connect(parent.iactions['View'].view_triggered) elif tweaks['doubleclick_on_library_view'] == 'edit_metadata': # Must not enable single-click to edit, or the field will remain # open in edit mode underneath the edit metadata dialog if use_edit_metadata_dialog: self.doubleClicked.connect( partial(parent.iactions['Edit Metadata'].edit_metadata, checked=False)) else: self.setEditTriggers(self.DoubleClicked|self.editTriggers()) setup_dnd_interface(self) self.setAlternatingRowColors(True) self.setShowGrid(False) self.setWordWrap(False) self.rating_delegate = RatingDelegate(self) self.timestamp_delegate = DateDelegate(self) self.pubdate_delegate = PubDateDelegate(self) self.last_modified_delegate = DateDelegate(self, tweak_name='gui_last_modified_display_format') self.languages_delegate = LanguagesDelegate(self) self.tags_delegate = CompleteDelegate(self, ',', 'all_tag_names') self.authors_delegate = 
CompleteDelegate(self, '&', 'all_author_names', True) self.cc_names_delegate = CompleteDelegate(self, '&', 'all_custom', True) self.series_delegate = TextDelegate(self) self.publisher_delegate = TextDelegate(self) self.text_delegate = TextDelegate(self) self.cc_text_delegate = CcTextDelegate(self) self.cc_enum_delegate = CcEnumDelegate(self) self.cc_bool_delegate = CcBoolDelegate(self) self.cc_comments_delegate = CcCommentsDelegate(self) self.cc_template_delegate = CcTemplateDelegate(self) self.cc_number_delegate = CcNumberDelegate(self) self.display_parent = parent self._model = modelcls(self) self.setModel(self._model) self._model.count_changed_signal.connect(self.do_row_sizing, type=Qt.QueuedConnection) self.setSelectionBehavior(QAbstractItemView.SelectRows) self.setSortingEnabled(True) self.selectionModel().currentRowChanged.connect(self._model.current_changed) self.preserve_state = partial(PreserveViewState, self) self.marked_changed_listener = FunctionDispatcher(self.marked_changed) # {{{ Column Header setup self.can_add_columns = True self.was_restored = False self.column_header = HeaderView(Qt.Horizontal, self) self.setHorizontalHeader(self.column_header) self.column_header.sortIndicatorChanged.disconnect() self.column_header.sortIndicatorChanged.connect(self.user_sort_requested) self.column_header.setMovable(True) self.column_header.setClickable(True) self.column_header.sectionMoved.connect(self.save_state) self.column_header.setContextMenuPolicy(Qt.CustomContextMenu) self.column_header.customContextMenuRequested.connect(self.show_column_header_context_menu) self.column_header.sectionResized.connect(self.column_resized, Qt.QueuedConnection) self.row_header = HeaderView(Qt.Vertical, self) self.row_header.setResizeMode(self.row_header.Fixed) self.setVerticalHeader(self.row_header) # }}} self._model.database_changed.connect(self.database_changed) hv = self.verticalHeader() hv.setClickable(True) hv.setCursor(Qt.PointingHandCursor) self.selected_ids = [] self._model.about_to_be_sorted.connect(self.about_to_be_sorted) self._model.sorting_done.connect(self.sorting_done, type=Qt.QueuedConnection) # Column Header Context Menu {{{ def column_header_context_handler(self, action=None, column=None): if not action or not column: return try: idx = self.column_map.index(column) except: return h = self.column_header if action == 'hide': h.setSectionHidden(idx, True) elif action == 'show': h.setSectionHidden(idx, False) if h.sectionSize(idx) < 3: sz = h.sectionSizeHint(idx) h.resizeSection(idx, sz) elif action == 'ascending': self.sort_by_column_and_order(idx, True) elif action == 'descending': self.sort_by_column_and_order(idx, False) elif action == 'defaults': self.apply_state(self.get_default_state()) elif action == 'addcustcol': self.add_column_signal.emit() elif action.startswith('align_'): alignment = action.partition('_')[-1] self._model.change_alignment(column, alignment) elif action == 'quickview': from calibre.customize.ui import find_plugin qv = find_plugin('Show Quickview') if qv: rows = self.selectionModel().selectedRows() if len(rows) > 0: current_row = rows[0].row() current_col = self.column_map.index(column) index = self.model().index(current_row, current_col) qv.actual_plugin_.change_quickview_column(index) self.save_state() def show_column_header_context_menu(self, pos): idx = self.column_header.logicalIndexAt(pos) if idx > -1 and idx < len(self.column_map): col = self.column_map[idx] name = unicode(self.model().headerData(idx, Qt.Horizontal, Qt.DisplayRole).toString()) 
self.column_header_context_menu = QMenu(self) if col != 'ondevice': self.column_header_context_menu.addAction(_('Hide column %s') % name, partial(self.column_header_context_handler, action='hide', column=col)) m = self.column_header_context_menu.addMenu( _('Sort on %s') % name) a = m.addAction(_('Ascending'), partial(self.column_header_context_handler, action='ascending', column=col)) d = m.addAction(_('Descending'), partial(self.column_header_context_handler, action='descending', column=col)) if self._model.sorted_on[0] == col: ac = a if self._model.sorted_on[1] else d ac.setCheckable(True) ac.setChecked(True) if col not in ('ondevice', 'inlibrary') and \ (not self.model().is_custom_column(col) or self.model().custom_columns[col]['datatype'] not in ('bool', )): m = self.column_header_context_menu.addMenu( _('Change text alignment for %s') % name) al = self._model.alignment_map.get(col, 'left') for x, t in (('left', _('Left')), ('right', _('Right')), ('center', _('Center'))): a = m.addAction(t, partial(self.column_header_context_handler, action='align_'+x, column=col)) if al == x: a.setCheckable(True) a.setChecked(True) if not isinstance(self, DeviceBooksView): if self._model.db.field_metadata[col]['is_category']: act = self.column_header_context_menu.addAction(_('Quickview column %s') % name, partial(self.column_header_context_handler, action='quickview', column=col)) rows = self.selectionModel().selectedRows() if len(rows) > 1: act.setEnabled(False) hidden_cols = [self.column_map[i] for i in range(self.column_header.count()) if self.column_header.isSectionHidden(i)] try: hidden_cols.remove('ondevice') except: pass if hidden_cols: self.column_header_context_menu.addSeparator() m = self.column_header_context_menu.addMenu(_('Show column')) for col in hidden_cols: hidx = self.column_map.index(col) name = unicode(self.model().headerData(hidx, Qt.Horizontal, Qt.DisplayRole).toString()) m.addAction(name, partial(self.column_header_context_handler, action='show', column=col)) self.column_header_context_menu.addSeparator() self.column_header_context_menu.addAction( _('Shrink column if it is too wide to fit'), partial(self.resize_column_to_fit, column=self.column_map[idx])) self.column_header_context_menu.addAction( _('Restore default layout'), partial(self.column_header_context_handler, action='defaults', column=col)) if self.can_add_columns: self.column_header_context_menu.addAction( QIcon(I('column.png')), _('Add your own columns'), partial(self.column_header_context_handler, action='addcustcol', column=col)) self.column_header_context_menu.popup(self.column_header.mapToGlobal(pos)) # }}} # Sorting {{{ def sort_by_column_and_order(self, col, ascending): self.column_header.blockSignals(True) self.sortByColumn(col, Qt.AscendingOrder if ascending else Qt.DescendingOrder) self.column_header.blockSignals(False) def user_sort_requested(self, col, order=Qt.AscendingOrder): if col >= len(self.column_map) or col < 0: return QTableView.sortByColumn(self, col) field = self.column_map[col] self.intelligent_sort(field, order == Qt.AscendingOrder) def intelligent_sort(self, field, ascending): m = self.model() pname = 'previous_sort_order_' + self.__class__.__name__ previous = gprefs.get(pname, {}) if field == m.sorted_on[0] or field not in previous: self.sort_by_named_field(field, ascending) previous[field] = ascending gprefs[pname] = previous return previous[m.sorted_on[0]] = m.sorted_on[1] gprefs[pname] = previous self.sort_by_named_field(field, previous[field]) def about_to_be_sorted(self, idc): 
selected_rows = [r.row() for r in self.selectionModel().selectedRows()] self.selected_ids = [idc(r) for r in selected_rows] def sorting_done(self, indexc): pos = self.horizontalScrollBar().value() self.select_rows(self.selected_ids, using_ids=True, change_current=True, scroll=True) self.selected_ids = [] self.horizontalScrollBar().setValue(pos) def sort_by_named_field(self, field, order, reset=True): if field in self.column_map: idx = self.column_map.index(field) self.sort_by_column_and_order(idx, order) else: self._model.sort_by_named_field(field, order, reset) self.column_header.blockSignals(True) self.column_header.setSortIndicator(-1, Qt.AscendingOrder) self.column_header.blockSignals(False) def multisort(self, fields, reset=True, only_if_different=False): if len(fields) == 0: return sh = self.cleanup_sort_history(self._model.sort_history, ignore_column_map=True) if only_if_different and len(sh) >= len(fields): ret=True for i,t in enumerate(fields): if t[0] != sh[i][0]: ret = False break if ret: return for n,d in reversed(fields): if n in self._model.db.field_metadata.keys(): sh.insert(0, (n, d)) sh = self.cleanup_sort_history(sh, ignore_column_map=True) self._model.sort_history = [tuple(x) for x in sh] self._model.resort(reset=reset) col = fields[0][0] dir = Qt.AscendingOrder if fields[0][1] else Qt.DescendingOrder if col in self.column_map: col = self.column_map.index(col) self.column_header.blockSignals(True) try: self.column_header.setSortIndicator(col, dir) finally: self.column_header.blockSignals(False) # }}} # Ondevice column {{{ def set_ondevice_column_visibility(self): m = self._model self.column_header.setSectionHidden(m.column_map.index('ondevice'), not m.device_connected) def set_device_connected(self, is_connected): self._model.set_device_connected(is_connected) self.set_ondevice_column_visibility() # }}} # Save/Restore State {{{ def get_state(self): h = self.column_header cm = self.column_map state = {} state['hidden_columns'] = [cm[i] for i in range(h.count()) if h.isSectionHidden(i) and cm[i] != 'ondevice'] state['last_modified_injected'] = True state['languages_injected'] = True state['sort_history'] = \ self.cleanup_sort_history(self.model().sort_history, ignore_column_map=self.is_library_view) state['column_positions'] = {} state['column_sizes'] = {} state['column_alignment'] = self._model.alignment_map for i in range(h.count()): name = cm[i] state['column_positions'][name] = h.visualIndex(i) if name != 'ondevice': state['column_sizes'][name] = h.sectionSize(i) return state def write_state(self, state): db = getattr(self.model(), 'db', None) name = unicode(self.objectName()) if name and db is not None: db.prefs.set(name + ' books view state', state) def save_state(self): # Only save if we have been initialized (set_database called) if len(self.column_map) > 0 and self.was_restored: state = self.get_state() self.write_state(state) def cleanup_sort_history(self, sort_history, ignore_column_map=False): history = [] for col, order in sort_history: if not isinstance(order, bool): continue col = {'date':'timestamp', 'sort':'title'}.get(col, col) if ignore_column_map or col in self.column_map: if (not history or history[-1][0] != col): history.append([col, order]) return history def apply_sort_history(self, saved_history, max_sort_levels=3): if not saved_history: return if self.is_library_view: for col, order in reversed(self.cleanup_sort_history( saved_history, ignore_column_map=True)[:max_sort_levels]): self.sort_by_named_field(col, order) else: for col, order in 
reversed(self.cleanup_sort_history( saved_history)[:max_sort_levels]): self.sort_by_column_and_order(self.column_map.index(col), order) def apply_state(self, state, max_sort_levels=3): h = self.column_header cmap = {} hidden = state.get('hidden_columns', []) for i, c in enumerate(self.column_map): cmap[c] = i if c != 'ondevice': h.setSectionHidden(i, c in hidden) positions = state.get('column_positions', {}) pmap = {} for col, pos in positions.items(): if col in cmap: pmap[pos] = col for pos in sorted(pmap.keys()): col = pmap[pos] idx = cmap[col] current_pos = h.visualIndex(idx) if current_pos != pos: h.moveSection(current_pos, pos) sizes = state.get('column_sizes', {}) for col, size in sizes.items(): if col in cmap: sz = sizes[col] if sz < 3: sz = h.sectionSizeHint(cmap[col]) h.resizeSection(cmap[col], sz) self.apply_sort_history(state.get('sort_history', None), max_sort_levels=max_sort_levels) for col, alignment in state.get('column_alignment', {}).items(): self._model.change_alignment(col, alignment) for i in range(h.count()): if not h.isSectionHidden(i) and h.sectionSize(i) < 3: sz = h.sectionSizeHint(i) h.resizeSection(i, sz) def get_default_state(self): old_state = { 'hidden_columns': ['last_modified', 'languages'], 'sort_history':[DEFAULT_SORT], 'column_positions': {}, 'column_sizes': {}, 'column_alignment': { 'size':'center', 'timestamp':'center', 'pubdate':'center'}, 'last_modified_injected': True, 'languages_injected': True, } h = self.column_header cm = self.column_map for i in range(h.count()): name = cm[i] old_state['column_positions'][name] = i if name != 'ondevice': old_state['column_sizes'][name] = \ min(350, max(self.sizeHintForColumn(i), h.sectionSizeHint(i))) if name in ('timestamp', 'last_modified'): old_state['column_sizes'][name] += 12 return old_state def get_old_state(self): ans = None name = unicode(self.objectName()) if name: name += ' books view state' db = getattr(self.model(), 'db', None) if db is not None: ans = db.prefs.get(name, None) if ans is None: ans = gprefs.get(name, None) try: del gprefs[name] except: pass if ans is not None: db.prefs[name] = ans else: injected = False if not ans.get('last_modified_injected', False): injected = True ans['last_modified_injected'] = True hc = ans.get('hidden_columns', []) if 'last_modified' not in hc: hc.append('last_modified') if not ans.get('languages_injected', False): injected = True ans['languages_injected'] = True hc = ans.get('hidden_columns', []) if 'languages' not in hc: hc.append('languages') if injected: db.prefs[name] = ans return ans def restore_state(self): old_state = self.get_old_state() if old_state is None: old_state = self.get_default_state() max_levels = 3 if tweaks['sort_columns_at_startup'] is not None: sh = [] try: for c,d in tweaks['sort_columns_at_startup']: if not isinstance(d, bool): d = True if d == 0 else False sh.append((c, d)) except: # Ignore invalid tweak values as users seem to often get them # wrong print('Ignoring invalid sort_columns_at_startup tweak, with error:') import traceback traceback.print_exc() old_state['sort_history'] = sh max_levels = max(3, len(sh)) self.column_header.blockSignals(True) self.apply_state(old_state, max_sort_levels=max_levels) self.column_header.blockSignals(False) self.do_row_sizing() self.was_restored = True def refresh_row_sizing(self): self.row_sizing_done = False self.do_row_sizing() def do_row_sizing(self): # Resize all rows to have the correct height if not self.row_sizing_done and self.model().rowCount(QModelIndex()) > 0: 
self.resizeRowToContents(0) self.verticalHeader().setDefaultSectionSize(self.rowHeight(0) + gprefs['extra_row_spacing']) self._model.set_row_height(self.rowHeight(0)) self.row_sizing_done = True def resize_column_to_fit(self, column): col = self.column_map.index(column) self.column_resized(col, self.columnWidth(col), self.columnWidth(col)) def column_resized(self, col, old_size, new_size): # arbitrary: scroll bar + header + some max_width = self.width() - (self.verticalScrollBar().width() + self.verticalHeader().width() + 10) if max_width < 200: max_width = 200 if new_size > max_width: self.column_header.blockSignals(True) self.setColumnWidth(col, max_width) self.column_header.blockSignals(False) # }}} # Initialization/Delegate Setup {{{ def set_database(self, db): self.alternate_views.set_database(db) self.save_state() self._model.set_database(db) self.tags_delegate.set_database(db) self.cc_names_delegate.set_database(db) self.authors_delegate.set_database(db) self.series_delegate.set_auto_complete_function(db.all_series) self.publisher_delegate.set_auto_complete_function(db.all_publishers) self.alternate_views.set_database(db, stage=1) def marked_changed(self, old_marked, current_marked): self.alternate_views.marked_changed(old_marked, current_marked) if bool(old_marked) == bool(current_marked): changed = old_marked | current_marked i = self.model().db.data.id_to_index def f(x): try: return i(x) except ValueError: pass sections = tuple(x for x in map(f, changed) if x is not None) if sections: self.row_header.headerDataChanged(Qt.Vertical, min(sections), max(sections)) else: # Marked items have either appeared or all been removed self.model().set_row_decoration(current_marked) self.row_header.headerDataChanged(Qt.Vertical, 0, self.row_header.count()-1) self.row_header.geometriesChanged.emit() def database_changed(self, db): db.data.add_marked_listener(self.marked_changed_listener) for i in range(self.model().columnCount(None)): if self.itemDelegateForColumn(i) in (self.rating_delegate, self.timestamp_delegate, self.pubdate_delegate, self.last_modified_delegate, self.languages_delegate): self.setItemDelegateForColumn(i, self.itemDelegate()) cm = self.column_map for colhead in cm: if self._model.is_custom_column(colhead): cc = self._model.custom_columns[colhead] if cc['datatype'] == 'datetime': delegate = CcDateDelegate(self) delegate.set_format(cc['display'].get('date_format','')) self.setItemDelegateForColumn(cm.index(colhead), delegate) elif cc['datatype'] == 'comments': self.setItemDelegateForColumn(cm.index(colhead), self.cc_comments_delegate) elif cc['datatype'] == 'text': if cc['is_multiple']: if cc['display'].get('is_names', False): self.setItemDelegateForColumn(cm.index(colhead), self.cc_names_delegate) else: self.setItemDelegateForColumn(cm.index(colhead), self.tags_delegate) else: self.setItemDelegateForColumn(cm.index(colhead), self.cc_text_delegate) elif cc['datatype'] == 'series': self.setItemDelegateForColumn(cm.index(colhead), self.cc_text_delegate) elif cc['datatype'] in ('int', 'float'): self.setItemDelegateForColumn(cm.index(colhead), self.cc_number_delegate) elif cc['datatype'] == 'bool': self.setItemDelegateForColumn(cm.index(colhead), self.cc_bool_delegate) elif cc['datatype'] == 'rating': self.setItemDelegateForColumn(cm.index(colhead), self.rating_delegate) elif cc['datatype'] == 'composite': self.setItemDelegateForColumn(cm.index(colhead), self.cc_template_delegate) elif cc['datatype'] == 'enumeration': self.setItemDelegateForColumn(cm.index(colhead), 
self.cc_enum_delegate) else: dattr = colhead+'_delegate' delegate = colhead if hasattr(self, dattr) else 'text' self.setItemDelegateForColumn(cm.index(colhead), getattr(self, delegate+'_delegate')) self.restore_state() self.set_ondevice_column_visibility() #}}} # Context Menu {{{ def set_context_menu(self, menu, edit_collections_action): self.setContextMenuPolicy(Qt.DefaultContextMenu) self.context_menu = menu self.alternate_views.set_context_menu(menu) self.edit_collections_action = edit_collections_action def contextMenuEvent(self, event): sac = self.gui.iactions['Sort By'] sort_added = tuple(ac for ac in self.context_menu.actions() if ac is sac.qaction) if sort_added: sac.update_menu() self.context_menu.popup(event.globalPos()) event.accept() # }}} @property def column_map(self): return self._model.column_map @property def visible_columns(self): h = self.horizontalHeader() logical_indices = (x for x in xrange(h.count()) if not h.isSectionHidden(x)) rmap = {i:x for i, x in enumerate(self.column_map)} return (rmap[h.visualIndex(x)] for x in logical_indices if h.visualIndex(x) > -1) def refresh_book_details(self): idx = self.currentIndex() if idx.isValid(): self._model.current_changed(idx, idx) return True return False def scrollContentsBy(self, dx, dy): # Needed as Qt bug causes headerview to not always update when scrolling QTableView.scrollContentsBy(self, dx, dy) if dy != 0: self.column_header.update() def scroll_to_row(self, row): if row > -1: h = self.horizontalHeader() for i in range(h.count()): if not h.isSectionHidden(i) and h.sectionViewportPosition(i) >= 0: self.scrollTo(self.model().index(row, i), self.PositionAtCenter) break @property def current_book(self): ci = self.currentIndex() if ci.isValid(): try: return self.model().db.data.index_to_id(ci.row()) except (IndexError, ValueError, KeyError, TypeError, AttributeError): pass def current_book_state(self): return self.current_book, self.horizontalScrollBar().value() def restore_current_book_state(self, state): book_id, hpos = state try: row = self.model().db.data.id_to_index(book_id) except (IndexError, ValueError, KeyError, TypeError, AttributeError): return self.set_current_row(row) self.scroll_to_row(row) self.horizontalScrollBar().setValue(hpos) def set_current_row(self, row=0, select=True, for_sync=False): if row > -1 and row < self.model().rowCount(QModelIndex()): h = self.horizontalHeader() logical_indices = list(range(h.count())) logical_indices = [x for x in logical_indices if not h.isSectionHidden(x)] pairs = [(x, h.visualIndex(x)) for x in logical_indices if h.visualIndex(x) > -1] if not pairs: pairs = [(0, 0)] pairs.sort(cmp=lambda x,y:cmp(x[1], y[1])) i = pairs[0][0] index = self.model().index(row, i) if for_sync: sm = self.selectionModel() sm.setCurrentIndex(index, sm.NoUpdate) else: self.setCurrentIndex(index) if select: sm = self.selectionModel() sm.select(index, sm.ClearAndSelect|sm.Rows) def row_at_top(self): pos = 0 while pos < 100: ans = self.rowAt(pos) if ans > -1: return ans pos += 5 def row_at_bottom(self): pos = self.viewport().height() limit = pos - 100 while pos > limit: ans = self.rowAt(pos) if ans > -1: return ans pos -= 5 def moveCursor(self, action, modifiers): orig = self.currentIndex() index = QTableView.moveCursor(self, action, modifiers) if action == QTableView.MovePageDown: moved = index.row() - orig.row() try: rows = self.row_at_bottom() - self.row_at_top() except TypeError: rows = moved if moved > rows: index = self.model().index(orig.row() + rows, index.column()) elif action == 
QTableView.MovePageUp: moved = orig.row() - index.row() try: rows = self.row_at_bottom() - self.row_at_top() except TypeError: rows = moved if moved > rows: index = self.model().index(orig.row() - rows, index.column()) elif action == QTableView.MoveHome and modifiers & Qt.ControlModifier: return self.model().index(0, orig.column()) elif action == QTableView.MoveEnd and modifiers & Qt.ControlModifier: return self.model().index(self.model().rowCount(QModelIndex()) - 1, orig.column()) return index def ids_to_rows(self, ids): row_map = OrderedDict() ids = frozenset(ids) m = self.model() for row in xrange(m.rowCount(QModelIndex())): if len(row_map) >= len(ids): break c = m.id(row) if c in ids: row_map[c] = row return row_map def select_rows(self, identifiers, using_ids=True, change_current=True, scroll=True): ''' Select rows identified by identifiers. identifiers can be a set of ids, row numbers or QModelIndexes. ''' rows = set([x.row() if hasattr(x, 'row') else x for x in identifiers]) if using_ids: rows = set([]) identifiers = set(identifiers) m = self.model() for row in xrange(m.rowCount(QModelIndex())): if m.id(row) in identifiers: rows.add(row) rows = list(sorted(rows)) if rows: row = rows[0] if change_current: self.set_current_row(row, select=False) if scroll: self.scroll_to_row(row) sm = self.selectionModel() sel = QItemSelection() m = self.model() max_col = m.columnCount(QModelIndex()) - 1 # Create a range based selector for each set of contiguous rows # as supplying selectors for each individual row causes very poor # performance if a large number of rows has to be selected. for k, g in itertools.groupby(enumerate(rows), lambda (i,x):i-x): group = list(map(operator.itemgetter(1), g)) sel.merge(QItemSelection(m.index(min(group), 0), m.index(max(group), max_col)), sm.Select) sm.select(sel, sm.ClearAndSelect) def get_selected_ids(self): ans = [] m = self.model() for idx in self.selectedIndexes(): r = idx.row() i = m.id(r) if i not in ans: ans.append(i) return ans @dynamic_property def current_id(self): def fget(self): try: return self.model().id(self.currentIndex()) except: pass return None def fset(self, val): if val is None: return m = self.model() for row in xrange(m.rowCount(QModelIndex())): if m.id(row) == val: self.set_current_row(row, select=False) break return property(fget=fget, fset=fset) @property def next_id(self): ''' Return the id of the 'next' row (i.e. the first unselected row after the current row). 
''' ci = self.currentIndex() if not ci.isValid(): return None selected_rows = frozenset([i.row() for i in self.selectedIndexes() if i.isValid()]) column = ci.column() for i in xrange(ci.row()+1, self.row_count()): if i in selected_rows: continue try: return self.model().id(self.model().index(i, column)) except: pass # No unselected rows after the current row, look before for i in xrange(ci.row()-1, -1, -1): if i in selected_rows: continue try: return self.model().id(self.model().index(i, column)) except: pass return None def close(self): self._model.close() def set_editable(self, editable, supports_backloading): self._model.set_editable(editable) def move_highlighted_row(self, forward): rows = self.selectionModel().selectedRows() if len(rows) > 0: current_row = rows[0].row() else: current_row = None id_to_select = self._model.get_next_highlighted_id(current_row, forward) if id_to_select is not None: self.select_rows([id_to_select], using_ids=True) def search_proxy(self, txt): if self.is_library_view: # Save the current book before doing the search, after the search # is completed, this book will become the current book and be # scrolled to if it is present in the search results self.alternate_views.save_current_book_state() self._model.search(txt) id_to_select = self._model.get_current_highlighted_id() if id_to_select is not None: self.select_rows([id_to_select], using_ids=True) elif self._model.highlight_only: self.clearSelection() if self.isVisible(): self.setFocus(Qt.OtherFocusReason) def connect_to_search_box(self, sb, search_done): sb.search.connect(self.search_proxy) self._search_done = search_done self._model.searched.connect(self.search_done) if self.is_library_view: self._model.search_done.connect(self.alternate_views.restore_current_book_state) def connect_to_book_display(self, bd): self._model.new_bookdisplay_data.connect(bd) def search_done(self, ok): self._search_done(self, ok) def row_count(self): return self._model.count() # }}} class DeviceBooksView(BooksView): # {{{ is_library_view = False def __init__(self, parent): BooksView.__init__(self, parent, DeviceBooksModel, use_edit_metadata_dialog=False) self._model.resize_rows.connect(self.do_row_sizing, type=Qt.QueuedConnection) self.can_add_columns = False self.columns_resized = False self.resize_on_select = False self.rating_delegate = None for i in range(10): self.setItemDelegateForColumn(i, TextDelegate(self)) self.setDragDropMode(self.NoDragDrop) self.setAcceptDrops(False) def drag_data(self): m = self.model() rows = self.selectionModel().selectedRows() paths = [force_unicode(p, enc=filesystem_encoding) for p in m.paths(rows) if p] md = QMimeData() md.setData('application/calibre+from_device', 'dummy') md.setUrls([QUrl.fromLocalFile(p) for p in paths]) drag = QDrag(self) drag.setMimeData(md) cover = self.drag_icon(m.cover(self.currentIndex().row()), len(paths) > 1) drag.setHotSpot(QPoint(-15, -15)) drag.setPixmap(cover) return drag def contextMenuEvent(self, event): edit_collections = callable(getattr(self._model.db, 'supports_collections', None)) and \ self._model.db.supports_collections() and \ prefs['manage_device_metadata'] == 'manual' self.edit_collections_action.setVisible(edit_collections) self.context_menu.popup(event.globalPos()) event.accept() def get_old_state(self): ans = None name = unicode(self.objectName()) if name: name += ' books view state' ans = gprefs.get(name, None) return ans def write_state(self, state): name = unicode(self.objectName()) if name: gprefs.set(name + ' books view state', state) def 
set_database(self, db): self._model.set_database(db) self.restore_state() def resizeColumnsToContents(self): QTableView.resizeColumnsToContents(self) self.columns_resized = True def connect_dirtied_signal(self, slot): self._model.booklist_dirtied.connect(slot) def connect_upload_collections_signal(self, func=None, oncard=None): self._model.upload_collections.connect(partial(func, view=self, oncard=oncard)) def dropEvent(self, *args): error_dialog(self, _('Not allowed'), _('Dropping onto a device is not supported. First add the book to the calibre library.')).exec_() def set_editable(self, editable, supports_backloading): self._model.set_editable(editable) self.drag_allowed = supports_backloading # }}}
nozuono/calibre-webserver
src/calibre/gui2/library/views.py
Python
gpl-3.0
46,406
#!/usr/bin/env python
#-*- coding: utf-8 -*-

###########################################################################
##                                                                       ##
## Copyrights Frédéric Rodrigo 2012                                      ##
##                                                                       ##
## This program is free software: you can redistribute it and/or modify  ##
## it under the terms of the GNU General Public License as published by  ##
## the Free Software Foundation, either version 3 of the License, or     ##
## (at your option) any later version.                                   ##
##                                                                       ##
## This program is distributed in the hope that it will be useful,       ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of        ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          ##
## GNU General Public License for more details.                          ##
##                                                                       ##
## You should have received a copy of the GNU General Public License     ##
## along with this program. If not, see <http://www.gnu.org/licenses/>.  ##
##                                                                       ##
###########################################################################

from modules.OsmoseTranslation import T_
from .Analyser_Merge import Analyser_Merge, Select


class _Analyser_Merge_Street_Number(Analyser_Merge):
    def __init__(self, config, classs, city, logger, url, name, parser, load, mapping):
        Analyser_Merge.__init__(self, config, logger)
        self.def_class_missing_official(item = 8080, id = classs, level = 3, tags = ['addr'],
            title = T_('Missing address {0}', city),
            detail = T_(
'''Address in an OpenData set was not found. Only the position and street
numbers are checked.'''),
            fix = T_(
'''Add or move a number, check the field.'''),
            trap = T_(
'''Pay attention to the data freshness.'''))
        self.init(
            url, name,
            parser,
            load,
            mapping)

        self.conflate.select = Select(
            types = ["nodes", "ways"],
            tags = [{"addr:housenumber": None}])
        self.conflate.extraJoin = "addr:housenumber"
        self.conflate.conflationDistance = 100
tkasp/osmose-backend
analysers/disabled/analyser_merge_street_number.py
Python
gpl-3.0
2,397
-1 + 1
1 + 1.0
#! 2 type-error-operation
1 + '1'
#! 2 type-error-operation
1 - '1'
-1 - - 1
-1 - int()
int() - float()
float() - 3.0

a = 3
b = ''
#! 2 type-error-operation
a + b
snakeleon/YouCompleteMe-x86
third_party/ycmd/third_party/JediHTTP/vendor/jedi/test/static_analysis/operations.py
Python
gpl-3.0
180
#!/usr/bin/env python
from __future__ import division

from nose.tools import *
import networkx as nx


def example1a_G():
    G = nx.Graph()
    G.add_node(1, percolation=0.1)
    G.add_node(2, percolation=0.2)
    G.add_node(3, percolation=0.2)
    G.add_node(4, percolation=0.2)
    G.add_node(5, percolation=0.3)
    G.add_node(6, percolation=0.2)
    G.add_node(7, percolation=0.5)
    G.add_node(8, percolation=0.5)
    G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)])
    return G


def example1b_G():
    G = nx.Graph()
    G.add_node(1, percolation=0.3)
    G.add_node(2, percolation=0.5)
    G.add_node(3, percolation=0.5)
    G.add_node(4, percolation=0.2)
    G.add_node(5, percolation=0.3)
    G.add_node(6, percolation=0.2)
    G.add_node(7, percolation=0.1)
    G.add_node(8, percolation=0.1)
    G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)])
    return G


class TestPercolationCentrality(object):
    def test_percolation_example1a(self):
        """percolation centrality: example 1a"""
        G = example1a_G()
        p = nx.percolation_centrality(G)
        p_answer = {4: 0.625, 6: 0.667}
        for n in p_answer:
            assert_almost_equal(p[n], p_answer[n], places=3)

    def test_percolation_example1b(self):
        """percolation centrality: example 1b"""
        G = example1b_G()
        p = nx.percolation_centrality(G)
        p_answer = {4: 0.825, 6: 0.4}
        for n in p_answer:
            assert_almost_equal(p[n], p_answer[n], places=3)

    def test_converge_to_betweenness(self):
        """percolation centrality: should converge to betweenness
        centrality when all nodes are percolated the same"""
        # taken from betweenness test test_florentine_families_graph
        G = nx.florentine_families_graph()
        b_answer = \
            {'Acciaiuoli': 0.000,
             'Albizzi': 0.212,
             'Barbadori': 0.093,
             'Bischeri': 0.104,
             'Castellani': 0.055,
             'Ginori': 0.000,
             'Guadagni': 0.255,
             'Lamberteschi': 0.000,
             'Medici': 0.522,
             'Pazzi': 0.000,
             'Peruzzi': 0.022,
             'Ridolfi': 0.114,
             'Salviati': 0.143,
             'Strozzi': 0.103,
             'Tornabuoni': 0.092}

        p_states = {k: 1.0 for k, v in b_answer.items()}
        p_answer = nx.percolation_centrality(G, states=p_states)
        for n in sorted(G):
            assert_almost_equal(p_answer[n], b_answer[n], places=3)

        p_states = {k: 0.3 for k, v in b_answer.items()}
        p_answer = nx.percolation_centrality(G, states=p_states)
        for n in sorted(G):
            assert_almost_equal(p_answer[n], b_answer[n], places=3)
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py
Python
gpl-3.0
2,836
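A minimal, hedged example of the API the tests above cover (not part of the test module): with states=None, percolation_centrality reads the per-node 'percolation' attribute, exactly as the example graphs in the tests set it.

# Hedged sketch: a small path graph with one percolation state per node.
import networkx as nx

G = nx.Graph()
for n, state in enumerate([0.1, 0.2, 0.3, 0.4], start=1):
    G.add_node(n, percolation=state)      # per-node percolation state
G.add_edges_from([(1, 2), (2, 3), (3, 4)])

# With states=None the 'percolation' node attribute is used, as in the tests.
print(nx.percolation_centrality(G))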
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import cint, cstr, flt from frappe import _ from erpnext.setup.utils import get_exchange_rate from frappe.website.website_generator import WebsiteGenerator from erpnext.stock.get_item_details import get_conversion_factor from operator import itemgetter form_grid_templates = { "items": "templates/form_grid/item_grid.html" } class BOM(WebsiteGenerator): website = frappe._dict( # page_title_field = "item_name", condition_field = "show_in_website", template = "templates/generators/bom.html" ) def autoname(self): names = frappe.db.sql_list("""select name from `tabBOM` where item=%s""", self.item) if names: # name can be BOM/ITEM/001, BOM/ITEM/001-1, BOM-ITEM-001, BOM-ITEM-001-1 # split by item names = [name.split(self.item)[-1][1:] for name in names] # split by (-) if cancelled names = [cint(name.split('-')[-1]) for name in names] idx = max(names) + 1 else: idx = 1 self.name = 'BOM-' + self.item + ('-%.3i' % idx) def validate(self): # if not self.route: self.route = frappe.scrub(self.name).replace('_', '-') self.clear_operations() self.validate_main_item() self.validate_currency() self.set_conversion_rate() from erpnext.utilities.transaction_base import validate_uom_is_integer validate_uom_is_integer(self, "stock_uom", "stock_qty", "BOM Item") self.validate_materials() self.set_bom_material_details() self.validate_operations() self.calculate_cost() def get_context(self, context): context.parents = [{'name': 'boms', 'title': _('All BOMs') }] def on_update(self): self.check_recursion() self.update_stock_qty() self.update_exploded_items() def on_submit(self): self.manage_default_bom() def on_cancel(self): frappe.db.set(self, "is_active", 0) frappe.db.set(self, "is_default", 0) # check if used in any other bom self.validate_bom_links() self.manage_default_bom() def on_update_after_submit(self): self.validate_bom_links() self.manage_default_bom() def get_item_det(self, item_code): item = frappe.db.sql("""select name, item_name, docstatus, description, image, is_sub_contracted_item, stock_uom, default_bom, last_purchase_rate from `tabItem` where name=%s""", item_code, as_dict = 1) if not item: frappe.throw(_("Item: {0} does not exist in the system").format(item_code)) return item def validate_rm_item(self, item): if (item[0]['name'] in [it.item_code for it in self.items]) and item[0]['name'] == self.item: frappe.throw(_("Raw material cannot be same as main Item")) def set_bom_material_details(self): for item in self.get("items"): ret = self.get_bom_material_detail({"item_code": item.item_code, "item_name": item.item_name, "bom_no": item.bom_no, "stock_qty": item.stock_qty}) for r in ret: if not item.get(r): item.set(r, ret[r]) self.validate_bom_currecny(item) def get_bom_material_detail(self, args=None): """ Get raw material details like uom, desc and rate""" if not args: args = frappe.form_dict.get('args') if isinstance(args, basestring): import json args = json.loads(args) item = self.get_item_det(args['item_code']) self.validate_rm_item(item) args['bom_no'] = args['bom_no'] or item and cstr(item[0]['default_bom']) or '' args.update(item[0]) rate = self.get_rm_rate(args) ret_item = { 'item_name' : item and args['item_name'] or '', 'description' : item and args['description'] or '', 'image' : item and args['image'] or '', 'stock_uom' : item and args['stock_uom'] or '', 'uom' : item and args['stock_uom'] 
or '', 'conversion_factor' : 1, 'bom_no' : args['bom_no'], 'rate' : rate, 'stock_qty' : args.get("qty") or args.get("stock_qty") or 1, 'base_rate' : rate if self.company_currency() == self.currency else rate * self.conversion_rate } return ret_item def validate_bom_currecny(self, item): if item.get('bom_no') and frappe.db.get_value('BOM', item.get('bom_no'), 'currency') != self.currency: frappe.throw(_("Row {0}: Currency of the BOM #{1} should be equal to the selected currency {2}").format(item.idx, item.bom_no, self.currency)) def get_rm_rate(self, arg): """ Get raw material rate as per selected method, if bom exists takes bom cost """ rate = 0 if arg.get('scrap_items'): rate = self.get_valuation_rate(arg) elif arg: if self.rm_cost_as_per == 'Valuation Rate': rate = self.get_valuation_rate(arg) elif self.rm_cost_as_per == 'Last Purchase Rate': rate = arg['last_purchase_rate'] elif self.rm_cost_as_per == "Price List": if not self.buying_price_list: frappe.throw(_("Please select Price List")) rate = frappe.db.get_value("Item Price", {"price_list": self.buying_price_list, "item_code": arg["item_code"]}, "price_list_rate") or 0 if not rate and arg['bom_no']: rate = self.get_bom_unitcost(arg['bom_no']) return rate def update_cost(self): if self.docstatus == 2: return for d in self.get("items"): rate = self.get_bom_material_detail({'item_code': d.item_code, 'bom_no': d.bom_no, 'stock_qty': d.stock_qty})["rate"] if rate: d.rate = rate if self.docstatus == 1: self.flags.ignore_validate_update_after_submit = True self.calculate_cost() self.save() self.update_exploded_items() frappe.msgprint(_("Cost Updated")) def get_bom_unitcost(self, bom_no): bom = frappe.db.sql("""select name, total_cost/quantity as unit_cost from `tabBOM` where is_active = 1 and name = %s""", bom_no, as_dict=1) return bom and bom[0]['unit_cost'] or 0 def get_valuation_rate(self, args): """ Get weighted average of valuation rate from all warehouses """ total_qty, total_value, valuation_rate = 0.0, 0.0, 0.0 for d in frappe.db.sql("""select actual_qty, stock_value from `tabBin` where item_code=%s""", args['item_code'], as_dict=1): total_qty += flt(d.actual_qty) total_value += flt(d.stock_value) if total_qty: valuation_rate = total_value / total_qty if valuation_rate <= 0: last_valuation_rate = frappe.db.sql("""select valuation_rate from `tabStock Ledger Entry` where item_code = %s and valuation_rate > 0 order by posting_date desc, posting_time desc, name desc limit 1""", args['item_code']) valuation_rate = flt(last_valuation_rate[0][0]) if last_valuation_rate else 0 return valuation_rate def manage_default_bom(self): """ Uncheck others if current one is selected as default, update default bom in item master """ if self.is_default and self.is_active: from frappe.model.utils import set_default set_default(self, "item") item = frappe.get_doc("Item", self.item) if item.default_bom != self.name: item.default_bom = self.name item.save(ignore_permissions = True) else: frappe.db.set(self, "is_default", 0) item = frappe.get_doc("Item", self.item) if item.default_bom == self.name: item.default_bom = None item.save(ignore_permissions = True) def clear_operations(self): if not self.with_operations: self.set('operations', []) def validate_main_item(self): """ Validate main FG item""" item = self.get_item_det(self.item) if not item: frappe.throw(_("Item {0} does not exist in the system or has expired").format(self.item)) else: ret = frappe.db.get_value("Item", self.item, ["description", "stock_uom", "item_name"]) self.description = ret[0] 
self.uom = ret[1] self.item_name= ret[2] if not self.quantity: frappe.throw(_("Quantity should be greater than 0")) def validate_currency(self): if self.rm_cost_as_per == 'Price List' and \ frappe.db.get_value('Price List', self.buying_price_list, 'currency') != self.currency: frappe.throw(_("Currency of the price list {0} is not similar with the selected currency {1}").format(self.buying_price_list, self.currency)) def update_stock_qty(self): for m in self.get('items'): if not m.conversion_factor: m.conversion_factor = flt(get_conversion_factor(m.item_code, m.uom)['conversion_factor']) if m.uom and m.qty: m.stock_qty = flt(m.conversion_factor)*flt(m.qty) if not m.uom and m.stock_uom: m.uom = m.stock_uom m.qty = m.stock_qty def set_conversion_rate(self): self.conversion_rate = get_exchange_rate(self.currency, self.company_currency()) def validate_materials(self): """ Validate raw material entries """ def get_duplicates(lst): seen = set() seen_add = seen.add for item in lst: if item.item_code in seen or seen_add(item.item_code): yield item if not self.get('items'): frappe.throw(_("Raw Materials cannot be blank.")) check_list = [] for m in self.get('items'): if m.bom_no: validate_bom_no(m.item_code, m.bom_no) if flt(m.stock_qty) <= 0: frappe.throw(_("Quantity required for Item {0} in row {1}").format(m.item_code, m.idx)) check_list.append(m) duplicate_items = list(get_duplicates(check_list)) if duplicate_items: li = [] for i in duplicate_items: li.append("{0} on row {1}".format(i.item_code, i.idx)) duplicate_list = '<br>' + '<br>'.join(li) frappe.throw(_("Same item has been entered multiple times. {list}").format(list=duplicate_list)) def check_recursion(self): """ Check whether recursion occurs in any bom""" check_list = [['parent', 'bom_no', 'parent'], ['bom_no', 'parent', 'child']] for d in check_list: bom_list, count = [self.name], 0 while (len(bom_list) > count ): boms = frappe.db.sql(" select %s from `tabBOM Item` where %s = %s " % (d[0], d[1], '%s'), cstr(bom_list[count])) count = count + 1 for b in boms: if b[0] == self.name: frappe.throw(_("BOM recursion: {0} cannot be parent or child of {2}").format(b[0], self.name)) if b[0]: bom_list.append(b[0]) def update_cost_and_exploded_items(self, bom_list=[]): bom_list = self.traverse_tree(bom_list) for bom in bom_list: bom_obj = frappe.get_doc("BOM", bom) bom_obj.on_update() return bom_list def traverse_tree(self, bom_list=None): def _get_children(bom_no): return [cstr(d[0]) for d in frappe.db.sql("""select bom_no from `tabBOM Item` where parent = %s and ifnull(bom_no, '') != ''""", bom_no)] count = 0 if not bom_list: bom_list = [] if self.name not in bom_list: bom_list.append(self.name) while(count < len(bom_list)): for child_bom in _get_children(bom_list[count]): if child_bom not in bom_list: bom_list.append(child_bom) count += 1 bom_list.reverse() return bom_list def calculate_cost(self): """Calculate bom totals""" self.calculate_op_cost() self.calculate_rm_cost() self.calculate_sm_cost() self.total_cost = self.operating_cost + self.raw_material_cost - self.scrap_material_cost self.base_total_cost = self.base_operating_cost + self.base_raw_material_cost - self.base_scrap_material_cost def calculate_op_cost(self): """Update workstation rate and calculates totals""" self.operating_cost = 0 self.base_operating_cost = 0 for d in self.get('operations'): if d.workstation: if not d.hour_rate: d.hour_rate = flt(frappe.db.get_value("Workstation", d.workstation, "hour_rate")) if d.hour_rate and d.time_in_mins: d.operating_cost = flt(d.hour_rate) 
* flt(d.time_in_mins) / 60.0 d.base_hour_rate = flt(d.hour_rate) * flt(self.conversion_rate) d.base_operating_cost = flt(d.base_hour_rate) * flt(d.time_in_mins) / 60.0 self.operating_cost += flt(d.operating_cost) self.base_operating_cost += flt(d.base_operating_cost) def calculate_rm_cost(self): """Fetch RM rate as per today's valuation rate and calculate totals""" total_rm_cost = 0 base_total_rm_cost = 0 for d in self.get('items'): d.base_rate = flt(d.rate) * flt(self.conversion_rate) d.amount = flt(d.rate, self.precision("rate", d)) * flt(d.stock_qty, self.precision("stock_qty", d)) d.base_amount = d.amount * flt(self.conversion_rate) d.qty_consumed_per_unit = flt(d.stock_qty, self.precision("stock_qty", d)) / flt(self.quantity, self.precision("quantity")) total_rm_cost += d.amount base_total_rm_cost += d.base_amount self.raw_material_cost = total_rm_cost self.base_raw_material_cost = base_total_rm_cost def calculate_sm_cost(self): """Fetch RM rate as per today's valuation rate and calculate totals""" total_sm_cost = 0 base_total_sm_cost = 0 for d in self.get('scrap_items'): d.base_rate = d.rate * self.conversion_rate d.amount = flt(d.rate, self.precision("rate", d)) * flt(d.stock_qty, self.precision("stock_qty", d)) d.base_amount = d.amount * self.conversion_rate total_sm_cost += d.amount base_total_sm_cost += d.base_amount self.scrap_material_cost = total_sm_cost self.base_scrap_material_cost = base_total_sm_cost def update_exploded_items(self): """ Update Flat BOM, following will be correct data""" self.get_exploded_items() self.add_exploded_items() def get_exploded_items(self): """ Get all raw materials including items from child bom""" self.cur_exploded_items = {} for d in self.get('items'): if d.bom_no: self.get_child_exploded_items(d.bom_no, d.stock_qty) else: self.add_to_cur_exploded_items(frappe._dict({ 'item_code' : d.item_code, 'item_name' : d.item_name, 'source_warehouse': d.source_warehouse, 'description' : d.description, 'image' : d.image, 'stock_uom' : d.stock_uom, 'stock_qty' : flt(d.stock_qty), 'rate' : d.base_rate, })) def company_currency(self): return frappe.db.get_value('Company', self.company, 'default_currency') def add_to_cur_exploded_items(self, args): if self.cur_exploded_items.get(args.item_code): self.cur_exploded_items[args.item_code]["stock_qty"] += args.stock_qty else: self.cur_exploded_items[args.item_code] = args def get_child_exploded_items(self, bom_no, stock_qty): """ Add all items from Flat BOM of child BOM""" # Did not use qty_consumed_per_unit in the query, as it leads to rounding loss child_fb_items = frappe.db.sql("""select bom_item.item_code, bom_item.item_name, bom_item.description, bom_item.source_warehouse, bom_item.stock_uom, bom_item.stock_qty, bom_item.rate, bom_item.stock_qty / ifnull(bom.quantity, 1) as qty_consumed_per_unit from `tabBOM Explosion Item` bom_item, tabBOM bom where bom_item.parent = bom.name and bom.name = %s and bom.docstatus = 1""", bom_no, as_dict = 1) for d in child_fb_items: self.add_to_cur_exploded_items(frappe._dict({ 'item_code' : d['item_code'], 'item_name' : d['item_name'], 'source_warehouse' : d['source_warehouse'], 'description' : d['description'], 'stock_uom' : d['stock_uom'], 'stock_qty' : d['qty_consumed_per_unit'] * stock_qty, 'rate' : flt(d['rate']), })) def add_exploded_items(self): "Add items to Flat BOM table" frappe.db.sql("""delete from `tabBOM Explosion Item` where parent=%s""", self.name) self.set('exploded_items', []) for d in sorted(self.cur_exploded_items, key=itemgetter(0)): ch = 
self.append('exploded_items', {}) for i in self.cur_exploded_items[d].keys(): ch.set(i, self.cur_exploded_items[d][i]) ch.amount = flt(ch.stock_qty) * flt(ch.rate) ch.qty_consumed_per_unit = flt(ch.stock_qty) / flt(self.quantity) ch.docstatus = self.docstatus ch.db_insert() def validate_bom_links(self): if not self.is_active: act_pbom = frappe.db.sql("""select distinct bom_item.parent from `tabBOM Item` bom_item where bom_item.bom_no = %s and bom_item.docstatus = 1 and exists (select * from `tabBOM` where name = bom_item.parent and docstatus = 1 and is_active = 1)""", self.name) if act_pbom and act_pbom[0][0]: frappe.throw(_("Cannot deactivate or cancel BOM as it is linked with other BOMs")) def validate_operations(self): if self.with_operations and not self.get('operations'): frappe.throw(_("Operations cannot be left blank")) if self.with_operations: for d in self.operations: if not d.description: d.description = frappe.db.get_value('Operation', d.operation, 'description') def get_list_context(context): context.title = _("Bill of Materials") # context.introduction = _('Boms') def get_bom_items_as_dict(bom, company, qty=1, fetch_exploded=1, fetch_scrap_items=0): item_dict = {} # Did not use qty_consumed_per_unit in the query, as it leads to rounding loss query = """select bom_item.item_code, item.item_name, sum(bom_item.stock_qty/ifnull(bom.quantity, 1)) * %(qty)s as qty, item.description, item.image, item.stock_uom, item.default_warehouse, item.expense_account as expense_account, item.buying_cost_center as cost_center {select_columns} from `tab{table}` bom_item, `tabBOM` bom, `tabItem` item where bom_item.docstatus < 2 and bom.name = %(bom)s and bom_item.parent = bom.name and item.name = bom_item.item_code and is_stock_item = 1 {where_conditions} group by item_code, stock_uom""" if fetch_exploded: query = query.format(table="BOM Explosion Item", where_conditions="""and item.is_sub_contracted_item = 0""", select_columns = ", bom_item.source_warehouse") items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True) elif fetch_scrap_items: query = query.format(table="BOM Scrap Item", where_conditions="", select_columns="") items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True) else: query = query.format(table="BOM Item", where_conditions="", select_columns = ", bom_item.source_warehouse") items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True) for item in items: if item_dict.has_key(item.item_code): item_dict[item.item_code]["qty"] += flt(item.qty) else: item_dict[item.item_code] = item for item, item_details in item_dict.items(): for d in [["Account", "expense_account", "default_expense_account"], ["Cost Center", "cost_center", "cost_center"], ["Warehouse", "default_warehouse", ""]]: company_in_record = frappe.db.get_value(d[0], item_details.get(d[1]), "company") if not item_details.get(d[1]) or (company_in_record and company != company_in_record): item_dict[item][d[1]] = frappe.db.get_value("Company", company, d[2]) if d[2] else None return item_dict @frappe.whitelist() def get_bom_items(bom, company, qty=1, fetch_exploded=1): items = get_bom_items_as_dict(bom, company, qty, fetch_exploded).values() items.sort(lambda a, b: a.item_code > b.item_code and 1 or -1) return items def validate_bom_no(item, bom_no): """Validate BOM No of sub-contracted items""" bom = frappe.get_doc("BOM", bom_no) if not bom.is_active: frappe.throw(_("BOM {0} must be active").format(bom_no)) if bom.docstatus != 1: if not getattr(frappe.flags, "in_test", False): 
frappe.throw(_("BOM {0} must be submitted").format(bom_no)) if item and not (bom.item.lower() == item.lower() or \ bom.item.lower() == cstr(frappe.db.get_value("Item", item, "variant_of")).lower()): frappe.throw(_("BOM {0} does not belong to Item {1}").format(bom_no, item)) @frappe.whitelist() def get_children(): if frappe.form_dict.parent: return frappe.db.sql("""select bom_item.item_code, bom_item.bom_no as value, bom_item.stock_qty, if(ifnull(bom_item.bom_no, "")!="", 1, 0) as expandable, item.image, item.description from `tabBOM Item` bom_item, tabItem item where bom_item.parent=%s and bom_item.item_code = item.name order by bom_item.idx """, frappe.form_dict.parent, as_dict=True)
emakis/erpnext
erpnext/manufacturing/doctype/bom/bom.py
Python
gpl-3.0
19,770
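The BOM controller above discovers child BOMs breadth-first (traverse_tree) and guards against cycles (check_recursion) before rolling costs up. A minimal standalone sketch of that traversal logic, using a hypothetical in-memory parent-to-children map in place of the `tabBOM Item` table:

# Standalone sketch of the traverse_tree / check_recursion idea from bom.py above.
# The bom_children map and BOM names are made up; in ERPNext they come from
# `tabBOM Item` rows in the database.
bom_children = {
    "BOM-FG-001": ["BOM-SA-001", "BOM-SA-002"],
    "BOM-SA-001": ["BOM-RM-001"],
    "BOM-SA-002": [],
    "BOM-RM-001": [],
}


def traverse_tree(root):
    """Breadth-first walk of child BOMs, reversed so the deepest BOMs come
    first and costs can be rolled up bottom-to-top."""
    order, count = [root], 0
    while count < len(order):
        for child in bom_children.get(order[count], []):
            if child not in order:
                order.append(child)
        count += 1
    order.reverse()
    return order


def check_recursion(root):
    """Raise if the root BOM reappears anywhere among its own descendants."""
    seen, queue = set(), [root]
    while queue:
        current = queue.pop()
        for child in bom_children.get(current, []):
            if child == root:
                raise ValueError("BOM recursion: %s cannot be parent or child of itself" % root)
            if child not in seen:
                seen.add(child)
                queue.append(child)


print(traverse_tree("BOM-FG-001"))  # ['BOM-RM-001', 'BOM-SA-002', 'BOM-SA-001', 'BOM-FG-001']
check_recursion("BOM-FG-001")       # passes silently for an acyclic tree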
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2009 Sharoon Thomas
#    Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

from openerp.osv import fields, osv


class email_template_preview(osv.osv_memory):
    _inherit = "email.template"
    _name = "email_template.preview"
    _description = "Email Template Preview"

    def _get_records(self, cr, uid, context=None):
        """ Return Records of particular Email Template's Model """
        if context is None:
            context = {}
        template_id = context.get('template_id', False)
        if not template_id:
            return []
        email_template = self.pool.get('email.template')
        template = email_template.browse(cr, uid, int(template_id), context=context)
        template_object = template.model_id
        model = self.pool[template_object.model]
        record_ids = model.search(cr, uid, [], 0, 10, 'id', context=context)
        default_id = context.get('default_res_id')
        if default_id and default_id not in record_ids:
            record_ids.insert(0, default_id)
        return model.name_get(cr, uid, record_ids, context)

    def default_get(self, cr, uid, fields, context=None):
        if context is None:
            context = {}
        result = super(email_template_preview, self).default_get(cr, uid, fields, context=context)

        email_template = self.pool.get('email.template')
        template_id = context.get('template_id')
        if 'res_id' in fields and not result.get('res_id'):
            records = self._get_records(cr, uid, context=context)
            result['res_id'] = records and records[0][0] or False  # select first record as a Default
        if template_id and 'model_id' in fields and not result.get('model_id'):
            result['model_id'] = email_template.read(cr, uid, int(template_id), ['model_id'], context).get('model_id', False)
        return result

    _columns = {
        'res_id': fields.selection(_get_records, 'Sample Document'),
    }

    def on_change_res_id(self, cr, uid, ids, res_id, context=None):
        if context is None:
            context = {'value': {}}
        if not res_id or not context.get('template_id'):
            return {'value': {}}

        email_template = self.pool.get('email.template')
        template_id = context.get('template_id')
        template = email_template.browse(cr, uid, template_id, context=context)

        # generate and get template values
        mail_values = email_template.generate_email(cr, uid, template_id, res_id, context=context)
        vals = dict((field, mail_values.get(field, False)) for field in ('email_from', 'email_to', 'email_cc', 'reply_to', 'subject', 'body_html', 'partner_to'))
        vals['name'] = template.name
        return {'value': vals}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
jmesteve/saas3
openerp/addons/email_template/wizard/email_template_preview.py
Python
agpl-3.0
3,741
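on_change_res_id above boils the output of generate_email down to just the fields the preview form shows. A small self-contained sketch of that filtering step, with a made-up mail_values payload standing in for the real OpenERP rendering result:

# Sketch of the value-filtering step in on_change_res_id above. The mail_values
# payload is a fabricated example, not real output of email.template.generate_email.
PREVIEW_FIELDS = ('email_from', 'email_to', 'email_cc', 'reply_to',
                  'subject', 'body_html', 'partner_to')

mail_values = {
    'email_from': 'sales@example.com',
    'email_to': 'customer@example.com',
    'subject': 'Quotation SO001',
    'body_html': '<p>Dear customer, ...</p>',
    'auto_delete': True,  # keys outside PREVIEW_FIELDS are simply dropped
}

vals = dict((field, mail_values.get(field, False)) for field in PREVIEW_FIELDS)
print(vals['subject'])   # Quotation SO001
print(vals['email_cc'])  # False -- missing keys default to False, as in the wizard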
""" Serializers for all Course Enrollment related return objects. """ from rest_framework import serializers from student.models import CourseEnrollment from course_modes.models import CourseMode class StringListField(serializers.CharField): """Custom Serializer for turning a comma delimited string into a list. This field is designed to take a string such as "1,2,3" and turn it into an actual list [1,2,3] """ def field_to_native(self, obj, field_name): """ Serialize the object's class name. """ if not obj.suggested_prices: return [] items = obj.suggested_prices.split(',') return [int(item) for item in items] class CourseField(serializers.RelatedField): """Read-Only representation of course enrollment information. Aggregates course information from the CourseDescriptor as well as the Course Modes configured for enrolling in the course. """ def to_native(self, course): course_id = unicode(course.id) course_modes = ModeSerializer(CourseMode.modes_for_course(course.id)).data # pylint: disable=no-member return { "course_id": course_id, "enrollment_start": course.enrollment_start, "enrollment_end": course.enrollment_end, "invite_only": course.invitation_only, "course_modes": course_modes, } class CourseEnrollmentSerializer(serializers.ModelSerializer): """Serializes CourseEnrollment models Aggregates all data from the Course Enrollment table, and pulls in the serialization for the Course Descriptor and course modes, to give a complete representation of course enrollment. """ course_details = serializers.SerializerMethodField('get_course_details') user = serializers.SerializerMethodField('get_username') def get_course_details(self, model): field = CourseField() return field.to_native(model.course) def get_username(self, model): """Retrieves the username from the associated model.""" return model.username class Meta: # pylint: disable=missing-docstring model = CourseEnrollment fields = ('created', 'mode', 'is_active', 'course_details', 'user') lookup_field = 'username' class ModeSerializer(serializers.Serializer): """Serializes a course's 'Mode' tuples Returns a serialized representation of the modes available for course enrollment. The course modes models are designed to return a tuple instead of the model object itself. This serializer does not handle the model object itself, but the tuple. """ slug = serializers.CharField(max_length=100) name = serializers.CharField(max_length=255) min_price = serializers.IntegerField() suggested_prices = StringListField(max_length=255) currency = serializers.CharField(max_length=8) expiration_datetime = serializers.DateTimeField() description = serializers.CharField() sku = serializers.CharField()
eestay/edx-platform
common/djangoapps/enrollment/serializers.py
Python
agpl-3.0
3,036
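StringListField above only reshapes the stored suggested_prices string; the conversion itself is plain Python. A sketch of that conversion outside the DRF field machinery, with example values that are purely illustrative:

# Mirrors StringListField.field_to_native above without the DRF plumbing.
def suggested_prices_to_list(suggested_prices):
    """Turn a comma delimited string such as "25,50,100" into [25, 50, 100]."""
    if not suggested_prices:
        return []
    return [int(item) for item in suggested_prices.split(',')]


print(suggested_prices_to_list("25,50,100"))  # [25, 50, 100]
print(suggested_prices_to_list(""))           # []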
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Imagemagick(AutotoolsPackage):
    """ImageMagick is a software suite to create, edit, compose, or convert
    bitmap images."""

    homepage = "https://www.imagemagick.org"
    url      = "https://github.com/ImageMagick/ImageMagick/archive/7.0.2-7.tar.gz"

    version('7.0.8-7', sha256='fadb36b59f310e9eee5249ecb2326b323a64da6cc716dd6d08ece8ea2c780b81')
    version('7.0.5-9', sha256='b85b269e0ed1628e88e840053823f8a33c314b2271f04762f43d33e9d0b4d264')
    version('7.0.2-7', sha256='f2f18a97f861c1668befdaff0cc3aaafb2111847aab028a88b4c2cb017acfbaa')
    version('7.0.2-6', sha256='7d49ca8030f895c683cae69c52d8edfc4876de651f5b8bfdbea907e222480bd3')

    depends_on('jpeg')
    depends_on('pango')
    depends_on('libtool', type='build')
    depends_on('libtool', when='@7.0.8:', type=('build', 'link'))
    depends_on('libpng')
    depends_on('freetype')
    depends_on('fontconfig')
    depends_on('libtiff')
    depends_on('ghostscript')
    depends_on('ghostscript-fonts')
    depends_on('libsm')
    depends_on('pkgconfig', type='build')

    def configure_args(self):
        spec = self.spec

        gs_font_dir = join_path(spec['ghostscript-fonts'].prefix.share, "font")
        return [
            '--with-gs-font-dir={0}'.format(gs_font_dir)
        ]

    @property
    def libs(self):
        return find_libraries('libMagick*', root=self.prefix, recursive=True)
LLNL/spack
var/spack/repos/builtin/packages/imagemagick/package.py
Python
lgpl-2.1
1,598
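configure_args above assembles a single ./configure flag from the ghostscript-fonts install prefix. A plain-Python sketch of that assembly, with a hypothetical prefix path in place of the one Spack computes per installation:

# Sketch of the flag built by configure_args above. The prefix path is a
# made-up example; Spack derives the real one from the ghostscript-fonts spec.
import os

ghostscript_fonts_share = "/opt/spack/ghostscript-fonts-8.11-abcdef/share"
gs_font_dir = os.path.join(ghostscript_fonts_share, "font")

configure_args = ['--with-gs-font-dir={0}'.format(gs_font_dir)]
print(configure_args)
# ['--with-gs-font-dir=/opt/spack/ghostscript-fonts-8.11-abcdef/share/font']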
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for working with object-based checkpoints.

Visualization and inspection:
@@dot_graph_from_checkpoint
@@list_objects
@@object_metadata

Managing dependencies:
@@capture_dependencies
@@Checkpointable
@@CheckpointableBase
@@CheckpointableObjectGraph
@@NoDependency
@@split_dependency

Checkpointable data structures:
@@List
@@Mapping
@@UniqueNameTracker
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.checkpoint.python.containers import UniqueNameTracker
from tensorflow.contrib.checkpoint.python.split_dependency import split_dependency
from tensorflow.contrib.checkpoint.python.visualize import dot_graph_from_checkpoint
from tensorflow.core.protobuf.checkpointable_object_graph_pb2 import CheckpointableObjectGraph
from tensorflow.python.training.checkpointable.base import CheckpointableBase
from tensorflow.python.training.checkpointable.data_structures import List
from tensorflow.python.training.checkpointable.data_structures import Mapping
from tensorflow.python.training.checkpointable.tracking import Checkpointable
from tensorflow.python.training.checkpointable.tracking import NoDependency
from tensorflow.python.training.checkpointable.util import capture_dependencies
from tensorflow.python.training.checkpointable.util import list_objects
from tensorflow.python.training.checkpointable.util import object_metadata

from tensorflow.python.util.all_util import remove_undocumented

remove_undocumented(module_name=__name__)
gojira/tensorflow
tensorflow/contrib/checkpoint/__init__.py
Python
apache-2.0
2,226
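remove_undocumented at the bottom of the module above trims the public namespace down to the names referenced with an @@ prefix in the docstring. A runnable toy parser for that @@ convention; it is not TensorFlow's implementation, only an illustration of the naming scheme:

# Toy illustration of the "@@name" convention used by the module docstring above.
# TensorFlow's remove_undocumented does more bookkeeping; this only extracts names.
import re

docstring = """Tools for working with object-based checkpoints.

Visualization and inspection:
@@dot_graph_from_checkpoint
@@list_objects
@@object_metadata
"""

documented = re.findall(r"^@@(\w+)", docstring, flags=re.MULTILINE)
print(documented)  # ['dot_graph_from_checkpoint', 'list_objects', 'object_metadata']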
#!/usr/bin/env python # coding: utf-8 # # Copyright (c) Greenplum Inc 2008. All Rights Reserved. # import sys import unittest2 as unittest import tempfile, os, shutil from gppylib.commands.base import CommandResult, Command, ExecutionError from gppylib.operations.backup_utils import * from gppylib.operations.restore import * from gppylib.operations.restore import _build_gpdbrestore_cmd_line from gppylib.mainUtils import ExceptionNoStackTraceNeeded from mock import patch, MagicMock, Mock, mock_open, call class RestoreTestCase(unittest.TestCase): def setUp(self): context = Context() context.restore_db='testdb' context.include_dump_tables_file='/tmp/table_list.txt' context.master_datadir='/data/master/p1' context.batch_default=None context.timestamp = '20160101010101' context.no_analyze = True context.drop_db = True context.master_port = 5432 self.context = context self.restore = RestoreDatabase(self.context) self.validate_timestamp = ValidateTimestamp(self.context) def test_GetDbName_default(self): """ Basic test """ with tempfile.NamedTemporaryFile() as f: f.write(""" -- -- Database creation -- CREATE DATABASE monkey WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = thisguy; """) f.flush() self.assertTrue(GetDbName(f.name).run() == "monkey") def test_GetDbName_line_check(self): """ Verify that GetDbName looks no further than 50 lines. """ with tempfile.NamedTemporaryFile() as f: for i in range(0, 50): f.write("crap\n") f.write("CREATE DATABASE monkey") f.flush() try: GetDbName(f.name).run() except GetDbName.DbNameGiveUp, e: return self.fail("DbNameGiveUp should have been raised.") def test_GetDbName_no_name(self): """ Verify that GetDbName fails when cdatabase file ends prematurely. """ with tempfile.NamedTemporaryFile() as f: f.write("this is the whole file") f.flush() try: GetDbName(f.name).run() except GetDbName.DbNameNotFound, e: return self.fail("DbNameNotFound should have been raised.") @patch('gppylib.operations.restore.RestoreDatabase._process_createdb', side_effect=ExceptionNoStackTraceNeeded('Failed to create database')) @patch('time.sleep') def test_multitry_createdb_create_fails(self, mock1, mock2): self.assertRaises(ExceptionNoStackTraceNeeded, self.restore._multitry_createdb) @patch('gppylib.operations.restore.RestoreDatabase._process_createdb') def test_multitry_createdb_default(self, mock): self.restore._multitry_createdb() @patch('gppylib.operations.restore.get_partition_list', return_value=[('public', 't1'), ('public', 't2'), ('public', 't3')]) @patch('gppylib.operations.restore.get_full_timestamp_for_incremental', return_value='20160101000000') @patch('gppylib.operations.restore.get_incremental_restore_timestamps', return_value=['20160101010101', '20160101010111']) @patch('gppylib.operations.restore.get_dirty_table_file_contents', return_value=['public.t1', 'public.t2']) def test_create_restore_plan_default(self, mock1, mock2, mock3, mock4): expected = ["20160101010111:", "20160101010101:public.t1,public.t2", "20160101000000:public.t3"] m = mock_open() with patch('__builtin__.open', m, create=True): plan_file = create_restore_plan(self.context) result = m() self.assertEqual(len(expected), len(result.write.call_args_list)) for i in range(len(expected)): self.assertEqual(call(expected[i]+'\n'), result.write.call_args_list[i]) @patch('gppylib.operations.restore.get_full_timestamp_for_incremental', return_value='20160101000000') @patch('gppylib.operations.restore.get_incremental_restore_timestamps', return_value=['20160101010101', '20160101010111']) 
@patch('gppylib.operations.restore.get_dirty_table_file_contents', return_value=['public.t1', 'public.t2']) def test_create_restore_plan_empty_list(self, mock1, mock2, mock3): expected = ["20160101010111:", "20160101010101:", "20160101000000:"] m = mock_open() with patch('__builtin__.open', m, create=True): plan_file = create_restore_plan(self.context) result = m() self.assertEqual(len(expected), len(result.write.call_args_list)) for i in range(len(expected)): self.assertEqual(call(expected[i]+'\n'), result.write.call_args_list[i]) @patch('gppylib.operations.restore.get_partition_list', return_value=[]) @patch('gppylib.operations.restore.get_timestamp_from_increments_filename', return_value=None) def test_create_restore_plan_no_full_dump(self, mock1, mock2): with patch('__builtin__.open', mock_open(), create=True): with self.assertRaisesRegexp(Exception, 'Could not locate full backup associated with timestamp'): create_restore_plan(self.context) @patch('gppylib.operations.restore.get_partition_list', return_value=[]) @patch('gppylib.operations.restore.get_full_timestamp_for_incremental', return_value='20120101000000') @patch('gppylib.operations.restore.get_incremental_restore_timestamps', return_value=['20160101010101', '20160101010111']) @patch('gppylib.operations.restore.get_dirty_table_file_contents', return_value=['public.t1', 'public.t2']) @patch('gppylib.operations.restore.create_plan_file_contents') def test_create_restore_plan_empty_list_with_nbu(self, mock1, mock2, mock3, mock4, mock5): self.context.netbackup_service_host = 'mdw' self.context.netbackup_block_size = '1024' m = mock_open() with patch('__builtin__.open', m, create=True): plan_file = create_restore_plan(self.context) result = m() self.assertEqual(len(result.write.call_args_list), 0) @patch('gppylib.operations.restore.get_partition_list', return_value=[]) @patch('gppylib.operations.backup_utils.get_full_timestamp_for_incremental_with_nbu', return_value=None) def test_create_restore_plan_no_full_dump_with_nbu(self, mock1, mock2): self.context.netbackup_service_host = 'mdw' self.context.netbackup_block_size = '1024' with patch('__builtin__.open', mock_open(), create=True): with self.assertRaisesRegexp(Exception, 'Could not locate full backup associated with timestamp'): create_restore_plan(self.context) @patch('gppylib.operations.restore.get_lines_from_file', return_value=['20160101010110', '20160101010109', '20160101010108', '20160101010107', '20160101010106', '20160101010105', '20160101010104', '20160101010103', '20160101010102', '20160101010101']) def test_get_incremental_restore_timestamps_midway(self, mock): latest_full_timestamp = '20160101010101' self.context.timestamp = '20160101010105' increments = get_incremental_restore_timestamps(self.context, latest_full_timestamp) self.assertEqual(increments, ['20160101010105', '20160101010104', '20160101010103', '20160101010102', '20160101010101']) @patch('gppylib.operations.restore.get_lines_from_file', return_value=['20160101010110', '20160101010109', '20160101010108', '20160101010107', '20160101010106', '20160101010105', '20160101010104', '20160101010103', '20160101010102', '20160101010101']) def test_get_incremental_restore_timestamps_latest(self, mock): latest_full_timestamp = '20160101010101' self.context.timestamp = '20160101010110' increments = get_incremental_restore_timestamps(self.context, latest_full_timestamp) self.assertEqual(increments, ['20160101010110', '20160101010109', '20160101010108', '20160101010107', '20160101010106', '20160101010105', 
'20160101010104', '20160101010103', '20160101010102', '20160101010101']) @patch('gppylib.operations.restore.get_lines_from_file', return_value=[]) def test_get_incremental_restore_timestamps_earliest(self, mock): latest_full_timestamp = '20160101010101' self.context.timestamp = '20160101010100' increments = get_incremental_restore_timestamps(self.context, latest_full_timestamp) self.assertEqual(increments, []) @patch('gppylib.operations.restore.get_lines_from_file', side_effect=[['public.t1'], ['public.t1', 'public.t2', 'public.t3'], ['public.t2', 'public.t4']]) def test_create_plan_file_contents_with_file(self, mock): table_set_from_metadata_file = ['public.t1', 'public.t2', 'public.t3', 'public.t4'] incremental_restore_timestamps = ['20160101010113', '20160101010101', '20160101010111'] latest_full_timestamp = '20160101010110' expected_output = {'20160101010113': ['public.t1'], '20160101010101': ['public.t2', 'public.t3'], '20160101010111': ['public.t4'], '20160101010110': []} file_contents = create_plan_file_contents(self.context, table_set_from_metadata_file, incremental_restore_timestamps, latest_full_timestamp) self.assertEqual(file_contents, expected_output) def test_create_plan_file_contents_no_file(self): table_set_from_metadata_file = ['public.t1', 'public.t2', 'public.t3', 'public.t4'] incremental_restore_timestamps = [] latest_full_timestamp = '20160101010110' expected_output = {'20160101010110': ['public.t1', 'public.t2', 'public.t3', 'public.t4']} file_contents = create_plan_file_contents(self.context, table_set_from_metadata_file, incremental_restore_timestamps, latest_full_timestamp) self.assertEqual(file_contents, expected_output) @patch('gppylib.operations.restore.get_lines_from_file', side_effect=[['public.t1'], ['public.t1', 'public.t2', 'public.t3'], ['public.t2', 'public.t4']]) def test_create_plan_file_contents_no_metadata(self, mock): table_set_from_metadata_file = [] incremental_restore_timestamps = ['20160101010113', '20160101010101', '20160101010111'] latest_full_timestamp = '20160101010110' expected_output = {'20160101010101': [], '20160101010113': [], '20160101010111': [], '20160101010110': []} file_contents = create_plan_file_contents(self.context, table_set_from_metadata_file, incremental_restore_timestamps, latest_full_timestamp) self.assertEqual(file_contents, expected_output) @patch('gppylib.operations.restore.get_lines_from_file', side_effect=[['public.t1'], ['public.t1', 'public.t2', 'public.t3'], ['public.t2', 'public.t4']]) @patch('gppylib.operations.restore.restore_file_with_nbu') def test_create_plan_file_contents_with_nbu(self, mock1, mock2): self.context.netbackup_service_host = 'mdw' self.context.netbackup_block_size = '1024' table_set_from_metadata_file = [] incremental_restore_timestamps = ['20160101010113', '20160101010101', '20160101010111'] latest_full_timestamp = '20160101010110' expected_output = {'20160101010101': [], '20160101010113': [], '20160101010111': [], '20160101010110': []} file_contents = create_plan_file_contents(self.context, table_set_from_metadata_file, incremental_restore_timestamps, latest_full_timestamp) self.assertEqual(file_contents, expected_output) @patch('gppylib.operations.restore.write_lines_to_file') def test_write_to_plan_file_default(self, mock1): plan_file = 'blah' plan_file_contents = {'20160101010113': ['public.t1'], '20160101010101': ['public.t2', 'public.t3'], '20160101010111': ['public.t4']} expected_output = ['20160101010113:public.t1', '20160101010111:public.t4', '20160101010101:public.t2,public.t3'] 
file_contents = write_to_plan_file(plan_file_contents, plan_file) self.assertEqual(expected_output, file_contents) @patch('gppylib.operations.restore.write_lines_to_file') def test_write_to_plan_file_empty_list(self, mock1): plan_file = 'blah' plan_file_contents = {} expected_output = [] file_contents = write_to_plan_file(plan_file_contents, plan_file) self.assertEqual(expected_output, file_contents) @patch('gppylib.operations.restore.write_lines_to_file') def test_write_to_plan_file_no_plan_file(self, mock1): plan_file = None plan_file_contents = {} with self.assertRaisesRegexp(Exception, 'Invalid plan file .*'): write_to_plan_file(plan_file_contents, plan_file) @patch('gppylib.operations.restore.get_lines_from_file', return_value=['public.t1', 'public.t2']) def test_get_partition_list_default(self, mock): partition_list = get_partition_list(self.context) self.assertEqual(partition_list, [('public', 't1'), ('public', 't2')]) @patch('gppylib.operations.restore.get_lines_from_file', return_value=[]) def test_get_partition_list_no_partitions(self, mock): partition_list = get_partition_list(self.context) self.assertEqual(partition_list, []) @patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Incremental']) @patch('os.path.isfile', return_value=True) def test_is_incremental_restore_default(self, mock1, mock2): self.assertTrue(is_incremental_restore(self.context)) @patch('gppylib.operations.restore.get_lines_from_file') @patch('gppylib.operations.restore.check_backup_type', return_value=True) @patch('os.path.isfile', return_value=True) def test_is_incremental_restore_bypass_file_incremental(self, mock1, mock2, mock3): self.assertTrue(is_incremental_restore(self.context)) @patch('os.path.isfile', return_value=True) @patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Full']) def test_is_incremental_restore_full_backup(self, mock1, mock2): self.assertFalse(is_incremental_restore(self.context)) @patch('os.path.isfile', return_value=True) @patch('gppylib.operations.restore.get_lines_from_file') @patch('gppylib.operations.restore.check_backup_type', return_value=False) def test_is_incremental_restore_bypass_file_full(self, mock1, mock2, mock3): self.assertFalse(is_incremental_restore(self.context)) @patch('os.path.isfile', return_value=False) def test_is_incremental_restore_no_file(self, mock1): self.assertFalse(is_incremental_restore(self.context)) @patch('os.path.isfile', return_value=True) @patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Full']) @patch('os.path.isfile', return_value=True) def test_is_full_restore_default(self, mock1, mock2, mock3): self.assertTrue(is_full_restore(self.context)) @patch('gppylib.operations.restore.get_lines_from_file') @patch('gppylib.operations.restore.check_backup_type', return_value=True) @patch('os.path.isfile', return_value=True) def test_is_full_restore_bypass_file_full(self, mock1, mock2, mock3): self.assertTrue(is_full_restore(self.context)) @patch('os.path.isfile', return_value=True) @patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Incremental']) def test_is_full_restore_incremental(self, mock1, mock2): self.assertFalse(is_full_restore(self.context)) @patch('os.path.isfile', return_value=True) @patch('gppylib.operations.restore.get_lines_from_file') @patch('gppylib.operations.restore.check_backup_type', return_value=False) def test_is_full_restore_bypass_file_incremental(self, mock1, mock2, mock3): 
self.assertFalse(is_full_restore(self.context)) @patch('os.path.isfile', return_value=False) def test_is_full_restore_no_file(self, mock1): filename = self.context.generate_filename("report") with self.assertRaisesRegexp(Exception, 'Report file %s does not exist' % filename): is_full_restore(self.context) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_schema_only_restore_string_default(self, mock1, mock2): self.context.backup_dir = None table_filter_file = None full_restore_with_filter = False metadata_file = self.context.generate_filename("metadata") expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_schema_only_restore_string_no_compression(self, mock1, mock2): self.context.backup_dir = None self.context.compress = False table_filter_file = None full_restore_with_filter = False metadata_file = self.context.generate_filename("metadata") expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 -d "testdb"' % metadata_file restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=True) def test_create_schema_only_restore_string_backup_dir(self, mock1, mock2, mock3): table_filter_file = None full_restore_with_filter = False self.context.report_status_dir = "/data/master/p1/db_dumps/20160101" metadata_file = self.context.generate_filename("metadata") expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-r=/data/master/p1/db_dumps/20160101 --status=/data/master/p1/db_dumps/20160101 --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False) def test_create_schema_only_restore_string_prefix(self, mock1, mock2, mock3): self.context.dump_prefix = 'bar_' table_filter_file = 'filter_file1' metadata_file = self.context.generate_filename("metadata") full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 --prefix=bar_ --gp-f=%s --gp-c -d "testdb"' % (metadata_file, table_filter_file) restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') 
@patch('gppylib.operations.restore.getpass.getuser', return_value='user') @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False) def test_create_schema_only_restore_string_no_filter_file(self, mock1, mock2, mock3): table_filter_file = None metadata_file = self.context.generate_filename("metadata") full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_schema_only_restore_string_different_status_dir(self, mock1, mock2): self.context.report_status_dir = '/tmp' table_filter_file = None full_restore_with_filter = False metadata_file = self.context.generate_filename("metadata") expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-r=/tmp --status=/tmp --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_schema_only_restore_string_status_dir_with_filter(self, mock1, mock2): self.context.report_status_dir = '/tmp' table_filter_file = None full_restore_with_filter = True metadata_file = self.context.generate_filename("metadata") expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s -P --gp-r=/tmp --status=/tmp --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_schema_only_restore_string_with_nbu(self, mock1, mock2): table_filter_file = None full_restore_with_filter = False self.context.netbackup_service_host = "mdw" metadata_file = self.context.generate_filename("metadata") expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 --gp-c -d "testdb" --netbackup-service-host=mdw' % metadata_file restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_schema_only_restore_string_with_ddboost(self, mock1, mock2): self.context.report_status_dir = '/tmp' table_filter_file = None full_restore_with_filter = True self.context.ddboost = True self.context.dump_dir = '/backup/DCA-35' metadata_file = self.context.generate_filename("metadata") expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s -P --gp-r=/tmp --status=/tmp --gp-d=/backup/DCA-35/20160101 --gp-c -d "testdb" --ddboost' % metadata_file 
restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_post_data_schema_only_restore_string_default(self, mock1, mock2): table_filter_file = None full_restore_with_filter = True expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p -P --gp-c -d "testdb"' restore_line = self.restore.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=True) def test_create_post_data_schema_only_restore_string_no_filter(self, mock1, mock2, mock3): table_filter_file = None full_restore_with_filter = False self.context.report_status_dir="/data/master/p1/db_dumps/20160101" expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --gp-r=/data/master/p1/db_dumps/20160101 --status=/data/master/p1/db_dumps/20160101 --gp-c -d "testdb"' restore_line = self.restore.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False) def test_create_post_data_schema_only_restore_string_with_prefix(self, mock1, mock2, mock3): self.context.dump_prefix = 'bar_' table_filter_file = None full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --prefix=bar_ --gp-c -d "testdb"' restore_line = self.restore.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False) def test_create_post_data_schema_only_restore_string_with_prefix_and_filter(self, mock1, mock2, mock3): self.context.dump_prefix = 'bar_' table_filter_file = 'filter_file1' full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --prefix=bar_ --gp-f=%s --gp-c -d "testdb"' % (table_filter_file) restore_line = self.restore.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False) def test_create_post_data_schema_only_restore_string_no_backup_dir(self, mock1, mock2, mock3): table_filter_file = None full_restore_with_filter = False 
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --gp-c -d "testdb"' restore_line = self.restore.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_post_data_schema_only_restore_string_different_status_dir(self, mock1, mock2): self.context.report_status_dir = '/tmp' table_filter_file = None full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --gp-r=/tmp --status=/tmp --gp-c -d "testdb"' restore_line = self.restore.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_post_data_schema_only_restore_string_status_dir_and_filter(self, mock1, mock2): self.context.report_status_dir = '/tmp' table_filter_file = None full_restore_with_filter = True expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p -P --gp-r=/tmp --status=/tmp --gp-c -d "testdb"' restore_line = self.restore.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_post_data_schema_only_restore_string_with_ddboost(self, mock1, mock2): self.context.report_status_dir = '/tmp' table_filter_file = None full_restore_with_filter = True self.context.ddboost = True expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p -P --gp-r=/tmp --status=/tmp --gp-c -d "testdb" --ddboost' restore_line = self.restore.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_post_data_schema_only_restore_string_with_nbu(self, mock1, mock2): table_filter_file = None full_restore_with_filter = True self.context.backup_dir = None self.context.netbackup_service_host = "mdw" self.context.netbackup_block_size = 1024 expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p -P --gp-c -d "testdb" --netbackup-service-host=mdw --netbackup-block-size=1024' restore_line = self.restore.create_post_data_schema_only_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_build_gpdbrestore_cmd_line_default(self, mock1, mock2): ts = '20160101010101' self.context.backup_dir = None expected_output = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name' restore_line = 
_build_gpdbrestore_cmd_line(self.context, ts, 'foo') self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_build_gpdbrestore_cmd_line_backup_dir(self, mock1, mock2): ts = '20160101010101' self.context.backup_dir = '/tmp' expected_output = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name -u /tmp' restore_line = _build_gpdbrestore_cmd_line(self.context, ts, 'foo') self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_build_gpdbrestore_cmd_line_report_status_dir(self, mock1, mock2): ts = '20160101010101' self.context.backup_dir = None self.context.report_status_dir = '/tmp' expected_output = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name --report-status-dir=/tmp' restore_line = _build_gpdbrestore_cmd_line(self.context, ts, 'foo') self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_build_gpdbrestore_cmd_line_redirected_restore(self, mock1, mock2): ts = '20160101010101' self.context.backup_dir = None self.context.redirected_restore_db = "redb" expected_output = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name --redirect=redb' restore_line = _build_gpdbrestore_cmd_line(self.context, ts, 'foo') self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_build_gpdbrestore_cmd_line_with_ddboost(self, mock1, mock2): ts = '20160101010101' self.context.backup_dir = None self.context.ddboost = True self.context.report_status_dir = '/tmp' expected_output = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name --report-status-dir=/tmp --ddboost' ddboost = True restore_line = _build_gpdbrestore_cmd_line(self.context, ts, 'foo') self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_no_filter_file(self, mock1, mock2): self.context.no_plan = True table_filter_file = None full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_default(self, mock1, mock2): self.context.no_plan = True table_filter_file = '/tmp/foo' full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-f=/tmp/foo --gp-c -d "testdb" -a' restore_line = 
self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_with_ddboost(self, mock1, mock2): self.context.no_plan = True table_filter_file = None full_restore_with_filter = False self.context.ddboost = True expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a --ddboost' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_different_status_dir(self, mock1, mock2): self.context.no_plan = True self.context.report_status_dir = '/tmp' table_filter_file = None full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-r=/tmp --status=/tmp --gp-c -d "testdb" -a' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_no_filter(self, mock1, mock2): self.context.no_plan = True table_filter_file = None full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_no_filter_file(self, mock1, mock2): self.context.no_plan = True table_filter_file = '/tmp/foo' full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-f=/tmp/foo --gp-c -d "testdb" -a' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_ddboost_and_prefix(self, mock1, mock2): self.context.no_plan = True table_filter_file = None self.context.dump_prefix = 'bar_' full_restore_with_filter = False self.context.ddboost = True expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --prefix=bar_ --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a --ddboost' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=True) def 
test_create_restore_string_backup_dir(self, mock1, mock2, mock3): self.context.no_plan = True table_filter_file = None self.context.backup_dir = '/tmp' full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=/tmp/db_dumps/20160101 --gp-r=/tmp/db_dumps/20160101 --status=/tmp/db_dumps/20160101 --gp-c -d "testdb" -a' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_no_ao_stats(self, mock1, mock2): self.context.no_plan = True self.context.no_ao_stats = True table_filter_file = None self.context.report_status_dir = '/tmp' self.context.backup_dir = '/foo' full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=/foo/db_dumps/20160101 --gp-r=/tmp --status=/tmp --gp-c -d "testdb" -a --gp-nostats' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_with_plan(self, mock1, mock2): table_filter_file = None self.context.report_status_dir = '/tmp' self.context.backup_dir = '/foo' full_restore_with_filter = True expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=/foo/db_dumps/20160101 --gp-r=/tmp --status=/tmp --gp-c -d "testdb" -a' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_with_nbu(self, mock1, mock2): self.context.no_plan = True table_filter_file = None self.context.report_status_dir = '/tmp' self.context.backup_dir = '/foo' self.context.netbackup_service_host = "mdw" full_restore_with_filter = False expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=/foo/db_dumps/20160101 --gp-r=/tmp --status=/tmp --gp-c -d "testdb" -a --netbackup-service-host=mdw' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter) self.assertEqual(restore_line, expected_output) # Test to verify the command line for gp_restore @patch('gppylib.operations.restore.socket.gethostname', return_value='host') @patch('gppylib.operations.restore.getpass.getuser', return_value='user') def test_create_restore_string_change_schema(self, mock1, mock2): self.context.no_plan = True table_filter_file = None full_restore_with_filter = False change_schema_file = 'newschema' expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a --change-schema-file=newschema' restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter, change_schema_file) self.assertEqual(restore_line, expected_output) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo') def test_get_plan_file_contents_no_file(self, 
mock1): with self.assertRaisesRegexp(Exception, 'Plan file foo does not exist'): get_plan_file_contents(self.context) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo') @patch('gppylib.operations.restore.get_lines_from_file', return_value=[]) @patch('os.path.isfile', return_value=True) def test_get_plan_file_contents_empty_file(self, mock1, mock2, mock3): with self.assertRaisesRegexp(Exception, 'Plan file foo has no contents'): get_plan_file_contents(self.context) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo') @patch('gppylib.operations.restore.get_lines_from_file', return_value=['20160101010101:t1,t2', '20160101010111:t3,t4', '20160101121210:t5,t6,t7']) @patch('os.path.isfile', return_value=True) def test_get_plan_file_contents_default(self, mock1, mock2, mock3): expected_output = [('20160101010101','t1,t2'), ('20160101010111','t3,t4'), ('20160101121210','t5,t6,t7')] output = get_plan_file_contents(self.context) self.assertEqual(output, expected_output) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo') @patch('gppylib.operations.restore.get_lines_from_file', return_value=['20160101010101:', '20160101010111', '20160101121210:']) @patch('os.path.isfile', return_value=True) def test_get_plan_file_contents_invalid_format(self, mock1, mock2, mock3): with self.assertRaisesRegexp(Exception, 'Invalid plan file format'): get_plan_file_contents(self.context) @patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20160101010101', 't1,t2'), ('20160101010111', 't3,t4'), ('20160101121210', 't5,t6,t7')]) @patch('gppylib.operations.restore.Command.run') @patch('gppylib.operations.restore.update_ao_statistics') def test_restore_incremental_data_only_default(self, mock1, mock2, mock3): results = self.restore.restore_incremental_data_only() self.assertTrue(results) @patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20160101010101', ''), ('20160101010111', ''), ('20160101121210', '')]) @patch('os.path.isfile', return_value=True) @patch('gppylib.operations.restore.update_ao_statistics') def test_restore_incremental_data_only_no_tables(self, mock1, mock2, mock3): with self.assertRaisesRegexp(Exception, 'There were no tables to restore. 
Check the plan file contents for restore timestamp 20160101010101'): self.restore.restore_incremental_data_only() @patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20160101010101', 't1,t2'), ('20160101010111', 't3,t4'), ('20160101121210', 't5,t6,t7')]) @patch('gppylib.operations.restore.Command.run', side_effect=Exception('Error executing gpdbrestore')) @patch('gppylib.operations.restore.update_ao_statistics') def test_restore_incremental_data_only_error(self, mock1, mock2, mock3): with self.assertRaisesRegexp(Exception, 'Error executing gpdbrestore'): self.restore.restore_incremental_data_only() def test_create_filter_file_no_tables(self): self.context.restore_tables = None self.assertEquals(self.restore.create_filter_file(), None) @patch('gppylib.operations.restore.get_all_segment_addresses', return_value=['host1']) @patch('gppylib.operations.restore.scp_file_to_hosts') def test_create_filter_file_default(self, m1, m2): self.context.restore_tables = ['public.ao1', 'testschema.heap1'] m = mock_open() with patch('tempfile.NamedTemporaryFile', m, create=True): fname = self.restore.create_filter_file() result = m() self.assertEqual(len(self.context.restore_tables), len(result.write.call_args_list)) for i in range(len(self.context.restore_tables)): self.assertEqual(call(self.context.restore_tables[i]+'\n'), result.write.call_args_list[i]) @patch('gppylib.operations.restore.get_lines_from_file', return_value = ['public.t1', 'public.t2', 'public.t3']) @patch('os.path.isfile', return_value = True) def test_get_restore_tables_from_table_file_default(self, mock1, mock2): table_file = '/foo' expected_result = ['public.t1', 'public.t2', 'public.t3'] result = get_restore_tables_from_table_file(table_file) self.assertEqual(expected_result, result) @patch('os.path.isfile', return_value = False) def test_get_restore_tables_from_table_file_no_file(self, mock): table_file = '/foo' expected_result = ['public.t1', 'public.t2', 'public.t3'] with self.assertRaisesRegexp(Exception, 'Table file does not exist'): result = get_restore_tables_from_table_file(table_file) def test_check_table_name_format_and_duplicate_missing_schema(self): table_list = ['publicao1', 'public.ao2'] with self.assertRaisesRegexp(Exception, 'No schema name supplied'): check_table_name_format_and_duplicate(table_list, None) def test_check_table_name_format_and_duplicate_default(self): table_list = ['public.ao1', 'public.ao2'] check_table_name_format_and_duplicate(table_list, []) def test_check_table_name_format_and_duplicate_no_tables(self): table_list = [] schema_list = [] check_table_name_format_and_duplicate(table_list, schema_list) def test_check_table_name_format_and_duplicate_duplicate_tables(self): table_list = ['public.ao1', 'public.ao1'] resolved_list, _ = check_table_name_format_and_duplicate(table_list, []) self.assertEqual(resolved_list, ['public.ao1']) def test_check_table_name_format_and_duplicate_funny_chars(self): table_list = [' `"@#$%^&( )_|:;<>?/-+={}[]*1Aa . `"@#$%^&( )_|:;<>?/-+={}[]*1Aa ', 'schema.ao1'] schema_list = ['schema'] resolved_table_list, resolved_schema_list = check_table_name_format_and_duplicate(table_list, schema_list) self.assertEqual(resolved_table_list, [' `"@#$%^&( )_|:;<>?/-+={}[]*1Aa . 
`"@#$%^&( )_|:;<>?/-+={}[]*1Aa ']) self.assertEqual(resolved_schema_list, ['schema']) def test_validate_tablenames_exist_in_dump_file_no_tables(self): dumped_tables = [] table_list = ['schema.ao'] with self.assertRaisesRegexp(Exception, 'No dumped tables to restore.'): validate_tablenames_exist_in_dump_file(table_list, dumped_tables) def test_validate_tablenames_exist_in_dump_file_one_table(self): dumped_tables = [('schema', 'ao', 'gpadmin')] table_list = ['schema.ao'] validate_tablenames_exist_in_dump_file(table_list, dumped_tables) def test_validate_tablenames_exist_in_dump_file_nonexistent_table(self): dumped_tables = [('schema', 'ao', 'gpadmin')] table_list = ['schema.ao', 'schema.co'] with self.assertRaisesRegexp(Exception, "Tables \['schema.co'\] not found in backup"): validate_tablenames_exist_in_dump_file(table_list, dumped_tables) def test_get_restore_table_list_default(self): table_list = ['public.ao_table', 'public.ao_table2', 'public.co_table', 'public.heap_table'] restore_tables = ['public.ao_table2', 'public.co_table'] m = mock_open() with patch('tempfile.NamedTemporaryFile', m, create=True): result = get_restore_table_list(table_list, restore_tables) result = m() self.assertEqual(len(restore_tables), len(result.write.call_args_list)) for i in range(len(restore_tables)): self.assertEqual(call(restore_tables[i]+'\n'), result.write.call_args_list[i]) def test_get_restore_table_list_no_restore_tables(self): table_list = ['public.ao_table', 'public.ao_table2', 'public.co_table', 'public.heap_table'] restore_tables = None m = mock_open() with patch('tempfile.NamedTemporaryFile', m, create=True): result = get_restore_table_list(table_list, restore_tables) result = m() self.assertEqual(len(table_list), len(result.write.call_args_list)) for i in range(len(table_list)): self.assertEqual(call(table_list[i]+'\n'), result.write.call_args_list[i]) def test_get_restore_table_list_extra_restore_tables(self): table_list = ['public.ao_table', 'public.ao_table2', 'public.co_table', 'public.heap_table'] restore_tables = ['public.ao_table2', 'public.co_table', 'public.ao_table3'] expected = ['public.ao_table2', 'public.co_table'] m = mock_open() with patch('tempfile.NamedTemporaryFile', m, create=True): result = get_restore_table_list(table_list, restore_tables) result = m() self.assertEqual(len(expected), len(result.write.call_args_list)) for i in range(len(expected)): self.assertEqual(call(expected[i]+'\n'), result.write.call_args_list[i]) def test_validate_restore_tables_list_default(self): plan_file_contents = [('20160101121213', 'public.t1'), ('20160101010101', 'public.t2,public.t3'), ('20160101010101', 'public.t4')] restore_tables = ['public.t1', 'public.t2'] validate_restore_tables_list(plan_file_contents, restore_tables) def test_validate_restore_tables_list_invalid_tables(self): plan_file_contents = [('20160101121213', 'public.t1'), ('20160101010101', 'public.t2,public.t3'), ('20160101010101', 'public.t4')] restore_tables = ['public.t5', 'public.t2'] with self.assertRaisesRegexp(Exception, 'Invalid tables for -T option: The following tables were not found in plan file'): validate_restore_tables_list(plan_file_contents, restore_tables) @patch('os.path.exists', return_value=False) def test_restore_global_no_file(self, mock): with self.assertRaisesRegexp(Exception, 'Unable to locate global file /data/master/p1/db_dumps/20160101/gp_global_1_1_20160101010101 in dump set'): self.restore._restore_global(self.context) @patch('os.path.exists', return_value=True) 
@patch('gppylib.commands.gp.Psql.run') def test_restore_global_default(self, mock1, mock2): self.restore._restore_global(self.context) # should not error out @patch('gppylib.operations.restore.execSQLForSingleton') @patch('pygresql.pgdb.pgdbCnx.commit') def test_update_ao_stat_func_default(self, m1, m2): conn = None ao_schema = 'schema' ao_table = 'table' counter = 1 batch_size = 1000 update_ao_stat_func(conn, ao_schema, ao_table, counter, batch_size) @patch('pygresql.pgdb.pgdbCnx.commit') @patch('gppylib.operations.restore.execSQLForSingleton') def test_update_ao_stat_func_near_batch_size(self, m1, m2): conn = None ao_table = 'table' ao_schema = 'schema' counter = 999 batch_size = 1000 update_ao_stat_func(conn, ao_schema, ao_table, counter, batch_size) @patch('gppylib.operations.restore.execSQLForSingleton') @patch('pygresql.pgdb.pgdbCnx.commit') def test_update_ao_stat_func_equal_batch_size(self, m1, m2): conn = None ao_table = 'table' ao_schema = 'schema' counter = 1000 batch_size = 1000 with self.assertRaisesRegexp(AttributeError, "'NoneType' object has no attribute 'commit'"): update_ao_stat_func(conn, ao_schema, ao_table, counter, batch_size) @patch('gppylib.operations.restore.execSQLForSingleton') @patch('pygresql.pgdb.pgdbCnx.commit') def test_update_ao_stat_func_over_batch_size(self, m1, m2): conn = None ao_table = 'table' ao_schema = 'schema' counter = 1001 batch_size = 1000 update_ao_stat_func(conn, ao_schema, ao_table, counter, batch_size) @patch('gppylib.operations.restore.execSQLForSingleton') @patch('pygresql.pgdb.pgdbCnx.commit') def test_update_ao_stat_func_double_batch_size(self, m1, m2): conn = None ao_table = 'table' ao_schema = 'schema' counter = 2000 batch_size = 1000 with self.assertRaisesRegexp(AttributeError, "'NoneType' object has no attribute 'commit'"): update_ao_stat_func(conn, ao_schema, ao_table, counter, batch_size) @patch('gppylib.operations.restore.execute_sql', return_value=[['t1', 'public']]) @patch('gppylib.operations.restore.dbconn.connect') @patch('gppylib.operations.restore.update_ao_stat_func') def test_update_ao_statistics_default(self, m1, m2, m3): restored_tables = [] update_ao_statistics(self.context, restored_tables) update_ao_statistics(self.context, restored_tables=['public.t1'], restored_schema=[], restore_all=False) update_ao_statistics(self.context, restored_tables=[], restored_schema=['public'], restore_all=False) update_ao_statistics(self.context, restored_tables=[], restored_schema=[], restore_all=True) def test_generate_restored_tables_no_table(self): results = [['t1','public'], ['t2', 'public'], ['foo', 'bar']] tables = generate_restored_tables(results, restored_tables=[], restored_schema=[], restore_all=False) self.assertEqual(tables, set()) def test_generate_restored_tables_specified_table(self): results = [['t1','public'], ['t2', 'public'], ['foo', 'bar']] tables = generate_restored_tables(results, restored_tables=['public.t1'], restored_schema=[], restore_all=False) self.assertEqual(tables, set([('public','t1')])) def test_generate_restored_tables_specified_schema(self): results = [['t1','public'], ['t2', 'public'], ['foo', 'bar']] tables = generate_restored_tables(results, restored_tables=[], restored_schema=['public'], restore_all=False) self.assertEqual(tables, set([('public','t1'), ('public', 't2')])) def test_generate_restored_tables_full_restore(self): results = [['t1','public'], ['t2', 'public'], ['foo', 'bar']] tables = generate_restored_tables(results, restored_tables=[], restored_schema=[], restore_all=True) 
self.assertEqual(tables, set([('public','t1'), ('public', 't2'), ('bar', 'foo')])) @patch('gppylib.operations.restore.dbconn.connect') @patch('gppylib.db.dbconn.execSQLForSingleton', return_value=5) def test_check_gp_toolkit_true(self, m1, m2): self.assertTrue(self.restore.check_gp_toolkit()) @patch('gppylib.operations.restore.dbconn.connect') @patch('gppylib.db.dbconn.execSQLForSingleton', return_value=0) def test_check_gp_toolkit_false(self, m1, m2): self.assertFalse(self.restore.check_gp_toolkit()) @patch('gppylib.operations.backup_utils.dbconn.DbURL') @patch('gppylib.operations.backup_utils.dbconn.connect') @patch('gppylib.operations.restore.execSQL') def test_analyze_restore_tables_default(self, mock1, mock2, mock3): self.context.restore_tables = ['public.t1', 'public.t2'] self.restore._analyze_restore_tables() @patch('gppylib.operations.restore.execSQL', side_effect=Exception('analyze failed')) @patch('gppylib.operations.backup_utils.dbconn.DbURL') @patch('gppylib.operations.backup_utils.dbconn.connect') def test_analyze_restore_tables_analyze_failed(self, mock1, mock2, mock3): self.context.restore_tables = ['public.t1', 'public.t2'] self.assertRaises(Exception, self.restore._analyze_restore_tables) @patch('gppylib.operations.backup_utils.execSQL') @patch('gppylib.operations.backup_utils.dbconn.DbURL', side_effect=Exception('Failed')) @patch('gppylib.operations.backup_utils.dbconn.connect') def test_analyze_restore_tables_connection_failed(self, mock1, mock2, mock3): self.context.restore_tables = ['public.t1', 'public.t2'] self.assertRaises(Exception, self.restore._analyze_restore_tables) @patch('gppylib.operations.backup_utils.dbconn.DbURL') @patch('gppylib.operations.backup_utils.dbconn.connect') @patch('gppylib.operations.restore.execSQL') def test_analyze_restore_tables_three_batches(self, mock1, mock2, mock3): self.context.restore_tables = ['public.t%d' % i for i in range(3002)] expected_batch_count = 3 batch_count = self.restore._analyze_restore_tables() self.assertEqual(batch_count, expected_batch_count) @patch('gppylib.operations.backup_utils.dbconn.DbURL') @patch('gppylib.operations.backup_utils.dbconn.connect') @patch('gppylib.operations.backup_utils.dbconn.execSQL') def test_analyze_restore_tables_change_schema(self, mock1, mock2, mock3): self.context.restore_tables = ['public.t1', 'public.t2'] self.context.change_schema = 'newschema' self.restore._analyze_restore_tables() @patch('os.path.exists', side_effect=[True, False]) def test_validate_metadata_file_with_compression_exists(self, mock): compressed_file = 'compressed_file.gz' self.assertTrue(self.validate_timestamp.validate_metadata_file(compressed_file)) @patch('os.path.exists', side_effect=[False, False]) def test_validate_metadata_file_with_compression_doesnt_exists(self, mock): compressed_file = 'compressed_file.gz' with self.assertRaisesRegexp(ExceptionNoStackTraceNeeded, 'Unable to find compressed_file or compressed_file.gz'): self.validate_timestamp.validate_metadata_file(compressed_file) @patch('os.path.exists', side_effect=[False, True]) def test_validate_metadata_file_without_compression_exists(self, mock): compressed_file = 'compressed_file.gz' self.assertFalse(self.validate_timestamp.validate_metadata_file(compressed_file)) @patch('os.path.exists', side_effect=[False, False]) def test_validate_metadata_file_without_compression_doesnt_exist(self, mock): compressed_file = 'compressed_file.gz' with self.assertRaisesRegexp(ExceptionNoStackTraceNeeded, 'Unable to find compressed_file or compressed_file.gz'): 
self.validate_timestamp.validate_metadata_file(compressed_file) @patch('gppylib.operations.restore.restore_file_with_nbu') def test_restore_state_files_with_nbu_default(self, mock1): self.context.netbackup_service_host = "mdw" restore_state_files_with_nbu(self.context) self.assertEqual(mock1.call_count, 3) calls = ["ao", "co", "last_operation"] for i in range(len(mock1.call_args_list)): self.assertEqual(mock1.call_args_list[i], call(self.context, calls[i])) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema') @patch('gppylib.commands.base.Command.run') def test_restore_file_with_nbu_default(self, mock1, mock2): self.context.netbackup_service_host = "mdw" cmdStr = "gp_bsa_restore_agent --netbackup-service-host mdw --netbackup-filename /tmp/foo_schema > /tmp/foo_schema" with patch.object(Command, '__init__', return_value=None) as cmd: restore_file_with_nbu(self.context, "schema") cmd.assert_called_with("restoring metadata files to master", cmdStr) self.assertEqual(mock2.call_count, 1) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='') @patch('gppylib.commands.base.Command.run') def test_restore_file_with_nbu_no_filetype(self, mock1, mock2): self.context.netbackup_service_host = "mdw" self.context.netbackup_block_size = 100 cmdStr = "gp_bsa_restore_agent --netbackup-service-host mdw --netbackup-block-size 100 --netbackup-filename /tmp/foo_schema > /tmp/foo_schema" with patch.object(Command, '__init__', return_value=None) as cmd: restore_file_with_nbu(self.context, path="/tmp/foo_schema") cmd.assert_called_with("restoring metadata files to master", cmdStr) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema') @patch('gppylib.commands.base.Command.run') def test_restore_file_with_nbu_no_path(self, mock1, mock2): self.context.netbackup_service_host = "mdw" self.context.netbackup_block_size = 100 cmdStr = "gp_bsa_restore_agent --netbackup-service-host mdw --netbackup-block-size 100 --netbackup-filename /tmp/foo_schema > /tmp/foo_schema" with patch.object(Command, '__init__', return_value=None) as cmd: restore_file_with_nbu(self.context, "schema") cmd.assert_called_with("restoring metadata files to master", cmdStr) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo_schema') @patch('gppylib.commands.base.Command.run') def test_restore_file_with_nbu_both_args(self, mock1, mock2): with self.assertRaisesRegexp(Exception, 'Cannot supply both a file type and a file path to restore_file_with_nbu'): restore_file_with_nbu(self.context, "schema", "/tmp/foo_schema") @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo_schema') @patch('gppylib.commands.base.Command.run') def test_restore_file_with_nbu_neither_arg(self, mock1, mock2): with self.assertRaisesRegexp(Exception, 'Cannot call restore_file_with_nbu with no type or path argument'): restore_file_with_nbu(self.context) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema') @patch('gppylib.commands.base.Command.run') def test_restore_file_with_nbu_block_size(self, mock1, mock2): self.context.netbackup_service_host = "mdw" self.context.netbackup_block_size = 1024 cmdStr = "gp_bsa_restore_agent --netbackup-service-host mdw --netbackup-block-size 1024 --netbackup-filename /tmp/foo_schema > /tmp/foo_schema" with patch.object(Command, '__init__', return_value=None) as cmd: restore_file_with_nbu(self.context, "schema") 
cmd.assert_called_with("restoring metadata files to master", cmdStr) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema') @patch('gppylib.commands.base.Command.run') def test_restore_file_with_nbu_keyword(self, mock1, mock2): self.context.netbackup_service_host = "mdw" self.context.netbackup_keyword = "foo" cmdStr = "gp_bsa_restore_agent --netbackup-service-host mdw --netbackup-filename /tmp/foo_schema > /tmp/foo_schema" with patch.object(Command, '__init__', return_value=None) as cmd: restore_file_with_nbu(self.context, "schema") cmd.assert_called_with("restoring metadata files to master", cmdStr) @patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema') @patch('gppylib.commands.base.Command.run') def test_restore_file_with_nbu_segment(self, mock1, mock2): self.context.netbackup_service_host = "mdw" cmdStr = "gp_bsa_restore_agent --netbackup-service-host mdw --netbackup-filename /tmp/foo_schema > /tmp/foo_schema" with patch.object(Command, '__init__', return_value=None) as cmd: restore_file_with_nbu(self.context, "schema", hostname="sdw") from gppylib.commands.base import REMOTE cmd.assert_called_with("restoring metadata files to segment", cmdStr, ctxt=REMOTE, remoteHost="sdw") class MyMock(MagicMock): def __init__(self, num_segs): super(MagicMock, self).__init__() self.mock_segs = [] for i in range(num_segs): self.mock_segs.append(Mock()) def getSegmentList(self): for id, seg in enumerate(self.mock_segs): seg.get_active_primary.getSegmentHostName.return_value = Mock() seg.get_primary_dbid.return_value = id + 2 return self.mock_segs @patch('gppylib.operations.dump.GpArray.initFromCatalog', return_value=MyMock(1)) @patch('gppylib.gparray.GpDB.getSegmentHostName', return_value='sdw') def test_restore_config_files_with_nbu_single_segment(self, mock1, mock2): with patch('gppylib.operations.restore.restore_file_with_nbu', side_effect=my_counter) as nbu_mock: global i i = 0 self.context.netbackup_service_host = "mdw" self.context.netbackup_policy = "test_policy" self.context.netbackup_schedule = "test_schedule" restore_config_files_with_nbu(self.context) args, _ = nbu_mock.call_args_list[0] self.assertEqual(args[1], "master_config") for id, seg in enumerate(mock2.mock_segs): self.assertEqual(seg.get_active_primary.call_count, 1) self.assertEqual(seg.get_primary_dbid.call_count, 1) args, _ = nbu_mock.call_args_list[id] self.assertEqual(args, ("segment_config", id+2, "sdw")) self.assertEqual(i, 2) @patch('gppylib.operations.dump.GpArray.initFromCatalog', return_value=MyMock(3)) @patch('gppylib.gparray.GpDB.getSegmentHostName', return_value='sdw') def test_restore_config_files_with_nbu_multiple_segments(self, mock1, mock2): with patch('gppylib.operations.restore.restore_file_with_nbu', side_effect=my_counter) as nbu_mock: global i i = 0 self.context.netbackup_service_host = "mdw" self.context.netbackup_policy = "test_policy" self.context.netbackup_schedule = "test_schedule" restore_config_files_with_nbu(self.context) args, _ = nbu_mock.call_args_list[0] self.assertEqual(args[1], "master_config") for id, seg in enumerate(mock2.mock_segs): self.assertEqual(seg.get_active_primary.call_count, 1) self.assertEqual(seg.get_primary_dbid.call_count, 1) args, _ = nbu_mock.call_args_list[id] self.assertEqual(i, 4) if __name__ == '__main__': unittest.main() i=0 def my_counter(*args, **kwargs): global i i += 1 return Mock()
rubikloud/gpdb
gpMgmt/bin/gppylib/operations/test/unit/test_unit_restore.py
Python
apache-2.0
69,482
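The gpdb restore tests above repeatedly patch tempfile.NamedTemporaryFile with mock_open and then assert on the recorded write() calls. Below is a minimal, self-contained sketch of that pattern; write_lines() is a hypothetical stand-in for helpers such as create_filter_file(), not code taken from gppylib.

# Minimal sketch of the mock_open/NamedTemporaryFile pattern used by the
# tests above; write_lines() is a hypothetical stand-in, not gpdb code.
from unittest import TestCase, main
from unittest.mock import call, mock_open, patch
import tempfile


def write_lines(lines):
    """Write one entry per line to a temporary file and return the handle."""
    tmp = tempfile.NamedTemporaryFile(mode='w', delete=False)
    for line in lines:
        tmp.write(line + '\n')
    return tmp


class WriteLinesTest(TestCase):
    def test_write_lines_records_each_entry(self):
        m = mock_open()
        with patch('tempfile.NamedTemporaryFile', m, create=True):
            write_lines(['public.t1', 'public.t2'])
        handle = m()  # the mocked file object returned by the patched constructor
        self.assertEqual(handle.write.call_args_list,
                         [call('public.t1\n'), call('public.t2\n')])


if __name__ == '__main__':
    main()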
# Copyright 2016 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from trove.tests.unittests import trove_testtools


class DatastoreManagerTest(trove_testtools.TestCase):

    def setUp(self, manager_name):
        super(DatastoreManagerTest, self).setUp()
        self.patch_datastore_manager(manager_name)
        self.context = trove_testtools.TroveTestContext(self)
zhangg/trove
trove/tests/unittests/guestagent/test_datastore_manager.py
Python
apache-2.0
932
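A hypothetical illustration, not part of the trove tree, of how a concrete test case could build on the DatastoreManagerTest base class above; the 'mysql' manager name and the import path (taken from the path recorded above) are assumptions.

# Hypothetical subclass, shown only to illustrate how the base class above
# is meant to be used; the 'mysql' manager name is an assumed example.
from trove.tests.unittests.guestagent.test_datastore_manager import (
    DatastoreManagerTest)


class MySQLManagerTest(DatastoreManagerTest):

    def setUp(self):
        # Pin the manager name before delegating to the base setUp().
        super(MySQLManagerTest, self).setUp('mysql')

    def test_context_is_created(self):
        # The base setUp should have produced a usable test context.
        self.assertIsNotNone(self.context)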
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Converts ImageNet data to TFRecords file format with Example protos. The raw ImageNet data set is expected to reside in JPEG files located in the following directory structure. data_dir/n01440764/ILSVRC2012_val_00000293.JPEG data_dir/n01440764/ILSVRC2012_val_00000543.JPEG ... where 'n01440764' is the unique synset label associated with these images. The training data set consists of 1000 sub-directories (i.e. labels) each containing 1200 JPEG images for a total of 1.2M JPEG images. The evaluation data set consists of 1000 sub-directories (i.e. labels) each containing 50 JPEG images for a total of 50K JPEG images. This TensorFlow script converts the training and evaluation data into a sharded data set consisting of 1024 and 128 TFRecord files, respectively. train_directory/train-00000-of-01024 train_directory/train-00001-of-01024 ... train_directory/train-00127-of-01024 and validation_directory/validation-00000-of-00128 validation_directory/validation-00001-of-00128 ... validation_directory/validation-00127-of-00128 Each validation TFRecord file contains ~390 records. Each training TFREcord file contains ~1250 records. Each record within the TFRecord file is a serialized Example proto. The Example proto contains the following fields: image/encoded: string containing JPEG encoded image in RGB colorspace image/height: integer, image height in pixels image/width: integer, image width in pixels image/colorspace: string, specifying the colorspace, always 'RGB' image/channels: integer, specifying the number of channels, always 3 image/format: string, specifying the format, always'JPEG' image/filename: string containing the basename of the image file e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG' image/class/label: integer specifying the index in a classification layer. The label ranges from [1, 1000] where 0 is not used. image/class/synset: string specifying the unique ID of the label, e.g. 'n01440764' image/class/text: string specifying the human-readable version of the label e.g. 'red fox, Vulpes vulpes' image/object/bbox/xmin: list of integers specifying the 0+ human annotated bounding boxes image/object/bbox/xmax: list of integers specifying the 0+ human annotated bounding boxes image/object/bbox/ymin: list of integers specifying the 0+ human annotated bounding boxes image/object/bbox/ymax: list of integers specifying the 0+ human annotated bounding boxes image/object/bbox/label: integer specifying the index in a classification layer. The label ranges from [1, 1000] where 0 is not used. Note this is always identical to the image label. Note that the length of xmin is identical to the length of xmax, ymin and ymax for each example. Running this script using 16 threads may take around ~2.5 hours on a HP Z420. 
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime import os import random import sys import threading import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow.compat.v1 as tf tf.app.flags.DEFINE_string('train_directory', '/tmp/', 'Training data directory') tf.app.flags.DEFINE_string('validation_directory', '/tmp/', 'Validation data directory') tf.app.flags.DEFINE_string('output_directory', '/tmp/', 'Output data directory') tf.app.flags.DEFINE_integer('train_shards', 1024, 'Number of shards in training TFRecord files.') tf.app.flags.DEFINE_integer('validation_shards', 128, 'Number of shards in validation TFRecord files.') tf.app.flags.DEFINE_integer('num_threads', 8, 'Number of threads to preprocess the images.') # The labels file contains a list of valid labels are held in this file. # Assumes that the file contains entries as such: # n01440764 # n01443537 # n01484850 # where each line corresponds to a label expressed as a synset. We map # each synset contained in the file to an integer (based on the alphabetical # ordering). See below for details. tf.app.flags.DEFINE_string('labels_file', 'imagenet_lsvrc_2015_synsets.txt', 'Labels file') # This file containing mapping from synset to human-readable label. # Assumes each line of the file looks like: # # n02119247 black fox # n02119359 silver fox # n02119477 red fox, Vulpes fulva # # where each line corresponds to a unique mapping. Note that each line is # formatted as <synset>\t<human readable label>. tf.app.flags.DEFINE_string('imagenet_metadata_file', 'imagenet_metadata.txt', 'ImageNet metadata file') # This file is the output of process_bounding_box.py # Assumes each line of the file looks like: # # n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940 # # where each line corresponds to one bounding box annotation associated # with an image. Each line can be parsed as: # # <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax> # # Note that there might exist mulitple bounding box annotations associated # with an image file. tf.app.flags.DEFINE_string('bounding_box_file', './imagenet_2012_bounding_boxes.csv', 'Bounding box file') FLAGS = tf.app.flags.FLAGS def _int64_feature(value): """Wrapper for inserting int64 features into Example proto.""" if not isinstance(value, list): value = [value] return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) def _float_feature(value): """Wrapper for inserting float features into Example proto.""" if not isinstance(value, list): value = [value] return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def _bytes_feature(value): """Wrapper for inserting bytes features into Example proto.""" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _convert_to_example(filename, image_buffer, label, synset, human, bbox, height, width): """Build an Example proto for an example. Args: filename: string, path to an image file, e.g., '/path/to/example.JPG' image_buffer: string, JPEG encoding of RGB image label: integer, identifier for the ground truth for the network synset: string, unique WordNet ID specifying the label, e.g., 'n02323233' human: string, human-readable label, e.g., 'red fox, Vulpes vulpes' bbox: list of bounding boxes; each box is a list of integers specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to the same label as the image label. 
height: integer, image height in pixels width: integer, image width in pixels Returns: Example proto """ xmin = [] ymin = [] xmax = [] ymax = [] for b in bbox: assert len(b) == 4 # pylint: disable=expression-not-assigned [l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)] # pylint: enable=expression-not-assigned colorspace = 'RGB' channels = 3 image_format = 'JPEG' example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': _int64_feature(height), 'image/width': _int64_feature(width), 'image/colorspace': _bytes_feature(colorspace), 'image/channels': _int64_feature(channels), 'image/class/label': _int64_feature(label), 'image/class/synset': _bytes_feature(synset), 'image/class/text': _bytes_feature(human), 'image/object/bbox/xmin': _float_feature(xmin), 'image/object/bbox/xmax': _float_feature(xmax), 'image/object/bbox/ymin': _float_feature(ymin), 'image/object/bbox/ymax': _float_feature(ymax), 'image/object/bbox/label': _int64_feature([label] * len(xmin)), 'image/format': _bytes_feature(image_format), 'image/filename': _bytes_feature(os.path.basename(filename)), 'image/encoded': _bytes_feature(image_buffer)})) return example class ImageCoder(object): """Helper class that provides TensorFlow image coding utilities.""" def __init__(self): # Create a single Session to run all image coding calls. self._sess = tf.Session() # Initializes function that converts PNG to JPEG data. self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that converts CMYK JPEG data to RGB JPEG data. self._cmyk_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_jpeg(self._cmyk_data, channels=0) self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) # Initializes function that decodes RGB JPEG data. self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def cmyk_to_rgb(self, image_data): return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}) def decode_jpeg(self, image_data): image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data}) assert len(image.shape) == 3 assert image.shape[2] == 3 return image def _is_png(filename): """Determine if a file contains a PNG format image. Args: filename: string, path of the image file. Returns: boolean indicating if the image is a PNG. """ # File list from: # https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU return 'n02105855_2933.JPEG' in filename def _is_cmyk(filename): """Determine if file contains a CMYK JPEG format image. Args: filename: string, path of the image file. Returns: boolean indicating if the image is a JPEG encoded with CMYK color space. 
""" # File list from: # https://github.com/cytsai/ilsvrc-cmyk-image-list blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG', 'n02447366_23489.JPEG', 'n02492035_15739.JPEG', 'n02747177_10752.JPEG', 'n03018349_4028.JPEG', 'n03062245_4620.JPEG', 'n03347037_9675.JPEG', 'n03467068_12171.JPEG', 'n03529860_11437.JPEG', 'n03544143_17228.JPEG', 'n03633091_5218.JPEG', 'n03710637_5125.JPEG', 'n03961711_5286.JPEG', 'n04033995_2932.JPEG', 'n04258138_17003.JPEG', 'n04264628_27969.JPEG', 'n04336792_7448.JPEG', 'n04371774_5854.JPEG', 'n04596742_4225.JPEG', 'n07583066_647.JPEG', 'n13037406_4650.JPEG'] return filename.split('/')[-1] in blacklist def _process_image(filename, coder): """Process a single image file. Args: filename: string, path to an image file e.g., '/path/to/example.JPG'. coder: instance of ImageCoder to provide TensorFlow image coding utils. Returns: image_buffer: string, JPEG encoding of RGB image. height: integer, image height in pixels. width: integer, image width in pixels. """ # Read the image file. image_data = tf.gfile.GFile(filename, 'r').read() # Clean the dirty data. if _is_png(filename): # 1 image is a PNG. print('Converting PNG to JPEG for %s' % filename) image_data = coder.png_to_jpeg(image_data) elif _is_cmyk(filename): # 22 JPEG images are in CMYK colorspace. print('Converting CMYK to RGB for %s' % filename) image_data = coder.cmyk_to_rgb(image_data) # Decode the RGB JPEG. image = coder.decode_jpeg(image_data) # Check that image converted to RGB assert len(image.shape) == 3 height = image.shape[0] width = image.shape[1] assert image.shape[2] == 3 return image_data, height, width def _process_image_files_batch(coder, thread_index, ranges, name, filenames, synsets, labels, humans, bboxes, num_shards): """Processes and saves list of images as TFRecord in 1 thread. Args: coder: instance of ImageCoder to provide TensorFlow image coding utils. thread_index: integer, unique batch to run index is within [0, len(ranges)). ranges: list of pairs of integers specifying ranges of each batches to analyze in parallel. name: string, unique identifier specifying the data set filenames: list of strings; each string is a path to an image file synsets: list of strings; each string is a unique WordNet ID labels: list of integer; each integer identifies the ground truth humans: list of strings; each string is a human-readable label bboxes: list of bounding boxes for each image. Note that each entry in this list might contain from 0+ entries corresponding to the number of bounding box annotations for the image. num_shards: integer number of shards for this data set. """ # Each thread produces N shards where N = int(num_shards / num_threads). # For instance, if num_shards = 128, and the num_threads = 2, then the first # thread would produce shards [0, 64). num_threads = len(ranges) assert not num_shards % num_threads num_shards_per_batch = int(num_shards / num_threads) shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1], num_shards_per_batch + 1).astype(int) num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0] counter = 0 for s in xrange(num_shards_per_batch): # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010' shard = thread_index * num_shards_per_batch + s output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) output_file = os.path.join(FLAGS.output_directory, output_filename) writer = tf.python_io.TFRecordWriter(output_file) shard_counter = 0 files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) for i in files_in_shard: filename = filenames[i] label = labels[i] synset = synsets[i] human = humans[i] bbox = bboxes[i] image_buffer, height, width = _process_image(filename, coder) example = _convert_to_example(filename, image_buffer, label, synset, human, bbox, height, width) writer.write(example.SerializeToString()) shard_counter += 1 counter += 1 if not counter % 1000: print('%s [thread %d]: Processed %d of %d images in thread batch.' % (datetime.now(), thread_index, counter, num_files_in_thread)) sys.stdout.flush() writer.close() print('%s [thread %d]: Wrote %d images to %s' % (datetime.now(), thread_index, shard_counter, output_file)) sys.stdout.flush() shard_counter = 0 print('%s [thread %d]: Wrote %d images to %d shards.' % (datetime.now(), thread_index, counter, num_files_in_thread)) sys.stdout.flush() def _process_image_files(name, filenames, synsets, labels, humans, bboxes, num_shards): """Process and save list of images as TFRecord of Example protos. Args: name: string, unique identifier specifying the data set filenames: list of strings; each string is a path to an image file synsets: list of strings; each string is a unique WordNet ID labels: list of integer; each integer identifies the ground truth humans: list of strings; each string is a human-readable label bboxes: list of bounding boxes for each image. Note that each entry in this list might contain from 0+ entries corresponding to the number of bounding box annotations for the image. num_shards: integer number of shards for this data set. """ assert len(filenames) == len(synsets) assert len(filenames) == len(labels) assert len(filenames) == len(humans) assert len(filenames) == len(bboxes) # Break all images into batches with a [ranges[i][0], ranges[i][1]]. spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) ranges = [] threads = [] for i in xrange(len(spacing) - 1): ranges.append([spacing[i], spacing[i+1]]) # Launch a thread for each batch. print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)) sys.stdout.flush() # Create a mechanism for monitoring when all threads are finished. coord = tf.train.Coordinator() # Create a generic TensorFlow-based utility for converting all image codings. coder = ImageCoder() threads = [] for thread_index in xrange(len(ranges)): args = (coder, thread_index, ranges, name, filenames, synsets, labels, humans, bboxes, num_shards) t = threading.Thread(target=_process_image_files_batch, args=args) t.start() threads.append(t) # Wait for all the threads to terminate. coord.join(threads) print('%s: Finished writing all %d images in data set.' % (datetime.now(), len(filenames))) sys.stdout.flush() def _find_image_files(data_dir, labels_file): """Build a list of all images files and labels in the data set. Args: data_dir: string, path to the root directory of images. Assumes that the ImageNet data set resides in JPEG files located in the following directory structure. data_dir/n01440764/ILSVRC2012_val_00000293.JPEG data_dir/n01440764/ILSVRC2012_val_00000543.JPEG where 'n01440764' is the unique synset label associated with these images. labels_file: string, path to the labels file. 
The list of valid labels are held in this file. Assumes that the file contains entries as such: n01440764 n01443537 n01484850 where each line corresponds to a label expressed as a synset. We map each synset contained in the file to an integer (based on the alphabetical ordering) starting with the integer 1 corresponding to the synset contained in the first line. The reason we start the integer labels at 1 is to reserve label 0 as an unused background class. Returns: filenames: list of strings; each string is a path to an image file. synsets: list of strings; each string is a unique WordNet ID. labels: list of integer; each integer identifies the ground truth. """ print('Determining list of input files and labels from %s.' % data_dir) challenge_synsets = [ l.strip() for l in tf.gfile.GFile(labels_file, 'r').readlines() ] labels = [] filenames = [] synsets = [] # Leave label index 0 empty as a background class. label_index = 1 # Construct the list of JPEG files and labels. for synset in challenge_synsets: jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset) matching_files = tf.gfile.Glob(jpeg_file_path) labels.extend([label_index] * len(matching_files)) synsets.extend([synset] * len(matching_files)) filenames.extend(matching_files) if not label_index % 100: print('Finished finding files in %d of %d classes.' % ( label_index, len(challenge_synsets))) label_index += 1 # Shuffle the ordering of all image files in order to guarantee # random ordering of the images with respect to label in the # saved TFRecord files. Make the randomization repeatable. shuffled_index = range(len(filenames)) random.seed(12345) random.shuffle(shuffled_index) filenames = [filenames[i] for i in shuffled_index] synsets = [synsets[i] for i in shuffled_index] labels = [labels[i] for i in shuffled_index] print('Found %d JPEG files across %d labels inside %s.' % (len(filenames), len(challenge_synsets), data_dir)) return filenames, synsets, labels def _find_human_readable_labels(synsets, synset_to_human): """Build a list of human-readable labels. Args: synsets: list of strings; each string is a unique WordNet ID. synset_to_human: dict of synset to human labels, e.g., 'n02119022' --> 'red fox, Vulpes vulpes' Returns: List of human-readable strings corresponding to each synset. """ humans = [] for s in synsets: assert s in synset_to_human, ('Failed to find: %s' % s) humans.append(synset_to_human[s]) return humans def _find_image_bounding_boxes(filenames, image_to_bboxes): """Find the bounding boxes for a given image file. Args: filenames: list of strings; each string is a path to an image file. image_to_bboxes: dictionary mapping image file names to a list of bounding boxes. This list contains 0+ bounding boxes. Returns: List of bounding boxes for each image. Note that each entry in this list might contain from 0+ entries corresponding to the number of bounding box annotations for the image. """ num_image_bbox = 0 bboxes = [] for f in filenames: basename = os.path.basename(f) if basename in image_to_bboxes: bboxes.append(image_to_bboxes[basename]) num_image_bbox += 1 else: bboxes.append([]) print('Found %d images with bboxes out of %d images' % ( num_image_bbox, len(filenames))) return bboxes def _process_dataset(name, directory, num_shards, synset_to_human, image_to_bboxes): """Process a complete data set and save it as a TFRecord. Args: name: string, unique identifier specifying the data set. directory: string, root path to the data set. num_shards: integer number of shards for this data set. 
synset_to_human: dict of synset to human labels, e.g., 'n02119022' --> 'red fox, Vulpes vulpes' image_to_bboxes: dictionary mapping image file names to a list of bounding boxes. This list contains 0+ bounding boxes. """ filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file) humans = _find_human_readable_labels(synsets, synset_to_human) bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes) _process_image_files(name, filenames, synsets, labels, humans, bboxes, num_shards) def _build_synset_lookup(imagenet_metadata_file): """Build lookup for synset to human-readable label. Args: imagenet_metadata_file: string, path to file containing mapping from synset to human-readable label. Assumes each line of the file looks like: n02119247 black fox n02119359 silver fox n02119477 red fox, Vulpes fulva where each line corresponds to a unique mapping. Note that each line is formatted as <synset>\t<human readable label>. Returns: Dictionary of synset to human labels, such as: 'n02119022' --> 'red fox, Vulpes vulpes' """ lines = tf.gfile.GFile(imagenet_metadata_file, 'r').readlines() synset_to_human = {} for l in lines: if l: parts = l.strip().split('\t') assert len(parts) == 2 synset = parts[0] human = parts[1] synset_to_human[synset] = human return synset_to_human def _build_bounding_box_lookup(bounding_box_file): """Build a lookup from image file to bounding boxes. Args: bounding_box_file: string, path to file with bounding boxes annotations. Assumes each line of the file looks like: n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940 where each line corresponds to one bounding box annotation associated with an image. Each line can be parsed as: <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax> Note that there might exist mulitple bounding box annotations associated with an image file. This file is the output of process_bounding_boxes.py. Returns: Dictionary mapping image file names to a list of bounding boxes. This list contains 0+ bounding boxes. """ lines = tf.gfile.GFile(bounding_box_file, 'r').readlines() images_to_bboxes = {} num_bbox = 0 num_image = 0 for l in lines: if l: parts = l.split(',') assert len(parts) == 5, ('Failed to parse: %s' % l) filename = parts[0] xmin = float(parts[1]) ymin = float(parts[2]) xmax = float(parts[3]) ymax = float(parts[4]) box = [xmin, ymin, xmax, ymax] if filename not in images_to_bboxes: images_to_bboxes[filename] = [] num_image += 1 images_to_bboxes[filename].append(box) num_bbox += 1 print('Successfully read %d bounding boxes ' 'across %d images.' % (num_bbox, num_image)) return images_to_bboxes def main(unused_argv): assert not FLAGS.train_shards % FLAGS.num_threads, ( 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards') assert not FLAGS.validation_shards % FLAGS.num_threads, ( 'Please make the FLAGS.num_threads commensurate with ' 'FLAGS.validation_shards') print('Saving results to %s' % FLAGS.output_directory) # Build a map from synset to human-readable label. synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file) image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file) # Run it! _process_dataset('validation', FLAGS.validation_directory, FLAGS.validation_shards, synset_to_human, image_to_bboxes) _process_dataset('train', FLAGS.train_directory, FLAGS.train_shards, synset_to_human, image_to_bboxes) if __name__ == '__main__': tf.app.run()
googleinterns/wss
third_party/slim/datasets/build_imagenet_data.py
Python
apache-2.0
26,229
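The module docstring above lists the feature keys written into each Example proto. The following readback sketch, which is not part of the script, decodes one serialized record from a generated shard; the shard path in the usage comment is a placeholder.

# Sketch only: parse the first Example in a TFRecord shard produced by the
# script above and pull out a few of the documented feature keys.
import tensorflow.compat.v1 as tf


def read_first_record(shard_path):
    """Return (label, synset, height, width) of the first record in a shard."""
    for serialized in tf.python_io.tf_record_iterator(shard_path):
        example = tf.train.Example()
        example.ParseFromString(serialized)
        feature = example.features.feature
        label = feature['image/class/label'].int64_list.value[0]
        synset = feature['image/class/synset'].bytes_list.value[0]
        height = feature['image/height'].int64_list.value[0]
        width = feature['image/width'].int64_list.value[0]
        return label, synset, height, width


# Example usage (the path is a placeholder):
# print(read_first_record('/tmp/train-00000-of-01024'))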
#
# Copyright 2012 WebFilings, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Async Callback Examples.

There are 3 examples below.

    1. AsyncCallbackHandler
        Registering a function as a callback to be triggered when the job
        has completed.

    2. AsyncErrorCallbackHandler
        Registering a function as an error callback to be triggered when an
        error has been hit in the Async process.

    3. AsyncAsyncCallbackHandler
        Registering another Async object as a callback to be inserted when
        the job has completed.
"""

import logging

import webapp2


class AsyncCallbackHandler(webapp2.RequestHandler):
    """Demonstrate setting an Async callback."""

    def get(self):
        from furious.async import Async

        # Instantiate an Async object, specifying a 'success' callback.
        async_task = Async(
            target=example_function, args=[1], kwargs={'some': 'value'},
            callbacks={'success': all_done}
        )

        # Insert the task to run the Async object. The success callback will
        # be executed in the furious task after the job is executed.
        async_task.start()

        logging.info('Async job kicked off.')

        self.response.out.write('Successfully inserted Async job.')


class AsyncErrorCallbackHandler(webapp2.RequestHandler):
    """Demonstrate handling an error using an Async callback."""

    def get(self):
        from furious.async import Async

        # Instantiate an Async object, specifying an 'error' callback.
        async_task = Async(
            target=dir, args=[1, 2, 3],
            callbacks={'error': handle_an_error}
        )

        # Insert the task to run the Async object. The error callback will be
        # executed in the furious task after the job has raised an exception.
        async_task.start()

        logging.info('Erroneous Async job kicked off.')

        self.response.out.write('Successfully inserted Async job.')


class AsyncAsyncCallbackHandler(webapp2.RequestHandler):
    """Demonstrate using an Async as a callback for another Async."""

    def get(self):
        from furious.async import Async

        # Instantiate an Async object to act as our success callback.
        # NOTE: Your async.result is not directly available from the
        # success_callback Async, you will need to persist the result
        # and fetch it from the other Async if needed.
        success_callback = Async(
            target=example_function, kwargs={'it': 'worked'}
        )

        # Instantiate an Async object, setting the success_callback to the
        # above Async object.
        async_task = Async(
            target=example_function, kwargs={'trigger': 'job'},
            callbacks={'success': success_callback}
        )

        # Insert the task to run the Async object.
        async_task.start()

        logging.info('Async job kicked off.')

        self.response.out.write('Successfully inserted Async job.')


def example_function(*args, **kwargs):
    """This function is called by furious tasks to demonstrate usage."""
    logging.info('example_function executed with args: %r, kwargs: %r',
                 args, kwargs)

    return args


def all_done():
    """Will be run if the async task runs successfully."""
    from furious.context import get_current_async

    async = get_current_async()

    logging.info('async task complete, value returned: %r', async.result)


def handle_an_error():
    """Will be run if the async task raises an unhandled exception."""
    import os

    from furious.context import get_current_async

    async = get_current_async()
    async_exception = async.result.payload
    exc_info = async_exception.traceback
    logging.info('async job blew up, exception info: %r', exc_info)

    retries = int(os.environ['HTTP_X_APPENGINE_TASKRETRYCOUNT'])
    if retries < 2:
        raise Exception(async_exception.error)
    else:
        logging.info('Caught too many errors, giving up now.')
Workiva/furious
example/callback.py
Python
apache-2.0
4,446
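A small additional sketch combining the two callback styles shown above on a single Async. It assumes example_function, all_done and handle_an_error from the example module are available, and it uses only the furious calls already demonstrated there.

# Sketch only: one Async with both the 'success' and 'error' callbacks that
# the handlers above register separately. Assumes example_function, all_done
# and handle_an_error from the example module are importable in this scope.
import logging


def kick_off_with_both_callbacks():
    from furious.async import Async

    async_task = Async(
        target=example_function, args=[42], kwargs={'mode': 'demo'},
        callbacks={'success': all_done, 'error': handle_an_error}
    )

    # Queue the job; exactly one of the two callbacks fires afterwards.
    async_task.start()

    logging.info('Async job with both callbacks kicked off.')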
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest from unittest.mock import MagicMock, patch from moto import mock_sqs from airflow import DAG from airflow.exceptions import AirflowException from airflow.providers.amazon.aws.hooks.sqs import SQSHook from airflow.providers.amazon.aws.sensors.sqs import SQSSensor from airflow.utils import timezone DEFAULT_DATE = timezone.datetime(2017, 1, 1) class TestSQSSensor(unittest.TestCase): def setUp(self): args = { 'owner': 'airflow', 'start_date': DEFAULT_DATE } self.dag = DAG('test_dag_id', default_args=args) self.sensor = SQSSensor( task_id='test_task', dag=self.dag, sqs_queue='test', aws_conn_id='aws_default' ) self.mock_context = MagicMock() self.sqs_hook = SQSHook() @mock_sqs def test_poke_success(self): self.sqs_hook.create_queue('test') self.sqs_hook.send_message(queue_url='test', message_body='hello') result = self.sensor.poke(self.mock_context) self.assertTrue(result) self.assertTrue("'Body': 'hello'" in str(self.mock_context['ti'].method_calls), "context call should contain message hello") @mock_sqs def test_poke_no_messsage_failed(self): self.sqs_hook.create_queue('test') result = self.sensor.poke(self.mock_context) self.assertFalse(result) context_calls = [] self.assertTrue(self.mock_context['ti'].method_calls == context_calls, "context call should be same") @patch('airflow.providers.amazon.aws.sensors.sqs.SQSHook') def test_poke_delete_raise_airflow_exception(self, mock_sqs_hook): message = {'Messages': [{'MessageId': 'c585e508-2ea0-44c7-bf3e-d1ba0cb87834', 'ReceiptHandle': 'mockHandle', 'MD5OfBody': 'e5a9d8684a8edfed460b8d42fd28842f', 'Body': 'h21'}], 'ResponseMetadata': {'RequestId': '56cbf4aa-f4ef-5518-9574-a04e0a5f1411', 'HTTPStatusCode': 200, 'HTTPHeaders': { 'x-amzn-requestid': '56cbf4aa-f4ef-5518-9574-a04e0a5f1411', 'date': 'Mon, 18 Feb 2019 18:41:52 GMT', 'content-type': 'text/xml', 'mock_sqs_hook-length': '830'}, 'RetryAttempts': 0}} mock_sqs_hook().get_conn().receive_message.return_value = message mock_sqs_hook().get_conn().delete_message_batch.return_value = \ {'Failed': [{'Id': '22f67273-4dbc-4c19-83b5-aee71bfeb832'}]} with self.assertRaises(AirflowException) as context: self.sensor.poke(self.mock_context) self.assertTrue('Delete SQS Messages failed' in context.exception.args[0]) @patch('airflow.providers.amazon.aws.sensors.sqs.SQSHook') def test_poke_receive_raise_exception(self, mock_sqs_hook): mock_sqs_hook().get_conn().receive_message.side_effect = Exception('test exception') with self.assertRaises(Exception) as context: self.sensor.poke(self.mock_context) self.assertTrue('test exception' in context.exception.args[0]) if __name__ == '__main__': unittest.main()
wileeam/airflow
tests/providers/amazon/aws/sensors/test_sqs.py
Python
apache-2.0
4,197
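For context, a minimal wiring sketch showing how the SQSSensor exercised by these tests is typically attached to a DAG. The DAG id, start date and queue name are placeholders, not values taken from the test module.

# Usage sketch only; the DAG id, schedule and queue name are placeholders.
from airflow import DAG
from airflow.providers.amazon.aws.sensors.sqs import SQSSensor
from airflow.utils import timezone

with DAG(dag_id='example_sqs_dag',
         start_date=timezone.datetime(2017, 1, 1),
         schedule_interval=None) as dag:
    wait_for_message = SQSSensor(
        task_id='wait_for_message',
        sqs_queue='test',            # placeholder queue, as in the tests above
        aws_conn_id='aws_default',
    )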
# $Id: statemachine.py 7320 2012-01-19 22:33:02Z milde $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. """ A finite state machine specialized for regular-expression-based text filters, this module defines the following classes: - `StateMachine`, a state machine - `State`, a state superclass - `StateMachineWS`, a whitespace-sensitive version of `StateMachine` - `StateWS`, a state superclass for use with `StateMachineWS` - `SearchStateMachine`, uses `re.search()` instead of `re.match()` - `SearchStateMachineWS`, uses `re.search()` instead of `re.match()` - `ViewList`, extends standard Python lists. - `StringList`, string-specific ViewList. Exception classes: - `StateMachineError` - `UnknownStateError` - `DuplicateStateError` - `UnknownTransitionError` - `DuplicateTransitionError` - `TransitionPatternNotFound` - `TransitionMethodNotFound` - `UnexpectedIndentationError` - `TransitionCorrection`: Raised to switch to another transition. - `StateCorrection`: Raised to switch to another state & transition. Functions: - `string2lines()`: split a multi-line string into a list of one-line strings How To Use This Module ====================== (See the individual classes, methods, and attributes for details.) 1. Import it: ``import statemachine`` or ``from statemachine import ...``. You will also need to ``import re``. 2. Derive a subclass of `State` (or `StateWS`) for each state in your state machine:: class MyState(statemachine.State): Within the state's class definition: a) Include a pattern for each transition, in `State.patterns`:: patterns = {'atransition': r'pattern', ...} b) Include a list of initial transitions to be set up automatically, in `State.initial_transitions`:: initial_transitions = ['atransition', ...] c) Define a method for each transition, with the same name as the transition pattern:: def atransition(self, match, context, next_state): # do something result = [...] # a list return context, next_state, result # context, next_state may be altered Transition methods may raise an `EOFError` to cut processing short. d) You may wish to override the `State.bof()` and/or `State.eof()` implicit transition methods, which handle the beginning- and end-of-file. e) In order to handle nested processing, you may wish to override the attributes `State.nested_sm` and/or `State.nested_sm_kwargs`. If you are using `StateWS` as a base class, in order to handle nested indented blocks, you may wish to: - override the attributes `StateWS.indent_sm`, `StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or `StateWS.known_indent_sm_kwargs`; - override the `StateWS.blank()` method; and/or - override or extend the `StateWS.indent()`, `StateWS.known_indent()`, and/or `StateWS.firstknown_indent()` methods. 3. Create a state machine object:: sm = StateMachine(state_classes=[MyState, ...], initial_state='MyState') 4. Obtain the input text, which needs to be converted into a tab-free list of one-line strings. For example, to read text from a file called 'inputfile':: input_string = open('inputfile').read() input_lines = statemachine.string2lines(input_string) 5. Run the state machine on the input text and collect the results, a list:: results = sm.run(input_lines) 6. 
Remove any lingering circular references:: sm.unlink() """ __docformat__ = 'restructuredtext' import sys import re import types import unicodedata from docutils import utils from docutils.error_reporting import ErrorOutput class StateMachine: """ A finite state machine for text filters using regular expressions. The input is provided in the form of a list of one-line strings (no newlines). States are subclasses of the `State` class. Transitions consist of regular expression patterns and transition methods, and are defined in each state. The state machine is started with the `run()` method, which returns the results of processing in a list. """ def __init__(self, state_classes, initial_state, debug=False): """ Initialize a `StateMachine` object; add state objects. Parameters: - `state_classes`: a list of `State` (sub)classes. - `initial_state`: a string, the class name of the initial state. - `debug`: a boolean; produce verbose output if true (nonzero). """ self.input_lines = None """`StringList` of input lines (without newlines). Filled by `self.run()`.""" self.input_offset = 0 """Offset of `self.input_lines` from the beginning of the file.""" self.line = None """Current input line.""" self.line_offset = -1 """Current input line offset from beginning of `self.input_lines`.""" self.debug = debug """Debugging mode on/off.""" self.initial_state = initial_state """The name of the initial state (key to `self.states`).""" self.current_state = initial_state """The name of the current state (key to `self.states`).""" self.states = {} """Mapping of {state_name: State_object}.""" self.add_states(state_classes) self.observers = [] """List of bound methods or functions to call whenever the current line changes. Observers are called with one argument, ``self``. Cleared at the end of `run()`.""" self._stderr = ErrorOutput() """Wrapper around sys.stderr catching en-/decoding errors""" def unlink(self): """Remove circular references to objects no longer required.""" for state in self.states.values(): state.unlink() self.states = None def run(self, input_lines, input_offset=0, context=None, input_source=None, initial_state=None): """ Run the state machine on `input_lines`. Return results (a list). Reset `self.line_offset` and `self.current_state`. Run the beginning-of-file transition. Input one line at a time and check for a matching transition. If a match is found, call the transition method and possibly change the state. Store the context returned by the transition method to be passed on to the next transition matched. Accumulate the results returned by the transition methods in a list. Run the end-of-file transition. Finally, return the accumulated results. Parameters: - `input_lines`: a list of strings without newlines, or `StringList`. - `input_offset`: the line offset of `input_lines` from the beginning of the file. - `context`: application-specific storage. - `input_source`: name or path of source of `input_lines`. - `initial_state`: name of initial state. 
""" self.runtime_init() if isinstance(input_lines, StringList): self.input_lines = input_lines else: self.input_lines = StringList(input_lines, source=input_source) self.input_offset = input_offset self.line_offset = -1 self.current_state = initial_state or self.initial_state if self.debug: print >>self._stderr, ( u'\nStateMachine.run: input_lines (line_offset=%s):\n| %s' % (self.line_offset, u'\n| '.join(self.input_lines))) transitions = None results = [] state = self.get_state() try: if self.debug: print >>self._stderr, '\nStateMachine.run: bof transition' context, result = state.bof(context) results.extend(result) while True: try: try: self.next_line() if self.debug: source, offset = self.input_lines.info( self.line_offset) print >>self._stderr, ( u'\nStateMachine.run: line (source=%r, ' u'offset=%r):\n| %s' % (source, offset, self.line)) context, next_state, result = self.check_line( context, state, transitions) except EOFError: if self.debug: print >>self._stderr, ( '\nStateMachine.run: %s.eof transition' % state.__class__.__name__) result = state.eof(context) results.extend(result) break else: results.extend(result) except TransitionCorrection, exception: self.previous_line() # back up for another try transitions = (exception.args[0],) if self.debug: print >>self._stderr, ( '\nStateMachine.run: TransitionCorrection to ' 'state "%s", transition %s.' % (state.__class__.__name__, transitions[0])) continue except StateCorrection, exception: self.previous_line() # back up for another try next_state = exception.args[0] if len(exception.args) == 1: transitions = None else: transitions = (exception.args[1],) if self.debug: print >>self._stderr, ( '\nStateMachine.run: StateCorrection to state ' '"%s", transition %s.' % (next_state, transitions[0])) else: transitions = None state = self.get_state(next_state) except: if self.debug: self.error() raise self.observers = [] return results def get_state(self, next_state=None): """ Return current state object; set it first if `next_state` given. Parameter `next_state`: a string, the name of the next state. Exception: `UnknownStateError` raised if `next_state` unknown. """ if next_state: if self.debug and next_state != self.current_state: print >>self._stderr, ( '\nStateMachine.get_state: Changing state from ' '"%s" to "%s" (input line %s).' 
% (self.current_state, next_state, self.abs_line_number())) self.current_state = next_state try: return self.states[self.current_state] except KeyError: raise UnknownStateError(self.current_state) def next_line(self, n=1): """Load `self.line` with the `n`'th next line and return it.""" try: try: self.line_offset += n self.line = self.input_lines[self.line_offset] except IndexError: self.line = None raise EOFError return self.line finally: self.notify_observers() def is_next_line_blank(self): """Return 1 if the next line is blank or non-existant.""" try: return not self.input_lines[self.line_offset + 1].strip() except IndexError: return 1 def at_eof(self): """Return 1 if the input is at or past end-of-file.""" return self.line_offset >= len(self.input_lines) - 1 def at_bof(self): """Return 1 if the input is at or before beginning-of-file.""" return self.line_offset <= 0 def previous_line(self, n=1): """Load `self.line` with the `n`'th previous line and return it.""" self.line_offset -= n if self.line_offset < 0: self.line = None else: self.line = self.input_lines[self.line_offset] self.notify_observers() return self.line def goto_line(self, line_offset): """Jump to absolute line offset `line_offset`, load and return it.""" try: try: self.line_offset = line_offset - self.input_offset self.line = self.input_lines[self.line_offset] except IndexError: self.line = None raise EOFError return self.line finally: self.notify_observers() def get_source(self, line_offset): """Return source of line at absolute line offset `line_offset`.""" return self.input_lines.source(line_offset - self.input_offset) def abs_line_offset(self): """Return line offset of current line, from beginning of file.""" return self.line_offset + self.input_offset def abs_line_number(self): """Return line number of current line (counting from 1).""" return self.line_offset + self.input_offset + 1 def get_source_and_line(self, lineno=None): """Return (source, line) tuple for current or given line number. Looks up the source and line number in the `self.input_lines` StringList instance to count for included source files. If the optional argument `lineno` is given, convert it from an absolute line number to the corresponding (source, line) pair. """ if lineno is None: offset = self.line_offset else: offset = lineno - self.input_offset - 1 try: src, srcoffset = self.input_lines.info(offset) srcline = srcoffset + 1 except (TypeError): # line is None if index is "Just past the end" src, srcline = self.get_source_and_line(offset + self.input_offset) return src, srcline + 1 except (IndexError): # `offset` is off the list src, srcline = None, None # raise AssertionError('cannot find line %d in %s lines' % # (offset, len(self.input_lines))) # # list(self.input_lines.lines()))) # assert offset == srcoffset, str(self.input_lines) # print "get_source_and_line(%s):" % lineno, # print offset + 1, '->', src, srcline # print self.input_lines return (src, srcline) def insert_input(self, input_lines, source): self.input_lines.insert(self.line_offset + 1, '', source='internal padding after '+source, offset=len(input_lines)) self.input_lines.insert(self.line_offset + 1, '', source='internal padding before '+source, offset=-1) self.input_lines.insert(self.line_offset + 2, StringList(input_lines, source)) def get_text_block(self, flush_left=False): """ Return a contiguous block of text. If `flush_left` is true, raise `UnexpectedIndentationError` if an indented line is encountered before the text block ends (with a blank line). 
""" try: block = self.input_lines.get_text_block(self.line_offset, flush_left) self.next_line(len(block) - 1) return block except UnexpectedIndentationError, err: block = err.args[0] self.next_line(len(block) - 1) # advance to last line of block raise def check_line(self, context, state, transitions=None): """ Examine one line of input for a transition match & execute its method. Parameters: - `context`: application-dependent storage. - `state`: a `State` object, the current state. - `transitions`: an optional ordered list of transition names to try, instead of ``state.transition_order``. Return the values returned by the transition method: - context: possibly modified from the parameter `context`; - next state name (`State` subclass name); - the result output of the transition, a list. When there is no match, ``state.no_match()`` is called and its return value is returned. """ if transitions is None: transitions = state.transition_order state_correction = None if self.debug: print >>self._stderr, ( '\nStateMachine.check_line: state="%s", transitions=%r.' % (state.__class__.__name__, transitions)) for name in transitions: pattern, method, next_state = state.transitions[name] match = pattern.match(self.line) if match: if self.debug: print >>self._stderr, ( '\nStateMachine.check_line: Matched transition ' '"%s" in state "%s".' % (name, state.__class__.__name__)) return method(match, context, next_state) else: if self.debug: print >>self._stderr, ( '\nStateMachine.check_line: No match in state "%s".' % state.__class__.__name__) return state.no_match(context, transitions) def add_state(self, state_class): """ Initialize & add a `state_class` (`State` subclass) object. Exception: `DuplicateStateError` raised if `state_class` was already added. """ statename = state_class.__name__ if statename in self.states: raise DuplicateStateError(statename) self.states[statename] = state_class(self, self.debug) def add_states(self, state_classes): """ Add `state_classes` (a list of `State` subclasses). """ for state_class in state_classes: self.add_state(state_class) def runtime_init(self): """ Initialize `self.states`. """ for state in self.states.values(): state.runtime_init() def error(self): """Report error details.""" type, value, module, line, function = _exception_data() print >>self._stderr, u'%s: %s' % (type, value) print >>self._stderr, 'input line %s' % (self.abs_line_number()) print >>self._stderr, (u'module %s, line %s, function %s' % (module, line, function)) def attach_observer(self, observer): """ The `observer` parameter is a function or bound method which takes two arguments, the source and offset of the current line. """ self.observers.append(observer) def detach_observer(self, observer): self.observers.remove(observer) def notify_observers(self): for observer in self.observers: try: info = self.input_lines.info(self.line_offset) except IndexError: info = (None, None) observer(*info) class State: """ State superclass. Contains a list of transitions, and transition methods. Transition methods all have the same signature. They take 3 parameters: - An `re` match object. ``match.string`` contains the matched input line, ``match.start()`` gives the start index of the match, and ``match.end()`` gives the end index. - A context object, whose meaning is application-defined (initial value ``None``). It can be used to store any information required by the state machine, and the retured context is passed on to the next transition method unchanged. 
- The name of the next state, a string, taken from the transitions list; normally it is returned unchanged, but it may be altered by the transition method if necessary. Transition methods all return a 3-tuple: - A context object, as (potentially) modified by the transition method. - The next state name (a return value of ``None`` means no state change). - The processing result, a list, which is accumulated by the state machine. Transition methods may raise an `EOFError` to cut processing short. There are two implicit transitions, and corresponding transition methods are defined: `bof()` handles the beginning-of-file, and `eof()` handles the end-of-file. These methods have non-standard signatures and return values. `bof()` returns the initial context and results, and may be used to return a header string, or do any other processing needed. `eof()` should handle any remaining context and wrap things up; it returns the final processing result. Typical applications need only subclass `State` (or a subclass), set the `patterns` and `initial_transitions` class attributes, and provide corresponding transition methods. The default object initialization will take care of constructing the list of transitions. """ patterns = None """ {Name: pattern} mapping, used by `make_transition()`. Each pattern may be a string or a compiled `re` pattern. Override in subclasses. """ initial_transitions = None """ A list of transitions to initialize when a `State` is instantiated. Each entry is either a transition name string, or a (transition name, next state name) pair. See `make_transitions()`. Override in subclasses. """ nested_sm = None """ The `StateMachine` class for handling nested processing. If left as ``None``, `nested_sm` defaults to the class of the state's controlling state machine. Override it in subclasses to avoid the default. """ nested_sm_kwargs = None """ Keyword arguments dictionary, passed to the `nested_sm` constructor. Two keys must have entries in the dictionary: - Key 'state_classes' must be set to a list of `State` classes. - Key 'initial_state' must be set to the name of the initial state class. If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the class of the current state, and 'initial_state' defaults to the name of the class of the current state. Override in subclasses to avoid the defaults. """ def __init__(self, state_machine, debug=False): """ Initialize a `State` object; make & add initial transitions. Parameters: - `statemachine`: the controlling `StateMachine` object. - `debug`: a boolean; produce verbose output if true. """ self.transition_order = [] """A list of transition names in search order.""" self.transitions = {} """ A mapping of transition names to 3-tuples containing (compiled_pattern, transition_method, next_state_name). Initialized as an instance attribute dynamically (instead of as a class attribute) because it may make forward references to patterns and methods in this or other classes. """ self.add_initial_transitions() self.state_machine = state_machine """A reference to the controlling `StateMachine` object.""" self.debug = debug """Debugging mode on/off.""" if self.nested_sm is None: self.nested_sm = self.state_machine.__class__ if self.nested_sm_kwargs is None: self.nested_sm_kwargs = {'state_classes': [self.__class__], 'initial_state': self.__class__.__name__} def runtime_init(self): """ Initialize this `State` before running the state machine; called from `self.state_machine.run()`. 
""" pass def unlink(self): """Remove circular references to objects no longer required.""" self.state_machine = None def add_initial_transitions(self): """Make and add transitions listed in `self.initial_transitions`.""" if self.initial_transitions: names, transitions = self.make_transitions( self.initial_transitions) self.add_transitions(names, transitions) def add_transitions(self, names, transitions): """ Add a list of transitions to the start of the transition list. Parameters: - `names`: a list of transition names. - `transitions`: a mapping of names to transition tuples. Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`. """ for name in names: if name in self.transitions: raise DuplicateTransitionError(name) if name not in transitions: raise UnknownTransitionError(name) self.transition_order[:0] = names self.transitions.update(transitions) def add_transition(self, name, transition): """ Add a transition to the start of the transition list. Parameter `transition`: a ready-made transition 3-tuple. Exception: `DuplicateTransitionError`. """ if name in self.transitions: raise DuplicateTransitionError(name) self.transition_order[:0] = [name] self.transitions[name] = transition def remove_transition(self, name): """ Remove a transition by `name`. Exception: `UnknownTransitionError`. """ try: del self.transitions[name] self.transition_order.remove(name) except: raise UnknownTransitionError(name) def make_transition(self, name, next_state=None): """ Make & return a transition tuple based on `name`. This is a convenience function to simplify transition creation. Parameters: - `name`: a string, the name of the transition pattern & method. This `State` object must have a method called '`name`', and a dictionary `self.patterns` containing a key '`name`'. - `next_state`: a string, the name of the next `State` object for this transition. A value of ``None`` (or absent) implies no state change (i.e., continue with the same state). Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`. """ if next_state is None: next_state = self.__class__.__name__ try: pattern = self.patterns[name] if not hasattr(pattern, 'match'): pattern = re.compile(pattern) except KeyError: raise TransitionPatternNotFound( '%s.patterns[%r]' % (self.__class__.__name__, name)) try: method = getattr(self, name) except AttributeError: raise TransitionMethodNotFound( '%s.%s' % (self.__class__.__name__, name)) return (pattern, method, next_state) def make_transitions(self, name_list): """ Return a list of transition names and a transition mapping. Parameter `name_list`: a list, where each entry is either a transition name string, or a 1- or 2-tuple (transition name, optional next state name). """ stringtype = type('') names = [] transitions = {} for namestate in name_list: if type(namestate) is stringtype: transitions[namestate] = self.make_transition(namestate) names.append(namestate) else: transitions[namestate[0]] = self.make_transition(*namestate) names.append(namestate[0]) return names, transitions def no_match(self, context, transitions): """ Called when there is no match from `StateMachine.check_line()`. Return the same values returned by transition methods: - context: unchanged; - next state name: ``None``; - empty result list. Override in subclasses to catch this event. """ return context, None, [] def bof(self, context): """ Handle beginning-of-file. Return unchanged `context`, empty result. Override in subclasses. Parameter `context`: application-defined storage. 
""" return context, [] def eof(self, context): """ Handle end-of-file. Return empty result. Override in subclasses. Parameter `context`: application-defined storage. """ return [] def nop(self, match, context, next_state): """ A "do nothing" transition method. Return unchanged `context` & `next_state`, empty result. Useful for simple state changes (actionless transitions). """ return context, next_state, [] class StateMachineWS(StateMachine): """ `StateMachine` subclass specialized for whitespace recognition. There are three methods provided for extracting indented text blocks: - `get_indented()`: use when the indent is unknown. - `get_known_indented()`: use when the indent is known for all lines. - `get_first_known_indented()`: use when only the first line's indent is known. """ def get_indented(self, until_blank=False, strip_indent=True): """ Return a block of indented lines of text, and info. Extract an indented block where the indent is unknown for all lines. :Parameters: - `until_blank`: Stop collecting at the first blank line if true. - `strip_indent`: Strip common leading indent if true (default). :Return: - the indented block (a list of lines of text), - its indent, - its first line offset from BOF, and - whether or not it finished with a blank line. """ offset = self.abs_line_offset() indented, indent, blank_finish = self.input_lines.get_indented( self.line_offset, until_blank, strip_indent) if indented: self.next_line(len(indented) - 1) # advance to last indented line while indented and not indented[0].strip(): indented.trim_start() offset += 1 return indented, indent, offset, blank_finish def get_known_indented(self, indent, until_blank=False, strip_indent=True): """ Return an indented block and info. Extract an indented block where the indent is known for all lines. Starting with the current line, extract the entire text block with at least `indent` indentation (which must be whitespace, except for the first line). :Parameters: - `indent`: The number of indent columns/characters. - `until_blank`: Stop collecting at the first blank line if true. - `strip_indent`: Strip `indent` characters of indentation if true (default). :Return: - the indented block, - its first line offset from BOF, and - whether or not it finished with a blank line. """ offset = self.abs_line_offset() indented, indent, blank_finish = self.input_lines.get_indented( self.line_offset, until_blank, strip_indent, block_indent=indent) self.next_line(len(indented) - 1) # advance to last indented line while indented and not indented[0].strip(): indented.trim_start() offset += 1 return indented, offset, blank_finish def get_first_known_indented(self, indent, until_blank=False, strip_indent=True, strip_top=True): """ Return an indented block and info. Extract an indented block where the indent is known for the first line and unknown for all other lines. :Parameters: - `indent`: The first line's indent (# of columns/characters). - `until_blank`: Stop collecting at the first blank line if true (1). - `strip_indent`: Strip `indent` characters of indentation if true (1, default). - `strip_top`: Strip blank lines from the beginning of the block. :Return: - the indented block, - its indent, - its first line offset from BOF, and - whether or not it finished with a blank line. 
""" offset = self.abs_line_offset() indented, indent, blank_finish = self.input_lines.get_indented( self.line_offset, until_blank, strip_indent, first_indent=indent) self.next_line(len(indented) - 1) # advance to last indented line if strip_top: while indented and not indented[0].strip(): indented.trim_start() offset += 1 return indented, indent, offset, blank_finish class StateWS(State): """ State superclass specialized for whitespace (blank lines & indents). Use this class with `StateMachineWS`. The transitions 'blank' (for blank lines) and 'indent' (for indented text blocks) are added automatically, before any other transitions. The transition method `blank()` handles blank lines and `indent()` handles nested indented blocks. Indented blocks trigger a new state machine to be created by `indent()` and run. The class of the state machine to be created is in `indent_sm`, and the constructor keyword arguments are in the dictionary `indent_sm_kwargs`. The methods `known_indent()` and `firstknown_indent()` are provided for indented blocks where the indent (all lines' and first line's only, respectively) is known to the transition method, along with the attributes `known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method is triggered automatically. """ indent_sm = None """ The `StateMachine` class handling indented text blocks. If left as ``None``, `indent_sm` defaults to the value of `State.nested_sm`. Override it in subclasses to avoid the default. """ indent_sm_kwargs = None """ Keyword arguments dictionary, passed to the `indent_sm` constructor. If left as ``None``, `indent_sm_kwargs` defaults to the value of `State.nested_sm_kwargs`. Override it in subclasses to avoid the default. """ known_indent_sm = None """ The `StateMachine` class handling known-indented text blocks. If left as ``None``, `known_indent_sm` defaults to the value of `indent_sm`. Override it in subclasses to avoid the default. """ known_indent_sm_kwargs = None """ Keyword arguments dictionary, passed to the `known_indent_sm` constructor. If left as ``None``, `known_indent_sm_kwargs` defaults to the value of `indent_sm_kwargs`. Override it in subclasses to avoid the default. """ ws_patterns = {'blank': ' *$', 'indent': ' +'} """Patterns for default whitespace transitions. May be overridden in subclasses.""" ws_initial_transitions = ('blank', 'indent') """Default initial whitespace transitions, added before those listed in `State.initial_transitions`. May be overridden in subclasses.""" def __init__(self, state_machine, debug=False): """ Initialize a `StateSM` object; extends `State.__init__()`. Check for indent state machine attributes, set defaults if not set. """ State.__init__(self, state_machine, debug) if self.indent_sm is None: self.indent_sm = self.nested_sm if self.indent_sm_kwargs is None: self.indent_sm_kwargs = self.nested_sm_kwargs if self.known_indent_sm is None: self.known_indent_sm = self.indent_sm if self.known_indent_sm_kwargs is None: self.known_indent_sm_kwargs = self.indent_sm_kwargs def add_initial_transitions(self): """ Add whitespace-specific transitions before those defined in subclass. Extends `State.add_initial_transitions()`. """ State.add_initial_transitions(self) if self.patterns is None: self.patterns = {} self.patterns.update(self.ws_patterns) names, transitions = self.make_transitions( self.ws_initial_transitions) self.add_transitions(names, transitions) def blank(self, match, context, next_state): """Handle blank lines. Does nothing. 
Override in subclasses.""" return self.nop(match, context, next_state) def indent(self, match, context, next_state): """ Handle an indented text block. Extend or override in subclasses. Recursively run the registered state machine for indented blocks (`self.indent_sm`). """ indented, indent, line_offset, blank_finish = \ self.state_machine.get_indented() sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs) results = sm.run(indented, input_offset=line_offset) return context, next_state, results def known_indent(self, match, context, next_state): """ Handle a known-indent text block. Extend or override in subclasses. Recursively run the registered state machine for known-indent indented blocks (`self.known_indent_sm`). The indent is the length of the match, ``match.end()``. """ indented, line_offset, blank_finish = \ self.state_machine.get_known_indented(match.end()) sm = self.known_indent_sm(debug=self.debug, **self.known_indent_sm_kwargs) results = sm.run(indented, input_offset=line_offset) return context, next_state, results def first_known_indent(self, match, context, next_state): """ Handle an indented text block (first line's indent known). Extend or override in subclasses. Recursively run the registered state machine for known-indent indented blocks (`self.known_indent_sm`). The indent is the length of the match, ``match.end()``. """ indented, line_offset, blank_finish = \ self.state_machine.get_first_known_indented(match.end()) sm = self.known_indent_sm(debug=self.debug, **self.known_indent_sm_kwargs) results = sm.run(indented, input_offset=line_offset) return context, next_state, results class _SearchOverride: """ Mix-in class to override `StateMachine` regular expression behavior. Changes regular expression matching, from the default `re.match()` (succeeds only if the pattern matches at the start of `self.line`) to `re.search()` (succeeds if the pattern matches anywhere in `self.line`). When subclassing a `StateMachine`, list this class **first** in the inheritance list of the class definition. """ def match(self, pattern): """ Return the result of a regular expression search. Overrides `StateMachine.match()`. Parameter `pattern`: `re` compiled regular expression. """ return pattern.search(self.line) class SearchStateMachine(_SearchOverride, StateMachine): """`StateMachine` which uses `re.search()` instead of `re.match()`.""" pass class SearchStateMachineWS(_SearchOverride, StateMachineWS): """`StateMachineWS` which uses `re.search()` instead of `re.match()`.""" pass class ViewList: """ List with extended functionality: slices of ViewList objects are child lists, linked to their parents. Changes made to a child list also affect the parent list. A child list is effectively a "view" (in the SQL sense) of the parent list. Changes to parent lists, however, do *not* affect active child lists. If a parent list is changed, any active child lists should be recreated. The start and end of the slice can be trimmed using the `trim_start()` and `trim_end()` methods, without affecting the parent list. The link between child and parent lists can be broken by calling `disconnect()` on the child list. Also, ViewList objects keep track of the source & offset of each item. This information is accessible via the `source()`, `offset()`, and `info()` methods. 
""" def __init__(self, initlist=None, source=None, items=None, parent=None, parent_offset=None): self.data = [] """The actual list of data, flattened from various sources.""" self.items = [] """A list of (source, offset) pairs, same length as `self.data`: the source of each line and the offset of each line from the beginning of its source.""" self.parent = parent """The parent list.""" self.parent_offset = parent_offset """Offset of this list from the beginning of the parent list.""" if isinstance(initlist, ViewList): self.data = initlist.data[:] self.items = initlist.items[:] elif initlist is not None: self.data = list(initlist) if items: self.items = items else: self.items = [(source, i) for i in range(len(initlist))] assert len(self.data) == len(self.items), 'data mismatch' def __str__(self): return str(self.data) def __repr__(self): return '%s(%s, items=%s)' % (self.__class__.__name__, self.data, self.items) def __lt__(self, other): return self.data < self.__cast(other) def __le__(self, other): return self.data <= self.__cast(other) def __eq__(self, other): return self.data == self.__cast(other) def __ne__(self, other): return self.data != self.__cast(other) def __gt__(self, other): return self.data > self.__cast(other) def __ge__(self, other): return self.data >= self.__cast(other) def __cmp__(self, other): return cmp(self.data, self.__cast(other)) def __cast(self, other): if isinstance(other, ViewList): return other.data else: return other def __contains__(self, item): return item in self.data def __len__(self): return len(self.data) # The __getitem__()/__setitem__() methods check whether the index # is a slice first, since indexing a native list with a slice object # just works. def __getitem__(self, i): if isinstance(i, types.SliceType): assert i.step in (None, 1), 'cannot handle slice with stride' return self.__class__(self.data[i.start:i.stop], items=self.items[i.start:i.stop], parent=self, parent_offset=i.start or 0) else: return self.data[i] def __setitem__(self, i, item): if isinstance(i, types.SliceType): assert i.step in (None, 1), 'cannot handle slice with stride' if not isinstance(item, ViewList): raise TypeError('assigning non-ViewList to ViewList slice') self.data[i.start:i.stop] = item.data self.items[i.start:i.stop] = item.items assert len(self.data) == len(self.items), 'data mismatch' if self.parent: self.parent[(i.start or 0) + self.parent_offset : (i.stop or len(self)) + self.parent_offset] = item else: self.data[i] = item if self.parent: self.parent[i + self.parent_offset] = item def __delitem__(self, i): try: del self.data[i] del self.items[i] if self.parent: del self.parent[i + self.parent_offset] except TypeError: assert i.step is None, 'cannot handle slice with stride' del self.data[i.start:i.stop] del self.items[i.start:i.stop] if self.parent: del self.parent[(i.start or 0) + self.parent_offset : (i.stop or len(self)) + self.parent_offset] def __add__(self, other): if isinstance(other, ViewList): return self.__class__(self.data + other.data, items=(self.items + other.items)) else: raise TypeError('adding non-ViewList to a ViewList') def __radd__(self, other): if isinstance(other, ViewList): return self.__class__(other.data + self.data, items=(other.items + self.items)) else: raise TypeError('adding ViewList to a non-ViewList') def __iadd__(self, other): if isinstance(other, ViewList): self.data += other.data else: raise TypeError('argument to += must be a ViewList') return self def __mul__(self, n): return self.__class__(self.data * n, items=(self.items * n)) 
__rmul__ = __mul__ def __imul__(self, n): self.data *= n self.items *= n return self def extend(self, other): if not isinstance(other, ViewList): raise TypeError('extending a ViewList with a non-ViewList') if self.parent: self.parent.insert(len(self.data) + self.parent_offset, other) self.data.extend(other.data) self.items.extend(other.items) def append(self, item, source=None, offset=0): if source is None: self.extend(item) else: if self.parent: self.parent.insert(len(self.data) + self.parent_offset, item, source, offset) self.data.append(item) self.items.append((source, offset)) def insert(self, i, item, source=None, offset=0): if source is None: if not isinstance(item, ViewList): raise TypeError('inserting non-ViewList with no source given') self.data[i:i] = item.data self.items[i:i] = item.items if self.parent: index = (len(self.data) + i) % len(self.data) self.parent.insert(index + self.parent_offset, item) else: self.data.insert(i, item) self.items.insert(i, (source, offset)) if self.parent: index = (len(self.data) + i) % len(self.data) self.parent.insert(index + self.parent_offset, item, source, offset) def pop(self, i=-1): if self.parent: index = (len(self.data) + i) % len(self.data) self.parent.pop(index + self.parent_offset) self.items.pop(i) return self.data.pop(i) def trim_start(self, n=1): """ Remove items from the start of the list, without touching the parent. """ if n > len(self.data): raise IndexError("Size of trim too large; can't trim %s items " "from a list of size %s." % (n, len(self.data))) elif n < 0: raise IndexError('Trim size must be >= 0.') del self.data[:n] del self.items[:n] if self.parent: self.parent_offset += n def trim_end(self, n=1): """ Remove items from the end of the list, without touching the parent. """ if n > len(self.data): raise IndexError("Size of trim too large; can't trim %s items " "from a list of size %s." % (n, len(self.data))) elif n < 0: raise IndexError('Trim size must be >= 0.') del self.data[-n:] del self.items[-n:] def remove(self, item): index = self.index(item) del self[index] def count(self, item): return self.data.count(item) def index(self, item): return self.data.index(item) def reverse(self): self.data.reverse() self.items.reverse() self.parent = None def sort(self, *args): tmp = zip(self.data, self.items) tmp.sort(*args) self.data = [entry[0] for entry in tmp] self.items = [entry[1] for entry in tmp] self.parent = None def info(self, i): """Return source & offset for index `i`.""" try: return self.items[i] except IndexError: if i == len(self.data): # Just past the end return self.items[i - 1][0], None else: raise def source(self, i): """Return source for index `i`.""" return self.info(i)[0] def offset(self, i): """Return offset for index `i`.""" return self.info(i)[1] def disconnect(self): """Break link between this list and parent list.""" self.parent = None def xitems(self): """Return iterator yielding (source, offset, value) tuples.""" for (value, (source, offset)) in zip(self.data, self.items): yield (source, offset, value) def pprint(self): """Print the list in `grep` format (`source:offset:value` lines)""" for line in self.xitems(): print "%s:%d:%s" % line class StringList(ViewList): """A `ViewList` with string-specific methods.""" def trim_left(self, length, start=0, end=sys.maxint): """ Trim `length` characters off the beginning of each item, in-place, from index `start` to `end`. No whitespace-checking is done on the trimmed text. Does not affect slice parent. 
""" self.data[start:end] = [line[length:] for line in self.data[start:end]] def get_text_block(self, start, flush_left=False): """ Return a contiguous block of text. If `flush_left` is true, raise `UnexpectedIndentationError` if an indented line is encountered before the text block ends (with a blank line). """ end = start last = len(self.data) while end < last: line = self.data[end] if not line.strip(): break if flush_left and (line[0] == ' '): source, offset = self.info(end) raise UnexpectedIndentationError(self[start:end], source, offset + 1) end += 1 return self[start:end] def get_indented(self, start=0, until_blank=False, strip_indent=True, block_indent=None, first_indent=None): """ Extract and return a StringList of indented lines of text. Collect all lines with indentation, determine the minimum indentation, remove the minimum indentation from all indented lines (unless `strip_indent` is false), and return them. All lines up to but not including the first unindented line will be returned. :Parameters: - `start`: The index of the first line to examine. - `until_blank`: Stop collecting at the first blank line if true. - `strip_indent`: Strip common leading indent if true (default). - `block_indent`: The indent of the entire block, if known. - `first_indent`: The indent of the first line, if known. :Return: - a StringList of indented lines with mininum indent removed; - the amount of the indent; - a boolean: did the indented block finish with a blank line or EOF? """ indent = block_indent # start with None if unknown end = start if block_indent is not None and first_indent is None: first_indent = block_indent if first_indent is not None: end += 1 last = len(self.data) while end < last: line = self.data[end] if line and (line[0] != ' ' or (block_indent is not None and line[:block_indent].strip())): # Line not indented or insufficiently indented. # Block finished properly iff the last indented line blank: blank_finish = ((end > start) and not self.data[end - 1].strip()) break stripped = line.lstrip() if not stripped: # blank line if until_blank: blank_finish = 1 break elif block_indent is None: line_indent = len(line) - len(stripped) if indent is None: indent = line_indent else: indent = min(indent, line_indent) end += 1 else: blank_finish = 1 # block ends at end of lines block = self[start:end] if first_indent is not None and block: block.data[0] = block.data[0][first_indent:] if indent and strip_indent: block.trim_left(indent, start=(first_indent is not None)) return block, indent or 0, blank_finish def get_2D_block(self, top, left, bottom, right, strip_indent=True): block = self[top:bottom] indent = right for i in range(len(block.data)): # get slice from line, care for combining characters ci = utils.column_indices(block.data[i]) try: left = ci[left] except IndexError: left += len(block.data[i]) - len(ci) try: right = ci[right] except IndexError: right += len(block.data[i]) - len(ci) block.data[i] = line = block.data[i][left:right].rstrip() if line: indent = min(indent, len(line) - len(line.lstrip())) if strip_indent and 0 < indent < right: block.data = [line[indent:] for line in block.data] return block def pad_double_width(self, pad_char): """ Pad all double-width characters in self by appending `pad_char` to each. For East Asian language support. 
""" if hasattr(unicodedata, 'east_asian_width'): east_asian_width = unicodedata.east_asian_width else: return # new in Python 2.4 for i in range(len(self.data)): line = self.data[i] if isinstance(line, unicode): new = [] for char in line: new.append(char) if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width new.append(pad_char) self.data[i] = ''.join(new) def replace(self, old, new): """Replace all occurrences of substring `old` with `new`.""" for i in range(len(self.data)): self.data[i] = self.data[i].replace(old, new) class StateMachineError(Exception): pass class UnknownStateError(StateMachineError): pass class DuplicateStateError(StateMachineError): pass class UnknownTransitionError(StateMachineError): pass class DuplicateTransitionError(StateMachineError): pass class TransitionPatternNotFound(StateMachineError): pass class TransitionMethodNotFound(StateMachineError): pass class UnexpectedIndentationError(StateMachineError): pass class TransitionCorrection(Exception): """ Raise from within a transition method to switch to another transition. Raise with one argument, the new transition name. """ class StateCorrection(Exception): """ Raise from within a transition method to switch to another state. Raise with one or two arguments: new state name, and an optional new transition name. """ def string2lines(astring, tab_width=8, convert_whitespace=False, whitespace=re.compile('[\v\f]')): """ Return a list of one-line strings with tabs expanded, no newlines, and trailing whitespace stripped. Each tab is expanded with between 1 and `tab_width` spaces, so that the next character's index becomes a multiple of `tab_width` (8 by default). Parameters: - `astring`: a multi-line string. - `tab_width`: the number of columns between tab stops. - `convert_whitespace`: convert form feeds and vertical tabs to spaces? """ if convert_whitespace: astring = whitespace.sub(' ', astring) return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()] def _exception_data(): """ Return exception information: - the exception's class name; - the exception object; - the name of the file containing the offending code; - the line number of the offending code; - the function name of the offending code. """ type, value, traceback = sys.exc_info() while traceback.tb_next: traceback = traceback.tb_next code = traceback.tb_frame.f_code return (type.__name__, value, code.co_filename, traceback.tb_lineno, code.co_name)
ddd332/presto
presto-docs/target/sphinx/docutils/statemachine.py
Python
apache-2.0
57566
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.image_ops.""" import colorsys import contextlib import functools import itertools import math import os import time from absl.testing import parameterized import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.compat import compat from tensorflow.python.data.experimental.ops import get_single_element from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import config as tf_config from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_image_ops from tensorflow.python.ops import image_ops from tensorflow.python.ops import image_ops_impl from tensorflow.python.ops import io_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import stateless_random_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.platform import test class RGBToHSVTest(test_util.TensorFlowTestCase): def testBatch(self): # Build an arbitrary RGB image np.random.seed(7) batch_size = 5 shape = (batch_size, 2, 7, 3) for nptype in [np.float32, np.float64]: inp = np.random.rand(*shape).astype(nptype) # Convert to HSV and back, as a batch and individually with self.cached_session(): batch0 = constant_op.constant(inp) batch1 = image_ops.rgb_to_hsv(batch0) batch2 = image_ops.hsv_to_rgb(batch1) split0 = array_ops.unstack(batch0) split1 = list(map(image_ops.rgb_to_hsv, split0)) split2 = list(map(image_ops.hsv_to_rgb, split1)) join1 = array_ops.stack(split1) join2 = array_ops.stack(split2) batch1, batch2, join1, join2 = self.evaluate( [batch1, batch2, join1, join2]) # Verify that processing batch elements together is the same as separate self.assertAllClose(batch1, join1) self.assertAllClose(batch2, join2) self.assertAllClose(batch2, inp) def testRGBToHSVRoundTrip(self): data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] for nptype in [np.float32, np.float64]: rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255. 
with self.cached_session(): hsv = image_ops.rgb_to_hsv(rgb_np) rgb = image_ops.hsv_to_rgb(hsv) rgb_tf = self.evaluate(rgb) self.assertAllClose(rgb_tf, rgb_np) def testRGBToHSVDataTypes(self): # Test case for GitHub issue 54855. data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] for dtype in [ dtypes.float32, dtypes.float64, dtypes.float16, dtypes.bfloat16 ]: with self.cached_session(use_gpu=False): rgb = math_ops.cast( np.array(data, np.float32).reshape([2, 2, 3]) / 255., dtype=dtype) hsv = image_ops.rgb_to_hsv(rgb) val = image_ops.hsv_to_rgb(hsv) out = self.evaluate(val) self.assertAllClose(rgb, out, atol=1e-2) class RGBToYIQTest(test_util.TensorFlowTestCase): @test_util.run_without_tensor_float_32( "Calls rgb_to_yiq and yiq_to_rgb, which use matmul") def testBatch(self): # Build an arbitrary RGB image np.random.seed(7) batch_size = 5 shape = (batch_size, 2, 7, 3) for nptype in [np.float32, np.float64]: inp = np.random.rand(*shape).astype(nptype) # Convert to YIQ and back, as a batch and individually with self.cached_session(): batch0 = constant_op.constant(inp) batch1 = image_ops.rgb_to_yiq(batch0) batch2 = image_ops.yiq_to_rgb(batch1) split0 = array_ops.unstack(batch0) split1 = list(map(image_ops.rgb_to_yiq, split0)) split2 = list(map(image_ops.yiq_to_rgb, split1)) join1 = array_ops.stack(split1) join2 = array_ops.stack(split2) batch1, batch2, join1, join2 = self.evaluate( [batch1, batch2, join1, join2]) # Verify that processing batch elements together is the same as separate self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4) self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4) self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4) class RGBToYUVTest(test_util.TensorFlowTestCase): @test_util.run_without_tensor_float_32( "Calls rgb_to_yuv and yuv_to_rgb, which use matmul") def testBatch(self): # Build an arbitrary RGB image np.random.seed(7) batch_size = 5 shape = (batch_size, 2, 7, 3) for nptype in [np.float32, np.float64]: inp = np.random.rand(*shape).astype(nptype) # Convert to YUV and back, as a batch and individually with self.cached_session(): batch0 = constant_op.constant(inp) batch1 = image_ops.rgb_to_yuv(batch0) batch2 = image_ops.yuv_to_rgb(batch1) split0 = array_ops.unstack(batch0) split1 = list(map(image_ops.rgb_to_yuv, split0)) split2 = list(map(image_ops.yuv_to_rgb, split1)) join1 = array_ops.stack(split1) join2 = array_ops.stack(split2) batch1, batch2, join1, join2 = self.evaluate( [batch1, batch2, join1, join2]) # Verify that processing batch elements together is the same as separate self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4) self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4) self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4) class GrayscaleToRGBTest(test_util.TensorFlowTestCase): def _RGBToGrayscale(self, images): is_batch = True if len(images.shape) == 3: is_batch = False images = np.expand_dims(images, axis=0) out_shape = images.shape[0:3] + (1,) out = np.zeros(shape=out_shape, dtype=np.uint8) for batch in range(images.shape[0]): for y in range(images.shape[1]): for x in range(images.shape[2]): red = images[batch, y, x, 0] green = images[batch, y, x, 1] blue = images[batch, y, x, 2] gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue out[batch, y, x, 0] = int(gray) if not is_batch: out = np.squeeze(out, axis=0) return out def _TestRGBToGrayscale(self, x_np): y_np = self._RGBToGrayscale(x_np) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.rgb_to_grayscale(x_tf) y_tf = self.evaluate(y) 
self.assertAllEqual(y_tf, y_np) def testBasicRGBToGrayscale(self): # 4-D input with batch dimension. x_np = np.array( [[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3]) self._TestRGBToGrayscale(x_np) # 3-D input with no batch dimension. x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3]) self._TestRGBToGrayscale(x_np) def testBasicGrayscaleToRGB(self): # 4-D input with batch dimension. x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1]) y_np = np.array( [[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.grayscale_to_rgb(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) # 3-D input with no batch dimension. x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1]) y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.grayscale_to_rgb(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testGrayscaleToRGBInputValidation(self): # tests whether the grayscale_to_rgb function raises # an exception if the input images' last dimension is # not of size 1, i.e. the images have shape # [batch size, height, width] or [height, width] # tests if an exception is raised if a three dimensional # input is used, i.e. the images have shape [batch size, height, width] with self.cached_session(): # 3-D input with batch dimension. x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2]) x_tf = constant_op.constant(x_np, shape=x_np.shape) # this is the error message we expect the function to raise err_msg = "Last dimension of a grayscale image should be size 1" with self.assertRaisesRegex(ValueError, err_msg): image_ops.grayscale_to_rgb(x_tf) # tests if an exception is raised if a two dimensional # input is used, i.e. the images have shape [height, width] with self.cached_session(): # 1-D input without batch dimension. x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2]) x_tf = constant_op.constant(x_np, shape=x_np.shape) # this is the error message we expect the function to raise err_msg = "must be at least two-dimensional" with self.assertRaisesRegex(ValueError, err_msg): image_ops.grayscale_to_rgb(x_tf) def testShapeInference(self): # Shape function requires placeholders and a graph. 
with ops.Graph().as_default(): # Shape inference works and produces expected output where possible rgb_shape = [7, None, 19, 3] gray_shape = rgb_shape[:-1] + [1] with self.cached_session(): rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape) gray = image_ops.rgb_to_grayscale(rgb_tf) self.assertEqual(gray_shape, gray.get_shape().as_list()) with self.cached_session(): gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape) rgb = image_ops.grayscale_to_rgb(gray_tf) self.assertEqual(rgb_shape, rgb.get_shape().as_list()) # Shape inference does not break for unknown shapes with self.cached_session(): rgb_tf_unknown = array_ops.placeholder(dtypes.uint8) gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown) self.assertFalse(gray_unknown.get_shape()) with self.cached_session(): gray_tf_unknown = array_ops.placeholder(dtypes.uint8) rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown) self.assertFalse(rgb_unknown.get_shape()) class AdjustGamma(test_util.TensorFlowTestCase): def test_adjust_gamma_less_zero_float32(self): """White image should be returned for gamma equal to zero""" with self.cached_session(): x_data = np.random.uniform(0, 1.0, (8, 8)) x_np = np.array(x_data, dtype=np.float32) x = constant_op.constant(x_np, shape=x_np.shape) err_msg = "Gamma should be a non-negative real number" with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): image_ops.adjust_gamma(x, gamma=-1) def test_adjust_gamma_less_zero_uint8(self): """White image should be returned for gamma equal to zero""" with self.cached_session(): x_data = np.random.uniform(0, 255, (8, 8)) x_np = np.array(x_data, dtype=np.uint8) x = constant_op.constant(x_np, shape=x_np.shape) err_msg = "Gamma should be a non-negative real number" with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): image_ops.adjust_gamma(x, gamma=-1) def test_adjust_gamma_less_zero_tensor(self): """White image should be returned for gamma equal to zero""" with self.cached_session(): x_data = np.random.uniform(0, 1.0, (8, 8)) x_np = np.array(x_data, dtype=np.float32) x = constant_op.constant(x_np, shape=x_np.shape) y = constant_op.constant(-1.0, dtype=dtypes.float32) err_msg = "Gamma should be a non-negative real number" with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): image = image_ops.adjust_gamma(x, gamma=y) self.evaluate(image) def _test_adjust_gamma_uint8(self, gamma): """Verifying the output with expected results for gamma correction for uint8 images """ with self.cached_session(): x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8) x = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.adjust_gamma(x, gamma=gamma) y_tf = np.trunc(self.evaluate(y)) # calculate gamma correction using numpy # firstly, transform uint8 to float representation # then perform correction y_np = np.power(x_np / 255.0, gamma) # convert correct numpy image back to uint8 type y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0)) self.assertAllClose(y_tf, y_np, 1e-6) def _test_adjust_gamma_float32(self, gamma): """Verifying the output with expected results for gamma correction for float32 images """ with self.cached_session(): x_np = np.random.uniform(0, 1.0, (8, 8)) x = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.adjust_gamma(x, gamma=gamma) y_tf = self.evaluate(y) y_np = np.clip(np.power(x_np, gamma), 0, 1.0) self.assertAllClose(y_tf, y_np, 1e-6) def test_adjust_gamma_one_float32(self): """Same image should be returned for gamma equal to one""" 
self._test_adjust_gamma_float32(1.0) def test_adjust_gamma_one_uint8(self): self._test_adjust_gamma_uint8(1.0) def test_adjust_gamma_zero_uint8(self): """White image should be returned for gamma equal to zero for uint8 images """ self._test_adjust_gamma_uint8(gamma=0.0) def test_adjust_gamma_less_one_uint8(self): """Verifying the output with expected results for gamma correction with gamma equal to half for uint8 images """ self._test_adjust_gamma_uint8(gamma=0.5) def test_adjust_gamma_greater_one_uint8(self): """Verifying the output with expected results for gamma correction for uint8 images """ self._test_adjust_gamma_uint8(gamma=1.0) def test_adjust_gamma_less_one_float32(self): """Verifying the output with expected results for gamma correction with gamma equal to half for float32 images """ self._test_adjust_gamma_float32(0.5) def test_adjust_gamma_greater_one_float32(self): """Verifying the output with expected results for gamma correction with gamma equal to two for float32 images """ self._test_adjust_gamma_float32(1.0) def test_adjust_gamma_zero_float32(self): """White image should be returned for gamma equal to zero for float32 images """ self._test_adjust_gamma_float32(0.0) class AdjustHueTest(test_util.TensorFlowTestCase): def testAdjustNegativeHue(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) delta = -0.25 y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_hue(x, delta) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testAdjustPositiveHue(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) delta = 0.25 y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_hue(x, delta) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testBatchAdjustHue(self): x_shape = [2, 1, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) delta = 0.25 y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_hue(x, delta) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def _adjustHueNp(self, x_np, delta_h): self.assertEqual(x_np.shape[-1], 3) x_v = x_np.reshape([-1, 3]) y_v = np.ndarray(x_v.shape, dtype=x_v.dtype) channel_count = x_v.shape[0] for i in range(channel_count): r = x_v[i][0] g = x_v[i][1] b = x_v[i][2] h, s, v = colorsys.rgb_to_hsv(r, g, b) h += delta_h h = math.fmod(h + 10.0, 1.0) r, g, b = colorsys.hsv_to_rgb(h, s, v) y_v[i][0] = r y_v[i][1] = g y_v[i][2] = b return y_v.reshape(x_np.shape) def _adjustHueTf(self, x_np, delta_h): with self.cached_session(): x = constant_op.constant(x_np) y = image_ops.adjust_hue(x, delta_h) y_tf = self.evaluate(y) return y_tf def testAdjustRandomHue(self): x_shapes = [ [2, 2, 3], [4, 2, 3], [2, 4, 3], [2, 5, 3], [1000, 1, 3], ] test_styles = [ "all_random", "rg_same", "rb_same", "gb_same", "rgb_same", ] for x_shape in x_shapes: for test_style in test_styles: x_np = np.random.rand(*x_shape) * 255. 
delta_h = np.random.rand() * 2.0 - 1.0 if test_style == "all_random": pass elif test_style == "rg_same": x_np[..., 1] = x_np[..., 0] elif test_style == "rb_same": x_np[..., 2] = x_np[..., 0] elif test_style == "gb_same": x_np[..., 2] = x_np[..., 1] elif test_style == "rgb_same": x_np[..., 1] = x_np[..., 0] x_np[..., 2] = x_np[..., 0] else: raise AssertionError("Invalid test style: %s" % (test_style)) y_np = self._adjustHueNp(x_np, delta_h) y_tf = self._adjustHueTf(x_np, delta_h) self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5) def testInvalidShapes(self): fused = False if not fused: # The tests are known to pass with the fused adjust_hue. We will enable # them when the fused implementation is the default. return x_np = np.random.rand(2, 3) * 255. delta_h = np.random.rand() * 2.0 - 1.0 fused = False with self.assertRaisesRegex(ValueError, "Shape must be at least rank 3"): self._adjustHueTf(x_np, delta_h) x_np = np.random.rand(4, 2, 4) * 255. delta_h = np.random.rand() * 2.0 - 1.0 with self.assertRaisesOpError("input must have 3 channels"): self._adjustHueTf(x_np, delta_h) def testInvalidDeltaValue(self): """Delta value must be in the inetrval of [-1,1].""" if not context.executing_eagerly(): self.skipTest("Eager mode only") else: with self.cached_session(): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) x = constant_op.constant(x_np, shape=x_np.shape) err_msg = r"delta must be in the interval \[-1, 1\]" with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): image_ops.adjust_hue(x, delta=1.5) class FlipImageBenchmark(test.Benchmark): def _benchmarkFlipLeftRight(self, device, cpu_count): image_shape = [299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with session.Session("", graph=ops.Graph(), config=config) as sess: with ops.device(device): inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) run_op = image_ops.flip_left_right(inputs) self.evaluate(variables.global_variables_initializer()) for i in range(warmup_rounds + benchmark_rounds): if i == warmup_rounds: start = time.time() self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all") print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkFlipLeftRight_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def _benchmarkRandomFlipLeftRight(self, device, cpu_count): image_shape = [299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with session.Session("", graph=ops.Graph(), config=config) as sess: with ops.device(device): inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) run_op = image_ops.random_flip_left_right(inputs) self.evaluate(variables.global_variables_initializer()) for i in range(warmup_rounds + benchmark_rounds): if i == warmup_rounds: start = time.time() self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % 
(cpu_count if cpu_count is not None else "_all") print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count): image_shape = [16, 299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with session.Session("", graph=ops.Graph(), config=config) as sess: with ops.device(device): inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) run_op = image_ops.random_flip_left_right(inputs) self.evaluate(variables.global_variables_initializer()) for i in range(warmup_rounds + benchmark_rounds): if i == warmup_rounds: start = time.time() self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all") print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: " "%.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def benchmarkFlipLeftRightCpu1(self): self._benchmarkFlipLeftRight("/cpu:0", 1) def benchmarkFlipLeftRightCpuAll(self): self._benchmarkFlipLeftRight("/cpu:0", None) def benchmarkFlipLeftRightGpu(self): self._benchmarkFlipLeftRight(test.gpu_device_name(), None) def benchmarkRandomFlipLeftRightCpu1(self): self._benchmarkRandomFlipLeftRight("/cpu:0", 1) def benchmarkRandomFlipLeftRightCpuAll(self): self._benchmarkRandomFlipLeftRight("/cpu:0", None) def benchmarkRandomFlipLeftRightGpu(self): self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None) def benchmarkBatchedRandomFlipLeftRightCpu1(self): self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1) def benchmarkBatchedRandomFlipLeftRightCpuAll(self): self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None) def benchmarkBatchedRandomFlipLeftRightGpu(self): self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None) class AdjustHueBenchmark(test.Benchmark): def _benchmarkAdjustHue(self, device, cpu_count): image_shape = [299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with self.benchmark_session(config=config, device=device) as sess: inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) delta = constant_op.constant(0.1, dtype=dtypes.float32) outputs = image_ops.adjust_hue(inputs, delta) run_op = control_flow_ops.group(outputs) self.evaluate(variables.global_variables_initializer()) for i in range(warmup_rounds + benchmark_rounds): if i == warmup_rounds: start = time.time() self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all") print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkAdjustHue_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def benchmarkAdjustHueCpu1(self): self._benchmarkAdjustHue("/cpu:0", 1) def 
benchmarkAdjustHueCpuAll(self): self._benchmarkAdjustHue("/cpu:0", None) def benchmarkAdjustHueGpu(self): self._benchmarkAdjustHue(test.gpu_device_name(), None) class AdjustSaturationBenchmark(test.Benchmark): def _benchmarkAdjustSaturation(self, device, cpu_count): image_shape = [299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with self.benchmark_session(config=config, device=device) as sess: inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) delta = constant_op.constant(0.1, dtype=dtypes.float32) outputs = image_ops.adjust_saturation(inputs, delta) run_op = control_flow_ops.group(outputs) self.evaluate(variables.global_variables_initializer()) for _ in range(warmup_rounds): self.evaluate(run_op) start = time.time() for _ in range(benchmark_rounds): self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all") print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkAdjustSaturation_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def benchmarkAdjustSaturationCpu1(self): self._benchmarkAdjustSaturation("/cpu:0", 1) def benchmarkAdjustSaturationCpuAll(self): self._benchmarkAdjustSaturation("/cpu:0", None) def benchmarkAdjustSaturationGpu(self): self._benchmarkAdjustSaturation(test.gpu_device_name(), None) class ResizeBilinearBenchmark(test.Benchmark): def _benchmarkResize(self, image_size, num_channels): batch_size = 1 num_ops = 1000 img = variables.Variable( random_ops.random_normal( [batch_size, image_size[0], image_size[1], num_channels]), name="img") deps = [] for _ in range(num_ops): with ops.control_dependencies(deps): resize_op = image_ops.resize_bilinear( img, [299, 299], align_corners=False) deps = [resize_op] benchmark_op = control_flow_ops.group(*deps) with self.benchmark_session() as sess: self.evaluate(variables.global_variables_initializer()) results = self.run_op_benchmark( sess, benchmark_op, name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1], num_channels))) print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"] / (batch_size * num_ops))) def benchmarkSimilar3Channel(self): self._benchmarkResize((183, 229), 3) def benchmarkScaleUp3Channel(self): self._benchmarkResize((141, 186), 3) def benchmarkScaleDown3Channel(self): self._benchmarkResize((749, 603), 3) def benchmarkSimilar1Channel(self): self._benchmarkResize((183, 229), 1) def benchmarkScaleUp1Channel(self): self._benchmarkResize((141, 186), 1) def benchmarkScaleDown1Channel(self): self._benchmarkResize((749, 603), 1) class ResizeBicubicBenchmark(test.Benchmark): def _benchmarkResize(self, image_size, num_channels): batch_size = 1 num_ops = 1000 img = variables.Variable( random_ops.random_normal( [batch_size, image_size[0], image_size[1], num_channels]), name="img") deps = [] for _ in range(num_ops): with ops.control_dependencies(deps): resize_op = image_ops.resize_bicubic( img, [299, 299], align_corners=False) deps = [resize_op] benchmark_op = control_flow_ops.group(*deps) with self.benchmark_session() as sess: self.evaluate(variables.global_variables_initializer()) results = self.run_op_benchmark( sess, benchmark_op, min_iters=20, name=("resize_bicubic_%s_%s_%s" % 
(image_size[0], image_size[1], num_channels))) print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"] / (batch_size * num_ops))) def benchmarkSimilar3Channel(self): self._benchmarkResize((183, 229), 3) def benchmarkScaleUp3Channel(self): self._benchmarkResize((141, 186), 3) def benchmarkScaleDown3Channel(self): self._benchmarkResize((749, 603), 3) def benchmarkSimilar1Channel(self): self._benchmarkResize((183, 229), 1) def benchmarkScaleUp1Channel(self): self._benchmarkResize((141, 186), 1) def benchmarkScaleDown1Channel(self): self._benchmarkResize((749, 603), 1) def benchmarkSimilar4Channel(self): self._benchmarkResize((183, 229), 4) def benchmarkScaleUp4Channel(self): self._benchmarkResize((141, 186), 4) def benchmarkScaleDown4Channel(self): self._benchmarkResize((749, 603), 4) class ResizeAreaBenchmark(test.Benchmark): def _benchmarkResize(self, image_size, num_channels): batch_size = 1 num_ops = 1000 img = variables.Variable( random_ops.random_normal( [batch_size, image_size[0], image_size[1], num_channels]), name="img") deps = [] for _ in range(num_ops): with ops.control_dependencies(deps): resize_op = image_ops.resize_area(img, [299, 299], align_corners=False) deps = [resize_op] benchmark_op = control_flow_ops.group(*deps) with self.benchmark_session() as sess: self.evaluate(variables.global_variables_initializer()) results = self.run_op_benchmark( sess, benchmark_op, name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1], num_channels))) print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"] / (batch_size * num_ops))) def benchmarkSimilar3Channel(self): self._benchmarkResize((183, 229), 3) def benchmarkScaleUp3Channel(self): self._benchmarkResize((141, 186), 3) def benchmarkScaleDown3Channel(self): self._benchmarkResize((749, 603), 3) def benchmarkSimilar1Channel(self): self._benchmarkResize((183, 229), 1) def benchmarkScaleUp1Channel(self): self._benchmarkResize((141, 186), 1) def benchmarkScaleDown1Channel(self): self._benchmarkResize((749, 603), 1) class AdjustSaturationTest(test_util.TensorFlowTestCase): def testHalfSaturation(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) saturation_factor = 0.5 y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_saturation(x, saturation_factor) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testTwiceSaturation(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) saturation_factor = 2.0 y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_saturation(x, saturation_factor) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testBatchSaturation(self): x_shape = [2, 1, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) saturation_factor = 0.5 y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_saturation(x, saturation_factor) y_tf = self.evaluate(y) 
self.assertAllEqual(y_tf, y_np) def _adjustSaturationNp(self, x_np, scale): self.assertEqual(x_np.shape[-1], 3) x_v = x_np.reshape([-1, 3]) y_v = np.ndarray(x_v.shape, dtype=x_v.dtype) channel_count = x_v.shape[0] for i in range(channel_count): r = x_v[i][0] g = x_v[i][1] b = x_v[i][2] h, s, v = colorsys.rgb_to_hsv(r, g, b) s *= scale s = min(1.0, max(0.0, s)) r, g, b = colorsys.hsv_to_rgb(h, s, v) y_v[i][0] = r y_v[i][1] = g y_v[i][2] = b return y_v.reshape(x_np.shape) def testAdjustRandomSaturation(self): x_shapes = [ [2, 2, 3], [4, 2, 3], [2, 4, 3], [2, 5, 3], [1000, 1, 3], ] test_styles = [ "all_random", "rg_same", "rb_same", "gb_same", "rgb_same", ] with self.cached_session(): for x_shape in x_shapes: for test_style in test_styles: x_np = np.random.rand(*x_shape) * 255. scale = np.random.rand() if test_style == "all_random": pass elif test_style == "rg_same": x_np[..., 1] = x_np[..., 0] elif test_style == "rb_same": x_np[..., 2] = x_np[..., 0] elif test_style == "gb_same": x_np[..., 2] = x_np[..., 1] elif test_style == "rgb_same": x_np[..., 1] = x_np[..., 0] x_np[..., 2] = x_np[..., 0] else: raise AssertionError("Invalid test style: %s" % (test_style)) y_baseline = self._adjustSaturationNp(x_np, scale) y_fused = self.evaluate(image_ops.adjust_saturation(x_np, scale)) self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5) class FlipTransposeRotateTest(test_util.TensorFlowTestCase, parameterized.TestCase): def testInvolutionLeftRight(self): x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testInvolutionLeftRightWithBatch(self): x_np = np.array( [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testLeftRight(self): x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_left_right(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testLeftRightWithBatch(self): x_np = np.array( [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]], dtype=np.uint8).reshape([2, 2, 3, 1]) y_np = np.array( [[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_left_right(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testRandomFlipLeftRightStateful(self): # Test random flip with single seed (stateful). 
with ops.Graph().as_default(): x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1]) seed = 42 with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.random_flip_left_right(x_tf, seed=seed) self.assertTrue(y.op.name.startswith("random_flip_left_right")) count_flipped = 0 count_unflipped = 0 for _ in range(100): y_tf = self.evaluate(y) if y_tf[0][0] == 1: self.assertAllEqual(y_tf, x_np) count_unflipped += 1 else: self.assertAllEqual(y_tf, y_np) count_flipped += 1 # 100 trials # Mean: 50 # Std Dev: ~5 # Six Sigma: 50 - (5 * 6) = 20 self.assertGreaterEqual(count_flipped, 20) self.assertGreaterEqual(count_unflipped, 20) def testRandomFlipLeftRight(self): x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) count_flipped = 0 count_unflipped = 0 for seed in range(100): y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed)) if y_tf[0][0] == 1: self.assertAllEqual(y_tf, x_np) count_unflipped += 1 else: self.assertAllEqual(y_tf, y_np) count_flipped += 1 self.assertEqual(count_flipped, 45) self.assertEqual(count_unflipped, 55) # TODO(b/162345082): stateless random op generates different random number # with xla_gpu. Update tests such that there is a single ground truth result # to test against. @parameterized.named_parameters( ("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right), ("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down), ) def testRandomFlipStateless(self, func): with test_util.use_gpu(): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([2, 3, 1]) if "RandomFlipUpDown" in self.id(): y_np = np.array( [[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) x_tf = constant_op.constant(x_np, shape=x_np.shape) iterations = 2 flip_counts = [None for _ in range(iterations)] flip_sequences = ["" for _ in range(iterations)] test_seed = (1, 2) split_seeds = stateless_random_ops.split(test_seed, 10) seeds_list = self.evaluate(split_seeds) for i in range(iterations): count_flipped = 0 count_unflipped = 0 flip_seq = "" for seed in seeds_list: y_tf = func(x_tf, seed=seed) y_tf_eval = self.evaluate(y_tf) if y_tf_eval[0][0] == 1: self.assertAllEqual(y_tf_eval, x_np) count_unflipped += 1 flip_seq += "U" else: self.assertAllEqual(y_tf_eval, y_np) count_flipped += 1 flip_seq += "F" flip_counts[i] = (count_flipped, count_unflipped) flip_sequences[i] = flip_seq # Verify that results are deterministic. for i in range(1, iterations): self.assertAllEqual(flip_counts[0], flip_counts[i]) self.assertAllEqual(flip_sequences[0], flip_sequences[i]) # TODO(b/162345082): stateless random op generates different random number # with xla_gpu. Update tests such that there is a single ground truth result # to test against. 
@parameterized.named_parameters( ("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right), ("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down) ) def testRandomFlipStatelessWithBatch(self, func): with test_util.use_gpu(): batch_size = 16 # create single item of test data x_np_raw = np.array( [[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([1, 2, 3, 1]) y_np_raw = np.array( [[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([1, 2, 3, 1]) if "RandomFlipUpDown" in self.id(): y_np_raw = np.array( [[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([1, 2, 3, 1]) # create batched test data x_np = np.vstack([x_np_raw for _ in range(batch_size)]) y_np = np.vstack([y_np_raw for _ in range(batch_size)]) x_tf = constant_op.constant(x_np, shape=x_np.shape) iterations = 2 flip_counts = [None for _ in range(iterations)] flip_sequences = ["" for _ in range(iterations)] test_seed = (1, 2) split_seeds = stateless_random_ops.split(test_seed, 10) seeds_list = self.evaluate(split_seeds) for i in range(iterations): count_flipped = 0 count_unflipped = 0 flip_seq = "" for seed in seeds_list: y_tf = func(x_tf, seed=seed) y_tf_eval = self.evaluate(y_tf) for j in range(batch_size): if y_tf_eval[j][0][0] == 1: self.assertAllEqual(y_tf_eval[j], x_np[j]) count_unflipped += 1 flip_seq += "U" else: self.assertAllEqual(y_tf_eval[j], y_np[j]) count_flipped += 1 flip_seq += "F" flip_counts[i] = (count_flipped, count_unflipped) flip_sequences[i] = flip_seq for i in range(1, iterations): self.assertAllEqual(flip_counts[0], flip_counts[i]) self.assertAllEqual(flip_sequences[0], flip_sequences[i]) def testRandomFlipLeftRightWithBatch(self): batch_size = 16 seed = 42 # create single item of test data x_np_raw = np.array( [[1, 2, 3], [1, 2, 3]], dtype=np.uint8 ).reshape([1, 2, 3, 1]) y_np_raw = np.array( [[3, 2, 1], [3, 2, 1]], dtype=np.uint8 ).reshape([1, 2, 3, 1]) # create batched test data x_np = np.vstack([x_np_raw for _ in range(batch_size)]) y_np = np.vstack([y_np_raw for _ in range(batch_size)]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) count_flipped = 0 count_unflipped = 0 for seed in range(100): y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed)) # check every element of the batch for i in range(batch_size): if y_tf[i][0][0] == 1: self.assertAllEqual(y_tf[i], x_np[i]) count_unflipped += 1 else: self.assertAllEqual(y_tf[i], y_np[i]) count_flipped += 1 self.assertEqual(count_flipped, 772) self.assertEqual(count_unflipped, 828) def testInvolutionUpDown(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testInvolutionUpDownWithBatch(self): x_np = np.array( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testUpDown(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_up_down(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testUpDownWithBatch(self): x_np = np.array( [[[1, 2, 3], 
[4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8).reshape([2, 2, 3, 1]) y_np = np.array( [[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_up_down(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testRandomFlipUpDownStateful(self): # Test random flip with single seed (stateful). with ops.Graph().as_default(): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) seed = 42 with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.random_flip_up_down(x_tf, seed=seed) self.assertTrue(y.op.name.startswith("random_flip_up_down")) count_flipped = 0 count_unflipped = 0 for _ in range(100): y_tf = self.evaluate(y) if y_tf[0][0] == 1: self.assertAllEqual(y_tf, x_np) count_unflipped += 1 else: self.assertAllEqual(y_tf, y_np) count_flipped += 1 # 100 trials # Mean: 50 # Std Dev: ~5 # Six Sigma: 50 - (5 * 6) = 20 self.assertGreaterEqual(count_flipped, 20) self.assertGreaterEqual(count_unflipped, 20) def testRandomFlipUpDown(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) count_flipped = 0 count_unflipped = 0 for seed in range(100): y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed)) if y_tf[0][0] == 1: self.assertAllEqual(y_tf, x_np) count_unflipped += 1 else: self.assertAllEqual(y_tf, y_np) count_flipped += 1 self.assertEqual(count_flipped, 45) self.assertEqual(count_unflipped, 55) def testRandomFlipUpDownWithBatch(self): batch_size = 16 seed = 42 # create single item of test data x_np_raw = np.array( [[1, 2, 3], [4, 5, 6]], dtype=np.uint8 ).reshape([1, 2, 3, 1]) y_np_raw = np.array( [[4, 5, 6], [1, 2, 3]], dtype=np.uint8 ).reshape([1, 2, 3, 1]) # create batched test data x_np = np.vstack([x_np_raw for _ in range(batch_size)]) y_np = np.vstack([y_np_raw for _ in range(batch_size)]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) count_flipped = 0 count_unflipped = 0 for seed in range(100): y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed)) # check every element of the batch for i in range(batch_size): if y_tf[i][0][0] == 1: self.assertAllEqual(y_tf[i], x_np[i]) count_unflipped += 1 else: self.assertAllEqual(y_tf[i], y_np[i]) count_flipped += 1 self.assertEqual(count_flipped, 772) self.assertEqual(count_unflipped, 828) def testInvolutionTranspose(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.transpose(image_ops.transpose(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testInvolutionTransposeWithBatch(self): x_np = np.array( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.transpose(image_ops.transpose(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testTranspose(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1]) with self.cached_session(): x_tf = 
constant_op.constant(x_np, shape=x_np.shape) y = image_ops.transpose(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testTransposeWithBatch(self): x_np = np.array( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8).reshape([2, 2, 3, 1]) y_np = np.array( [[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]], dtype=np.uint8).reshape([2, 3, 2, 1]) with self.cached_session(): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.transpose(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testPartialShapes(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): p_unknown_rank = array_ops.placeholder(dtypes.uint8) p_unknown_dims_3 = array_ops.placeholder( dtypes.uint8, shape=[None, None, None]) p_unknown_dims_4 = array_ops.placeholder( dtypes.uint8, shape=[None, None, None, None]) p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3]) p_unknown_batch = array_ops.placeholder( dtypes.uint8, shape=[None, 64, 64, 3]) p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None]) p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3]) #Ops that support 3D input for op in [ image_ops.flip_left_right, image_ops.flip_up_down, image_ops.random_flip_left_right, image_ops.random_flip_up_down, image_ops.transpose, image_ops.rot90 ]: transformed_unknown_rank = op(p_unknown_rank) self.assertIsNone(transformed_unknown_rank.get_shape().ndims) transformed_unknown_dims_3 = op(p_unknown_dims_3) self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims) transformed_unknown_width = op(p_unknown_width) self.assertEqual(3, transformed_unknown_width.get_shape().ndims) with self.assertRaisesRegex(ValueError, "must be > 0"): op(p_zero_dim) #Ops that support 4D input for op in [ image_ops.flip_left_right, image_ops.flip_up_down, image_ops.random_flip_left_right, image_ops.random_flip_up_down, image_ops.transpose, image_ops.rot90 ]: transformed_unknown_dims_4 = op(p_unknown_dims_4) self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims) transformed_unknown_batch = op(p_unknown_batch) self.assertEqual(4, transformed_unknown_batch.get_shape().ndims) with self.assertRaisesRegex(ValueError, "must be at least three-dimensional"): op(p_wrong_rank) def testRot90GroupOrder(self): image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3]) with self.cached_session(): rotated = image for _ in range(4): rotated = image_ops.rot90(rotated) self.assertAllEqual(image, self.evaluate(rotated)) def testRot90GroupOrderWithBatch(self): image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3]) with self.cached_session(): rotated = image for _ in range(4): rotated = image_ops.rot90(rotated) self.assertAllEqual(image, self.evaluate(rotated)) def testRot90NumpyEquivalence(self): image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3]) with self.cached_session(): for k in range(4): y_np = np.rot90(image, k=k) self.assertAllEqual( y_np, self.evaluate(image_ops.rot90(image, k))) def testRot90NumpyEquivalenceWithBatch(self): image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3]) with self.cached_session(): for k in range(4): y_np = np.rot90(image, k=k, axes=(1, 2)) self.assertAllEqual( y_np, self.evaluate(image_ops.rot90(image, k))) def testFlipImageUnknownShape(self): expected_output = constant_op.constant([[[[3, 4, 5], [0, 1, 2]], [[9, 10, 11], [6, 7, 8]]]]) def generator(): image_input = np.array( [[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], np.int32) yield image_input dataset = 
dataset_ops.Dataset.from_generator( generator, output_types=dtypes.int32, output_shapes=tensor_shape.TensorShape([1, 2, 2, 3])) dataset = dataset.map(image_ops.flip_left_right) image_flipped_via_dataset_map = get_single_element.get_single_element( dataset.take(1)) self.assertAllEqual(image_flipped_via_dataset_map, expected_output) class AdjustContrastTest(test_util.TensorFlowTestCase): def _testContrast(self, x_np, y_np, contrast_factor): with self.cached_session(): x = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.adjust_contrast(x, contrast_factor) y_tf = self.evaluate(y) self.assertAllClose(y_tf, y_np, 1e-6) def testDoubleContrastUint8(self): x_shape = [1, 2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testContrast(x_np, y_np, contrast_factor=2.0) def testDoubleContrastFloat(self): x_shape = [1, 2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255. y_data = [ -45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5, 134.75, 409.25, -116.5 ] y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255. self._testContrast(x_np, y_np, contrast_factor=2.0) def testHalfContrastUint8(self): x_shape = [1, 2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testContrast(x_np, y_np, contrast_factor=0.5) def testBatchDoubleContrast(self): x_shape = [2, 1, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testContrast(x_np, y_np, contrast_factor=2.0) def _adjustContrastNp(self, x_np, contrast_factor): mean = np.mean(x_np, (1, 2), keepdims=True) y_np = mean + contrast_factor * (x_np - mean) return y_np def _adjustContrastTf(self, x_np, contrast_factor): with self.cached_session(): x = constant_op.constant(x_np) y = image_ops.adjust_contrast(x, contrast_factor) y_tf = self.evaluate(y) return y_tf def testRandomContrast(self): x_shapes = [ [1, 2, 2, 3], [2, 1, 2, 3], [1, 2, 2, 3], [2, 5, 5, 3], [2, 1, 1, 3], ] for x_shape in x_shapes: x_np = np.random.rand(*x_shape) * 255. contrast_factor = np.random.rand() * 2.0 + 0.1 y_np = self._adjustContrastNp(x_np, contrast_factor) y_tf = self._adjustContrastTf(x_np, contrast_factor) self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5) def testContrastFactorShape(self): x_shape = [1, 2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError), "contrast_factor must be scalar|" "Shape must be rank 0 but is rank 1"): image_ops.adjust_contrast(x_np, [2.0]) @test_util.run_in_graph_and_eager_modes def testDeterminismUnimplementedExceptionThrowing(self): """Test d9m-unimplemented exception-throwing when op-determinism is enabled. This test depends upon other tests, tests which do not enable op-determinism, to ensure that determinism-unimplemented exceptions are not erroneously thrown when op-determinism is not enabled. 
""" if test_util.is_xla_enabled(): self.skipTest('XLA implementation does not raise exception') with self.session(), test_util.deterministic_ops(): input_shape = (1, 2, 2, 1) on_gpu = len(tf_config.list_physical_devices("GPU")) # AdjustContrast seems to now be inaccessible via the Python API. # AdjustContrastv2 only supports float16 and float32 on GPU, and other # types are converted to and from float32 at the Python level before # AdjustContrastv2 is called. dtypes_to_test = [ dtypes.uint8, dtypes.int8, dtypes.int16, dtypes.int32, dtypes.float32, dtypes.float64 ] if on_gpu: dtypes_to_test.append(dtypes.float16) ctx_mgr = self.assertRaisesRegex( errors.UnimplementedError, "A deterministic GPU implementation of AdjustContrastv2 is not" + " currently available.") else: ctx_mgr = contextlib.suppress() for dtype in dtypes_to_test: input_images = array_ops.zeros(input_shape, dtype=dtype) contrast_factor = 1. with ctx_mgr: output_images = image_ops.adjust_contrast(input_images, contrast_factor) self.evaluate(output_images) class AdjustBrightnessTest(test_util.TensorFlowTestCase): def _testBrightness(self, x_np, y_np, delta, tol=1e-6): with self.cached_session(): x = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.adjust_brightness(x, delta) y_tf = self.evaluate(y) self.assertAllClose(y_tf, y_np, tol) def testPositiveDeltaUint8(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testBrightness(x_np, y_np, delta=10. / 255.) def testPositiveDeltaFloat32(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255. y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11] y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255. self._testBrightness(x_np, y_np, delta=10. / 255.) def testPositiveDeltaFloat16(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.float16).reshape(x_shape) / 255. y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11] y_np = np.array(y_data, dtype=np.float16).reshape(x_shape) / 255. self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3) def testNegativeDelta(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testBrightness(x_np, y_np, delta=-10. / 255.) 
class PerImageWhiteningTest(test_util.TensorFlowTestCase, parameterized.TestCase): def _NumpyPerImageWhitening(self, x): num_pixels = np.prod(x.shape) mn = np.mean(x) std = np.std(x) stddev = max(std, 1.0 / math.sqrt(num_pixels)) y = x.astype(np.float32) y -= mn y /= stddev return y @parameterized.named_parameters([("_int8", np.int8), ("_int16", np.int16), ("_int32", np.int32), ("_int64", np.int64), ("_uint8", np.uint8), ("_uint16", np.uint16), ("_uint32", np.uint32), ("_uint64", np.uint64), ("_float32", np.float32)]) def testBasic(self, data_type): x_shape = [13, 9, 3] x_np = np.arange(0, np.prod(x_shape), dtype=data_type).reshape(x_shape) y_np = self._NumpyPerImageWhitening(x_np) with self.cached_session(): x = constant_op.constant(x_np, dtype=data_type, shape=x_shape) y = image_ops.per_image_standardization(x) y_tf = self.evaluate(y) self.assertAllClose(y_tf, y_np, atol=1e-4) def testUniformImage(self): im_np = np.ones([19, 19, 3]).astype(np.float32) * 249 im = constant_op.constant(im_np) whiten = image_ops.per_image_standardization(im) with self.cached_session(): whiten_np = self.evaluate(whiten) self.assertFalse(np.any(np.isnan(whiten_np))) def testBatchWhitening(self): imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3]) whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np] with self.cached_session(): imgs = constant_op.constant(imgs_np) whiten = image_ops.per_image_standardization(imgs) whiten_tf = self.evaluate(whiten) for w_tf, w_np in zip(whiten_tf, whiten_np): self.assertAllClose(w_tf, w_np, atol=1e-4) class CropToBoundingBoxTest(test_util.TensorFlowTestCase): def _CropToBoundingBox(self, x, offset_height, offset_width, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: offset_height = ops.convert_to_tensor(offset_height) offset_width = ops.convert_to_tensor(offset_width) target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = ops.convert_to_tensor(x) else: x_tensor = x y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width, target_height, target_width) with self.cached_session(): return self.evaluate(y) def _assertReturns(self, x, x_shape, offset_height, offset_width, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._CropToBoundingBox(x, offset_height, offset_width, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, offset_height, offset_width, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): self._CropToBoundingBox(x, offset_height, offset_width, target_height, target_width, use_tensor_inputs) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.crop_to_bounding_box(image, 0, 0, height, width) self.assertEqual(y.get_shape().as_list(), post_shape) def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertReturns(x, x_shape, 0, 0, x, x_shape) def testCrop(self): x = [1, 2, 3, 4, 5, 6, 7, 8, 9] 
x_shape = [3, 3, 1] offset_height, offset_width = [1, 0] y_shape = [2, 3, 1] y = [4, 5, 6, 7, 8, 9] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 1] y_shape = [3, 2, 1] y = [2, 3, 5, 6, 8, 9] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 0] y_shape = [2, 3, 1] y = [1, 2, 3, 4, 5, 6] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 0] y_shape = [3, 2, 1] y = [1, 2, 4, 5, 7, 8] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) def testShapeInference(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None]) self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None]) self._assertShapeInference([None, None, None], 55, 66, [55, 66, None]) self._assertShapeInference(None, 55, 66, [55, 66, None]) def testNon3DInput(self): # Input image is not 3D x = [0] * 15 offset_height, offset_width = [0, 0] target_height, target_width = [2, 2] for x_shape in ([3, 5], [1, 3, 5, 1, 1]): self._assertRaises(x, x_shape, offset_height, offset_width, target_height, target_width, "must have either 3 or 4 dimensions.") def testZeroLengthInput(self): # Input image has 0-length dimension(s). # Each line is a test configuration: # x_shape, target_height, target_width test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1), ([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0)) offset_height, offset_width = [0, 0] x = [] for x_shape, target_height, target_width in test_config: self._assertRaises( x, x_shape, offset_height, offset_width, target_height, target_width, "inner 3 dims of 'image.shape' must be > 0", use_tensor_inputs_options=[False]) # Multiple assertion could fail, but the evaluation order is arbitrary. # Match gainst generic pattern. self._assertRaises( x, x_shape, offset_height, offset_width, target_height, target_width, "inner 3 dims of 'image.shape' must be > 0", use_tensor_inputs_options=[True]) def testBadParams(self): x_shape = [4, 4, 1] x = np.zeros(x_shape) # Each line is a test configuration: # (offset_height, offset_width, target_height, target_width), err_msg test_config = ( ([-1, 0, 3, 3], "offset_height must be >= 0"), ([0, -1, 3, 3], "offset_width must be >= 0"), ([0, 0, 0, 3], "target_height must be > 0"), ([0, 0, 3, 0], "target_width must be > 0"), ([2, 0, 3, 3], r"height must be >= target \+ offset"), ([0, 2, 3, 3], r"width must be >= target \+ offset")) for params, err_msg in test_config: self._assertRaises(x, x_shape, *params, err_msg=err_msg) def testNameScope(self): # Testing name scope requires a graph. 
with ops.Graph().as_default(): image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3]) y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66) self.assertTrue(y.name.startswith("crop_to_bounding_box")) class CentralCropTest(test_util.TensorFlowTestCase): def _assertShapeInference(self, pre_shape, fraction, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.central_crop(image, fraction) if post_shape is None: self.assertEqual(y.get_shape().dims, None) else: self.assertEqual(y.get_shape().as_list(), post_shape) def testNoOp(self): x_shapes = [[13, 9, 3], [5, 13, 9, 3]] for x_shape in x_shapes: x_np = np.ones(x_shape, dtype=np.float32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.central_crop(x, 1.0) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testCropping(self): x_shape = [4, 8, 1] x_np = np.array( [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]], dtype=np.int32).reshape(x_shape) y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1]) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.central_crop(x, 0.5) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) self.assertAllEqual(y_tf.shape, y_np.shape) x_shape = [2, 4, 8, 1] x_np = np.array( [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]], dtype=np.int32).reshape(x_shape) y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]], [[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1]) with self.cached_session(): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.central_crop(x, 0.5) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) self.assertAllEqual(y_tf.shape, y_np.shape) def testCropping2(self): # Test case for 10315 x_shapes = [[240, 320, 3], [5, 240, 320, 3]] expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]] for x_shape, y_shape in zip(x_shapes, expected_y_shapes): x_np = np.zeros(x_shape, dtype=np.int32) y_np = np.zeros(y_shape, dtype=np.int32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): y_tf = self.evaluate(image_ops.central_crop(x_np, 0.33)) self.assertAllEqual(y_tf, y_np) self.assertAllEqual(y_tf.shape, y_np.shape) def testShapeInference(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): # Test no-op fraction=1.0, with 3-D tensors. self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3]) self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3]) self._assertShapeInference([50, None, 3], 1.0, [50, None, 3]) self._assertShapeInference([None, None, 3], 1.0, [None, None, 3]) self._assertShapeInference([50, 60, None], 1.0, [50, 60, None]) self._assertShapeInference([None, None, None], 1.0, [None, None, None]) # Test no-op fraction=0.5, with 3-D tensors. self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3]) self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3]) self._assertShapeInference([50, None, 3], 0.5, [26, None, 3]) self._assertShapeInference([None, None, 3], 0.5, [None, None, 3]) self._assertShapeInference([50, 60, None], 0.5, [26, 30, None]) self._assertShapeInference([None, None, None], 0.5, [None, None, None]) # Test no-op fraction=1.0, with 4-D tensors. 
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3]) self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3]) self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3]) self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3]) self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None]) self._assertShapeInference([5, None, None, None], 1.0, [5, None, None, None]) self._assertShapeInference([None, None, None, None], 1.0, [None, None, None, None]) # Test no-op fraction=0.5, with 4-D tensors. self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3]) self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3]) self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3]) self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3]) self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None]) self._assertShapeInference([5, None, None, None], 0.5, [5, None, None, None]) self._assertShapeInference([None, None, None, None], 0.5, [None, None, None, None]) def testErrorOnInvalidCentralCropFractionValues(self): x_shape = [13, 9, 3] x_np = np.ones(x_shape, dtype=np.float32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): x = constant_op.constant(x_np, shape=x_shape) with self.assertRaises(ValueError): _ = image_ops.central_crop(x, 0.0) with self.assertRaises(ValueError): _ = image_ops.central_crop(x, 1.01) def testErrorOnInvalidShapes(self): x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]] for x_shape in x_shapes: x_np = np.ones(x_shape, dtype=np.float32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): x = constant_op.constant(x_np, shape=x_shape) with self.assertRaises(ValueError): _ = image_ops.central_crop(x, 0.5) def testNameScope(self): # Testing name scope requires a graph. with ops.Graph().as_default(): x_shape = [13, 9, 3] x_np = np.ones(x_shape, dtype=np.float32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): y = image_ops.central_crop(x_np, 1.0) self.assertTrue(y.op.name.startswith("central_crop")) def testCentralFractionTensor(self): # Test case for GitHub issue 45324. 
x_shape = [240, 320, 3] y_shape = [80, 106, 3] @def_function.function(autograph=False) def f(x, central_fraction): return image_ops.central_crop(x, central_fraction) x_np = np.zeros(x_shape, dtype=np.int32) y_np = np.zeros(y_shape, dtype=np.int32) y_tf = self.evaluate(f(x_np, constant_op.constant(0.33))) self.assertAllEqual(y_tf, y_np) self.assertAllEqual(y_tf.shape, y_np.shape) class PadToBoundingBoxTest(test_util.TensorFlowTestCase, parameterized.TestCase): def _PadToBoundingBox(self, x, offset_height, offset_width, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: offset_height = ops.convert_to_tensor(offset_height) offset_width = ops.convert_to_tensor(offset_width) target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = ops.convert_to_tensor(x) else: x_tensor = x @def_function.function def pad_bbox(*args): return image_ops.pad_to_bounding_box(*args) with self.cached_session(): return self.evaluate(pad_bbox(x_tensor, offset_height, offset_width, target_height, target_width)) def _assertReturns(self, x, x_shape, offset_height, offset_width, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._PadToBoundingBox(x, offset_height, offset_width, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, offset_height, offset_width, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): self._PadToBoundingBox(x, offset_height, offset_width, target_height, target_width, use_tensor_inputs) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.pad_to_bounding_box(image, 0, 0, height, width) self.assertEqual(y.get_shape().as_list(), post_shape) def testInt64(self): x = [1, 2, 3, 4, 5, 6, 7, 8, 9] x_shape = [3, 3, 1] y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] y_shape = [4, 3, 1] x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64) y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3]) with self.cached_session(): self.assertAllClose(y, self.evaluate(y_tf)) def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) offset_height, offset_width = [0, 0] self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape) def testPadding(self): x = [1, 2, 3, 4, 5, 6, 7, 8, 9] x_shape = [3, 3, 1] offset_height, offset_width = [1, 0] y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] y_shape = [4, 3, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 1] y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9] y_shape = [3, 4, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 0] y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0] y_shape = [4, 3, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 0] y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0] y_shape = [3, 4, 1] 
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) def testShapeInference(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None]) self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None]) self._assertShapeInference([None, None, None], 55, 66, [55, 66, None]) self._assertShapeInference(None, 55, 66, [55, 66, None]) def testNon3DInput(self): # Input image is not 3D x = [0] * 15 offset_height, offset_width = [0, 0] target_height, target_width = [2, 2] for x_shape in ([3, 5], [1, 3, 5, 1, 1]): self._assertRaises(x, x_shape, offset_height, offset_width, target_height, target_width, "must have either 3 or 4 dimensions.") def testZeroLengthInput(self): # Input image has 0-length dimension(s). # Each line is a test configuration: # x_shape, target_height, target_width test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2)) offset_height, offset_width = [0, 0] x = [] for x_shape, target_height, target_width in test_config: self._assertRaises( x, x_shape, offset_height, offset_width, target_height, target_width, "inner 3 dims of 'image.shape' must be > 0", use_tensor_inputs_options=[False]) # The original error message does not contain back slashes. However, they # are added by either the assert op or the runtime. If this behavior # changes in the future, the match string will also needs to be changed. self._assertRaises( x, x_shape, offset_height, offset_width, target_height, target_width, "inner 3 dims of \\'image.shape\\' must be > 0", use_tensor_inputs_options=[True]) def testBadParamsScalarInputs(self): # In this test, inputs do not get converted to tensors before calling the # tf.function. The error message here is raised in python # since the python function has direct access to the scalars. x_shape = [3, 3, 1] x = np.zeros(x_shape) # Each line is a test configuration: # offset_height, offset_width, target_height, target_width, err_msg test_config = ( (-1, 0, 4, 4, "offset_height must be >= 0"), (0, -1, 4, 4, "offset_width must be >= 0"), (2, 0, 4, 4, "height must be <= target - offset"), (0, 2, 4, 4, "width must be <= target - offset")) for config_item in test_config: self._assertRaises( x, x_shape, *config_item, use_tensor_inputs_options=[False]) def testBadParamsTensorInputsEager(self): # In this test inputs get converted to EagerTensors before calling the # tf.function. The error message here is raised in python # since the python function has direct access to the tensor's values. 
with context.eager_mode(): x_shape = [3, 3, 1] x = np.zeros(x_shape) # Each line is a test configuration: # offset_height, offset_width, target_height, target_width, err_msg test_config = ( (-1, 0, 4, 4, "offset_height must be >= 0"), (0, -1, 4, 4, "offset_width must be >= 0"), (2, 0, 4, 4, "height must be <= target - offset"), (0, 2, 4, 4, "width must be <= target - offset")) for config_item in test_config: self._assertRaises( x, x_shape, *config_item, use_tensor_inputs_options=[True]) @parameterized.named_parameters([("OffsetHeight", (-1, 0, 4, 4)), ("OffsetWidth", (0, -1, 4, 4)), ("Height", (2, 0, 4, 4)), ("Width", (0, 2, 4, 4))]) def testBadParamsTensorInputsGraph(self, config): # In this test inputs get converted to tensors before calling the # tf.function. The error message here is raised during shape inference. with context.graph_mode(): x_shape = [3, 3, 1] x = np.zeros(x_shape) self._assertRaises( x, x_shape, *config, "Paddings must be non-negative", use_tensor_inputs_options=[True]) def testNameScope(self): # Testing name scope requires a graph. with ops.Graph().as_default(): image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3]) y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66) self.assertTrue(y.op.name.startswith("pad_to_bounding_box")) def testInvalidInput(self): # Test case for GitHub issue 46890. if test_util.is_xla_enabled(): # TODO(b/200850176): test fails with XLA. return with self.session(): with self.assertRaises(errors_impl.InvalidArgumentError): v = image_ops.pad_to_bounding_box( image=np.ones((1, 1, 1)), target_height=5191549470, target_width=5191549470, offset_height=1, offset_width=1) self.evaluate(v) class InternalPadToBoundingBoxTest(test_util.TensorFlowTestCase, parameterized.TestCase): def _InternalPadToBoundingBox(self, x, offset_height, offset_width, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: offset_height = ops.convert_to_tensor(offset_height) offset_width = ops.convert_to_tensor(offset_width) target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = ops.convert_to_tensor(x) else: x_tensor = x @def_function.function def pad_bbox(*args): return image_ops.pad_to_bounding_box_internal(*args, check_dims=False) with self.cached_session(): return self.evaluate( pad_bbox(x_tensor, offset_height, offset_width, target_height, target_width)) def _assertReturns(self, x, x_shape, offset_height, offset_width, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._InternalPadToBoundingBox(x, offset_height, offset_width, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.pad_to_bounding_box_internal( image, 0, 0, height, width, check_dims=False) self.assertEqual(y.get_shape().as_list(), post_shape) def testInt64(self): x = [1, 2, 3, 4, 5, 6, 7, 8, 9] x_shape = [3, 3, 1] y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] y_shape = [4, 3, 1] x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64) y_tf = image_ops.pad_to_bounding_box_internal( x, i[0], i[1], i[2], i[3], check_dims=False) with self.cached_session(): 
self.assertAllClose(y, self.evaluate(y_tf)) def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) offset_height, offset_width = [0, 0] self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape) def testPadding(self): x = [1, 2, 3, 4, 5, 6, 7, 8, 9] x_shape = [3, 3, 1] offset_height, offset_width = [1, 0] y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] y_shape = [4, 3, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 1] y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9] y_shape = [3, 4, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 0] y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0] y_shape = [4, 3, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 0] y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0] y_shape = [3, 4, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) def testShapeInference(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None]) self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None]) self._assertShapeInference([None, None, None], 55, 66, [55, 66, None]) self._assertShapeInference(None, 55, 66, [55, 66, None]) def testNameScope(self): # Testing name scope requires a graph. 
with ops.Graph().as_default(): image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3]) y = image_ops.pad_to_bounding_box_internal( image, 0, 0, 55, 66, check_dims=False) self.assertTrue(y.op.name.startswith("pad_to_bounding_box")) class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase): def _testSampleDistortedBoundingBox(self, image, bounding_box, min_object_covered, aspect_ratio_range, area_range): original_area = float(np.prod(image.shape)) bounding_box_area = float((bounding_box[3] - bounding_box[1]) * (bounding_box[2] - bounding_box[0])) image_size_np = np.array(image.shape, dtype=np.int32) bounding_box_np = ( np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4])) aspect_ratios = [] area_ratios = [] fraction_object_covered = [] num_iter = 1000 with self.cached_session(): image_tf = constant_op.constant(image, shape=image.shape) image_size_tf = constant_op.constant( image_size_np, shape=image_size_np.shape) bounding_box_tf = constant_op.constant( bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape) begin, size, _ = image_ops.sample_distorted_bounding_box( image_size=image_size_tf, bounding_boxes=bounding_box_tf, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range) y = array_ops.strided_slice(image_tf, begin, begin + size) for _ in range(num_iter): y_tf = self.evaluate(y) crop_height = y_tf.shape[0] crop_width = y_tf.shape[1] aspect_ratio = float(crop_width) / float(crop_height) area = float(crop_width * crop_height) aspect_ratios.append(aspect_ratio) area_ratios.append(area / original_area) fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area) # min_object_covered as tensor min_object_covered_t = ops.convert_to_tensor(min_object_covered) begin, size, _ = image_ops.sample_distorted_bounding_box( image_size=image_size_tf, bounding_boxes=bounding_box_tf, min_object_covered=min_object_covered_t, aspect_ratio_range=aspect_ratio_range, area_range=area_range) y = array_ops.strided_slice(image_tf, begin, begin + size) for _ in range(num_iter): y_tf = self.evaluate(y) crop_height = y_tf.shape[0] crop_width = y_tf.shape[1] aspect_ratio = float(crop_width) / float(crop_height) area = float(crop_width * crop_height) aspect_ratios.append(aspect_ratio) area_ratios.append(area / original_area) fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area) # Ensure that each entry is observed within 3 standard deviations. # num_bins = 10 # aspect_ratio_hist, _ = np.histogram(aspect_ratios, # bins=num_bins, # range=aspect_ratio_range) # mean = np.mean(aspect_ratio_hist) # stddev = np.sqrt(mean) # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky. # TODO(irving): Since the rejection probability is not independent of the # aspect ratio, the aspect_ratio random value is not exactly uniformly # distributed in [min_aspect_ratio, max_aspect_ratio). This test should be # fixed to reflect the true statistical property, then tightened to enforce # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op # be fixed to not use rejection sampling and generate correctly uniform # aspect ratios. # self.assertAllClose(aspect_ratio_hist, # [mean] * num_bins, atol=3.6 * stddev) # The resulting crop will not be uniformly distributed in area. In practice, # we find that the area skews towards the small sizes. Instead, we perform # a weaker test to ensure that the area ratios are merely within the # specified bounds. 
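# ---------------------------------------------------------------------------
# Illustrative sketch, assuming the public TF 2.x `tf.image` API (which
# mirrors the internal `image_ops` module exercised by these tests): the
# helper above draws many random crops and checks their aspect-ratio / area
# statistics; a minimal end-to-end use of the op consumes the begin/size
# outputs with a slice, much as the helper does with strided_slice.
import tensorflow as tf

image = tf.zeros([40, 50, 3], dtype=tf.float32)
# One bounding box in normalized (ymin, xmin, ymax, xmax) coordinates.
bboxes = tf.constant([[[0.1, 0.1, 0.9, 0.9]]], dtype=tf.float32)
begin, size, _ = tf.image.sample_distorted_bounding_box(
    image_size=tf.shape(image),
    bounding_boxes=bboxes,
    min_object_covered=0.1,
    aspect_ratio_range=(0.75, 1.33),
    area_range=(0.05, 1.0))
crop = tf.slice(image, begin, size)  # random crop satisfying the constraints
# ---------------------------------------------------------------------------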
self.assertLessEqual(max(area_ratios), area_range[1]) self.assertGreaterEqual(min(area_ratios), area_range[0]) # For reference, here is what the distribution of area ratios look like. area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range) print("area_ratio_hist ", area_ratio_hist) # Ensure that fraction_object_covered is satisfied. # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky. # self.assertGreaterEqual(min(fraction_object_covered), min_object_covered) def testWholeImageBoundingBox(self): height = 40 width = 50 image_size = [height, width, 1] bounding_box = [0.0, 0.0, 1.0, 1.0] image = np.arange( 0, np.prod(image_size), dtype=np.int32).reshape(image_size) self._testSampleDistortedBoundingBox( image, bounding_box, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0)) def testWithBoundingBox(self): height = 40 width = 50 x_shape = [height, width, 1] image = np.zeros(x_shape, dtype=np.int32) # Create an object with 1's in a region with area A and require that # the total pixel values >= 0.1 * A. min_object_covered = 0.1 xmin = 2 ymin = 3 xmax = 12 ymax = 13 for x in np.arange(xmin, xmax + 1, 1): for y in np.arange(ymin, ymax + 1, 1): image[x, y] = 1 # Bounding box is specified as (ymin, xmin, ymax, xmax) in # relative coordinates. bounding_box = (float(ymin) / height, float(xmin) / width, float(ymax) / height, float(xmax) / width) self._testSampleDistortedBoundingBox( image, bounding_box=bounding_box, min_object_covered=min_object_covered, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0)) def testSampleDistortedBoundingBoxShape(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): with self.cached_session(): image_size = constant_op.constant( [40, 50, 1], shape=[3], dtype=dtypes.int32) bounding_box = constant_op.constant( [[[0.0, 0.0, 1.0, 1.0]]], shape=[1, 1, 4], dtype=dtypes.float32, ) begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box( image_size=image_size, bounding_boxes=bounding_box, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0)) # Test that the shapes are correct. self.assertAllEqual([3], begin.get_shape().as_list()) self.assertAllEqual([3], end.get_shape().as_list()) self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list()) # Actual run to make sure shape is correct inside Compute(). begin = self.evaluate(begin) end = self.evaluate(end) bbox_for_drawing = self.evaluate(bbox_for_drawing) begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box( image_size=image_size, bounding_boxes=bounding_box, min_object_covered=array_ops.placeholder(dtypes.float32), aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0)) # Test that the shapes are correct. 
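# ---------------------------------------------------------------------------
# Illustrative sketch of the static output shapes checked in the shape tests
# below, assuming the public TF 2.x `tf.image` API: `begin` and `size` are
# rank-1 tensors of length 3, the returned boxes have shape [1, 1, 4], and
# `min_object_covered` defaults to 0.1 when omitted.
import tensorflow as tf

image_size = tf.constant([40, 50, 1], dtype=tf.int32)
bounding_box = tf.constant([[[0.0, 0.0, 1.0, 1.0]]], dtype=tf.float32)
begin, size, bbox_for_drawing = tf.image.sample_distorted_bounding_box(
    image_size=image_size,
    bounding_boxes=bounding_box,
    aspect_ratio_range=(0.75, 1.33),
    area_range=(0.05, 1.0))
assert begin.shape.as_list() == [3]
assert size.shape.as_list() == [3]
assert bbox_for_drawing.shape.as_list() == [1, 1, 4]
# ---------------------------------------------------------------------------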
self.assertAllEqual([3], begin.get_shape().as_list()) self.assertAllEqual([3], end.get_shape().as_list()) self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list()) def testDefaultMinObjectCovered(self): # By default min_object_covered=0.1 if not provided with self.cached_session(): image_size = constant_op.constant( [40, 50, 1], shape=[3], dtype=dtypes.int32) bounding_box = constant_op.constant( [[[0.0, 0.0, 1.0, 1.0]]], shape=[1, 1, 4], dtype=dtypes.float32, ) begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box( image_size=image_size, bounding_boxes=bounding_box, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0)) self.assertAllEqual([3], begin.get_shape().as_list()) self.assertAllEqual([3], end.get_shape().as_list()) self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list()) # Actual run to make sure shape is correct inside Compute(). begin = self.evaluate(begin) end = self.evaluate(end) bbox_for_drawing = self.evaluate(bbox_for_drawing) def _testStatelessSampleDistortedBoundingBox(self, image, bounding_box, min_object_covered, aspect_ratio_range, area_range): with test_util.use_gpu(): original_area = float(np.prod(image.shape)) bounding_box_area = float((bounding_box[3] - bounding_box[1]) * (bounding_box[2] - bounding_box[0])) image_size_np = np.array(image.shape, dtype=np.int32) bounding_box_np = ( np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4])) iterations = 2 test_seeds = [(1, 2), (3, 4), (5, 6)] for seed in test_seeds: aspect_ratios = [] area_ratios = [] fraction_object_covered = [] for _ in range(iterations): image_tf = constant_op.constant(image, shape=image.shape) image_size_tf = constant_op.constant( image_size_np, shape=image_size_np.shape) bounding_box_tf = constant_op.constant(bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape) begin, size, _ = image_ops.stateless_sample_distorted_bounding_box( image_size=image_size_tf, bounding_boxes=bounding_box_tf, seed=seed, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range) y = array_ops.strided_slice(image_tf, begin, begin + size) y_tf = self.evaluate(y) crop_height = y_tf.shape[0] crop_width = y_tf.shape[1] aspect_ratio = float(crop_width) / float(crop_height) area = float(crop_width * crop_height) aspect_ratios.append(aspect_ratio) area_ratio = area / original_area area_ratios.append(area_ratio) fraction_object_covered.append( float(np.sum(y_tf)) / bounding_box_area) # Check that `area_ratio` is within valid range. self.assertLessEqual(area_ratio, area_range[1]) self.assertGreaterEqual(area_ratio, area_range[0]) # Each array should consist of one value just repeated `iteration` times # because the same seed is used. self.assertEqual(len(set(aspect_ratios)), 1) self.assertEqual(len(set(area_ratios)), 1) self.assertEqual(len(set(fraction_object_covered)), 1) # TODO(b/162345082): stateless random op generates different random number # with xla_gpu. Update tests such that there is a single ground truth result # to test against. 
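# ---------------------------------------------------------------------------
# Illustrative sketch of the reproducibility property the stateless tests
# below rely on, assuming the public TF 2.x `tf.image` API: with the same
# shape-[2] seed, the stateless op returns the identical crop on every call.
import tensorflow as tf

image_size = tf.constant([40, 50, 1], dtype=tf.int32)
bounding_box = tf.constant([[[0.0, 0.0, 1.0, 1.0]]], dtype=tf.float32)

def sample(seed):
  return tf.image.stateless_sample_distorted_bounding_box(
      image_size=image_size,
      bounding_boxes=bounding_box,
      seed=seed,
      min_object_covered=0.1,
      aspect_ratio_range=(0.75, 1.33),
      area_range=(0.05, 1.0))

begin_a, size_a, _ = sample(seed=(1, 2))
begin_b, size_b, _ = sample(seed=(1, 2))
assert bool(tf.reduce_all(begin_a == begin_b))
assert bool(tf.reduce_all(size_a == size_b))
# ---------------------------------------------------------------------------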
def testWholeImageBoundingBoxStateless(self): height = 40 width = 50 image_size = [height, width, 1] bounding_box = [0.0, 0.0, 1.0, 1.0] image = np.arange( 0, np.prod(image_size), dtype=np.int32).reshape(image_size) for min_obj_covered in [0.1, constant_op.constant(0.1)]: self._testStatelessSampleDistortedBoundingBox( image, bounding_box, min_object_covered=min_obj_covered, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0)) # TODO(b/162345082): stateless random op generates different random number # with xla_gpu. Update tests such that there is a single ground truth result # to test against. def testWithBoundingBoxStateless(self): height = 40 width = 50 x_shape = [height, width, 1] image = np.zeros(x_shape, dtype=np.int32) xmin = 2 ymin = 3 xmax = 12 ymax = 13 for x in np.arange(xmin, xmax + 1, 1): for y in np.arange(ymin, ymax + 1, 1): image[x, y] = 1 # Bounding box is specified as (ymin, xmin, ymax, xmax) in # relative coordinates. bounding_box = (float(ymin) / height, float(xmin) / width, float(ymax) / height, float(xmax) / width) # Test both scalar and tensor input for `min_object_covered`. for min_obj_covered in [0.1, constant_op.constant(0.1)]: self._testStatelessSampleDistortedBoundingBox( image, bounding_box=bounding_box, min_object_covered=min_obj_covered, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0)) def testSampleDistortedBoundingBoxShapeStateless(self): with test_util.use_gpu(): image_size = constant_op.constant( [40, 50, 1], shape=[3], dtype=dtypes.int32) bounding_box = constant_op.constant( [[[0.0, 0.0, 1.0, 1.0]]], shape=[1, 1, 4], dtype=dtypes.float32, ) bbox_func = functools.partial( image_ops.stateless_sample_distorted_bounding_box, image_size=image_size, bounding_boxes=bounding_box, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0)) # Check error is raised with wrong seed shapes. for seed in [1, (1, 2, 3)]: with self.assertRaises((ValueError, errors.InvalidArgumentError)): begin, end, bbox_for_drawing = bbox_func(seed=seed) test_seed = (1, 2) begin, end, bbox_for_drawing = bbox_func(seed=test_seed) # Test that the shapes are correct. self.assertAllEqual([3], begin.get_shape().as_list()) self.assertAllEqual([3], end.get_shape().as_list()) self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list()) # Actual run to make sure shape is correct inside Compute(). 
begin = self.evaluate(begin) end = self.evaluate(end) bbox_for_drawing = self.evaluate(bbox_for_drawing) self.assertAllEqual([3], begin.shape) self.assertAllEqual([3], end.shape) self.assertAllEqual([1, 1, 4], bbox_for_drawing.shape) def testDeterminismExceptionThrowing(self): with test_util.deterministic_ops(): with self.assertRaisesRegex( ValueError, "requires a non-zero seed to be passed in when " "determinism is enabled"): image_ops_impl.sample_distorted_bounding_box_v2( image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]], ) image_ops_impl.sample_distorted_bounding_box_v2( image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]], seed=1) with self.assertRaisesRegex( ValueError, 'requires "seed" or "seed2" to be non-zero when ' "determinism is enabled"): image_ops_impl.sample_distorted_bounding_box( image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]]) image_ops_impl.sample_distorted_bounding_box( image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]], seed=1) class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase): METHODS = [ image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA, image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5, image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC ] # Some resize methods, such as Gaussian, are non-interpolating in that they # change the image even if there is no scale change, for some test, we only # check the value on the value preserving methods. INTERPOLATING_METHODS = [ image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA, image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5 ] TYPES = [ np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64 ] def _assertShapeInference(self, pre_shape, size, post_shape): # Try single image resize single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_images_v2(single_image, size) self.assertEqual(y.get_shape().as_list(), post_shape) # Try batch images resize with known batch size images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape) y = image_ops.resize_images_v2(images, size) self.assertEqual(y.get_shape().as_list(), [99] + post_shape) # Try batch images resize with unknown batch size images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape) y = image_ops.resize_images_v2(images, size) self.assertEqual(y.get_shape().as_list(), [None] + post_shape) def shouldRunOnGPU(self, method, nptype): if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and nptype in [np.float32, np.float64]): return True else: return False @test_util.disable_xla("align_corners=False not supported by XLA") def testNoOp(self): img_shape = [1, 6, 4, 1] single_shape = [6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. 
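# ---------------------------------------------------------------------------
# Illustrative sketch of the interpolating / non-interpolating distinction
# noted above, assuming the public TF 2.x `tf.image.resize` (the v2 op under
# test): resizing to the input's own size is numerically a no-op for
# interpolating kernels such as 'bilinear', while non-interpolating kernels
# such as 'gaussian' may still smooth the image.
import numpy as np
import tensorflow as tf

img = tf.constant(np.arange(24, dtype=np.float32).reshape(1, 6, 4, 1))
same_bilinear = tf.image.resize(img, [6, 4], method='bilinear')
same_gaussian = tf.image.resize(img, [6, 4], method='gaussian')
print(np.allclose(same_bilinear, img.numpy(), atol=1e-5))  # expected: True
print(np.allclose(same_gaussian, img.numpy(), atol=1e-5))  # may be False
# ---------------------------------------------------------------------------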
data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] target_height = 6 target_width = 4 for nptype in self.TYPES: img_np = np.array(data, dtype=nptype).reshape(img_shape) for method in self.METHODS: with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2(image, [target_height, target_width], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) if method in self.INTERPOLATING_METHODS: self.assertAllClose(resized, img_np, atol=1e-5) # Resizing with a single image must leave the shape unchanged also. with self.cached_session(): img_single = img_np.reshape(single_shape) image = constant_op.constant(img_single, shape=single_shape) y = image_ops.resize_images_v2(image, [target_height, target_width], self.METHODS[0]) yshape = array_ops.shape(y) newshape = self.evaluate(yshape) self.assertAllEqual(single_shape, newshape) # half_pixel_centers unsupported in ResizeBilinear @test_util.disable_xla("b/127616992") def testTensorArguments(self): img_shape = [1, 6, 4, 1] single_shape = [6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] def resize_func(t, new_size, method): return image_ops.resize_images_v2(t, new_size, method) img_np = np.array(data, dtype=np.uint8).reshape(img_shape) for method in self.METHODS: with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = resize_func(image, [6, 4], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) if method in self.INTERPOLATING_METHODS: self.assertAllClose(resized, img_np, atol=1e-5) # Resizing with a single image must leave the shape unchanged also. with self.cached_session(): img_single = img_np.reshape(single_shape) image = constant_op.constant(img_single, shape=single_shape) y = resize_func(image, [6, 4], self.METHODS[0]) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(single_shape, newshape) if method in self.INTERPOLATING_METHODS: self.assertAllClose(resized, img_single, atol=1e-5) # Incorrect shape. with self.assertRaises(ValueError): new_size = constant_op.constant(4) _ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): new_size = constant_op.constant([4]) _ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): new_size = constant_op.constant([1, 2, 3]) _ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR) # Incorrect dtypes. with self.assertRaises(ValueError): new_size = constant_op.constant([6.0, 4]) _ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): _ = resize_func(image, [6, 4.0], image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): _ = resize_func(image, [None, 4], image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): _ = resize_func(image, [6, None], image_ops.ResizeMethod.BILINEAR) def testReturnDtypeV1(self): # Shape inference in V1. 
with ops.Graph().as_default(): target_shapes = [[6, 4], [3, 2], [ array_ops.placeholder(dtypes.int32), array_ops.placeholder(dtypes.int32) ]] for nptype in self.TYPES: image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1]) for method in self.METHODS: for target_shape in target_shapes: y = image_ops.resize_images_v2(image, target_shape, method) if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR: expected_dtype = image.dtype else: expected_dtype = dtypes.float32 self.assertEqual(y.dtype, expected_dtype) @parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)]) def testReturnDtypeV2(self, run_func_eagerly): if not context.executing_eagerly() and run_func_eagerly: # Skip running tf.function eagerly in V1 mode. self.skipTest("Skip test that runs tf.function eagerly in V1 mode.") else: @def_function.function def test_dtype(image, target_shape, target_method): y = image_ops.resize_images_v2(image, target_shape, target_method) if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR: expected_dtype = image.dtype else: expected_dtype = dtypes.float32 self.assertEqual(y.dtype, expected_dtype) target_shapes = [[6, 4], [3, 2], [tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32), tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]] for nptype in self.TYPES: image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype) for method in self.METHODS: for target_shape in target_shapes: with test_util.run_functions_eagerly(run_func_eagerly): test_dtype.get_concrete_function(image, target_shape, method) # half_pixel_centers not supported by XLA @test_util.disable_xla("b/127616992") def testSumTensor(self): img_shape = [1, 6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] # Test size where width is specified as a tensor which is a sum # of two tensors. width_1 = constant_op.constant(1) width_2 = constant_op.constant(3) width = math_ops.add(width_1, width_2) height = constant_op.constant(6) img_np = np.array(data, dtype=np.uint8).reshape(img_shape) for method in self.METHODS: with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2(image, [height, width], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) if method in self.INTERPOLATING_METHODS: self.assertAllClose(resized, img_np, atol=1e-5) @test_util.disable_xla("align_corners=False not supported by XLA") def testResizeDown(self): # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] expected_data = [127, 64, 64, 127, 50, 100] target_height = 3 target_width = 2 # Test out 3-D and 4-D image shapes. 
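# ---------------------------------------------------------------------------
# Illustrative sketch, assuming the public TF 2.x `tf.image.resize`: the op
# accepts either a single 3-D image or a 4-D batch, and returns float32 for
# every method except nearest-neighbor, which preserves the input dtype
# (the same rule asserted by the dtype tests above).
import numpy as np
import tensorflow as tf

single = tf.constant(np.arange(24, dtype=np.uint8).reshape(6, 4, 1))
batch = tf.constant(np.arange(48, dtype=np.uint8).reshape(2, 6, 4, 1))
assert tf.image.resize(single, [3, 2]).shape.as_list() == [3, 2, 1]
assert tf.image.resize(batch, [3, 2]).shape.as_list() == [2, 3, 2, 1]
assert tf.image.resize(single, [3, 2], method='bilinear').dtype == tf.float32
assert tf.image.resize(single, [3, 2], method='nearest').dtype == tf.uint8
# ---------------------------------------------------------------------------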
img_shapes = [[1, 6, 4, 1], [6, 4, 1]] target_shapes = [[1, target_height, target_width, 1], [target_height, target_width, 1]] for target_shape, img_shape in zip(target_shapes, img_shapes): for nptype in self.TYPES: img_np = np.array(data, dtype=nptype).reshape(img_shape) for method in self.METHODS: if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype): with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2( image, [target_height, target_width], method) expected = np.array(expected_data).reshape(target_shape) resized = self.evaluate(y) self.assertAllClose(resized, expected, atol=1e-5) @test_util.disable_xla("align_corners=False not supported by XLA") def testResizeUp(self): img_shape = [1, 3, 2, 1] data = [64, 32, 32, 64, 50, 100] target_height = 6 target_width = 4 expected_data = {} expected_data[image_ops.ResizeMethod.BILINEAR] = [ 64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0, 36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5, 87.5, 100.0 ] expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [ 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0, 100.0 ] expected_data[image_ops.ResizeMethod.AREA] = [ 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0, 100.0 ] expected_data[image_ops.ResizeMethod.LANCZOS3] = [ 75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964, 35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045, 35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413 ] expected_data[image_ops.ResizeMethod.LANCZOS5] = [ 77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709, 35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658, 32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109 ] expected_data[image_ops.ResizeMethod.GAUSSIAN] = [ 61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074, 41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855, 47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619 ] expected_data[image_ops.ResizeMethod.BICUBIC] = [ 70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981, 36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731, 41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284 ] expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [ 66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959, 39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983, 43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739 ] for nptype in self.TYPES: for method in expected_data: with self.cached_session(): img_np = np.array(data, dtype=nptype).reshape(img_shape) image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2(image, [target_height, target_width], method) resized = self.evaluate(y) expected = np.array(expected_data[method]).reshape( [1, target_height, target_width, 1]) self.assertAllClose(resized, expected, atol=1e-04) # XLA doesn't implement half_pixel_centers @test_util.disable_xla("b/127616992") def testLegacyBicubicMethodsMatchNewMethods(self): img_shape = [1, 3, 2, 1] data = [64, 32, 32, 64, 50, 100] target_height = 6 target_width = 4 methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"), 
(gen_image_ops.resize_bicubic, "keyscubic")) for legacy_method, new_method in methods_to_test: with self.cached_session(): img_np = np.array(data, dtype=np.float32).reshape(img_shape) image = constant_op.constant(img_np, shape=img_shape) legacy_result = legacy_method( image, constant_op.constant([target_height, target_width], dtype=dtypes.int32), half_pixel_centers=True) scale = ( constant_op.constant([target_height, target_width], dtype=dtypes.float32) / math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32)) new_result = gen_image_ops.scale_and_translate( image, constant_op.constant([target_height, target_width], dtype=dtypes.int32), scale, array_ops.zeros([2]), kernel_type=new_method, antialias=False) self.assertAllClose( self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04) def testResizeDownArea(self): img_shape = [1, 6, 6, 1] data = [ 128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5, 10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30 ] img_np = np.array(data, dtype=np.uint8).reshape(img_shape) target_height = 4 target_width = 4 expected_data = [ 73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21 ] with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2(image, [target_height, target_width], image_ops.ResizeMethod.AREA) expected = np.array(expected_data).reshape( [1, target_height, target_width, 1]) resized = self.evaluate(y) self.assertAllClose(resized, expected, atol=1) @test_util.disable_xla("align_corners=False not supported by XLA") def testCompareNearestNeighbor(self): if test.is_gpu_available(): input_shape = [1, 5, 6, 3] target_height = 8 target_width = 12 for nptype in [np.float32, np.float64]: img_np = np.arange( 0, np.prod(input_shape), dtype=nptype).reshape(input_shape) with self.cached_session(): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images_v2( image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR) gpu_val = self.evaluate(out_op) with self.cached_session(use_gpu=False): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images_v2( image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR) cpu_val = self.evaluate(out_op) self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5) @test_util.disable_xla("align_corners=False not supported by XLA") def testBfloat16MultipleOps(self): target_height = 8 target_width = 12 img = np.random.uniform(0, 100, size=(30, 10, 2)).astype(np.float32) img_bf16 = ops.convert_to_tensor(img, dtype="bfloat16") new_size = constant_op.constant([target_height, target_width]) img_methods = [ image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA ] for method in img_methods: out_op_bf16 = image_ops.resize_images_v2(img_bf16, new_size, method) out_op_f32 = image_ops.resize_images_v2(img, new_size, method) bf16_val = self.evaluate(out_op_bf16) f32_val = self.evaluate(out_op_f32) self.assertAllClose(bf16_val, f32_val, rtol=1e-2, atol=1e-2) def testCompareBilinear(self): if test.is_gpu_available(): input_shape = [1, 5, 6, 3] target_height = 8 target_width = 12 for nptype in [np.float32, np.float64]: img_np = np.arange( 0, np.prod(input_shape), dtype=nptype).reshape(input_shape) value = {} for use_gpu in [True, False]: with 
self.cached_session(use_gpu=use_gpu): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images(image, new_size, image_ops.ResizeMethod.BILINEAR) value[use_gpu] = self.evaluate(out_op) self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5) def testShapeInference(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None]) self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None]) self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None]) self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None]) self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None]) self._assertShapeInference([None, None, None], [55, 66], [55, 66, None]) def testNameScope(self): # Testing name scope requires placeholders and a graph. with ops.Graph().as_default(): with self.cached_session(): single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3]) y = image_ops.resize_images(single_image, [55, 66]) self.assertTrue(y.op.name.startswith("resize")) def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio, use_tensor_inputs): if use_tensor_inputs: target_max = ops.convert_to_tensor([max_h, max_w]) x_tensor = ops.convert_to_tensor(x) else: target_max = (max_h, max_w) x_tensor = x def resize_func(t, target_max=target_max, preserve_aspect_ratio=preserve_aspect_ratio): return image_ops.resize_images( t, ops.convert_to_tensor(target_max), preserve_aspect_ratio=preserve_aspect_ratio) with self.cached_session(): return self.evaluate(resize_func(x_tensor)) def _assertResizeEqual(self, x, x_shape, y, y_shape, preserve_aspect_ratio=True, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageCall(x, target_height, target_width, preserve_aspect_ratio, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertResizeCheckShape(self, x, x_shape, target_shape, y_shape, preserve_aspect_ratio=True, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width = target_shape x = np.array(x).reshape(x_shape) y = np.zeros(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageCall(x, target_height, target_width, preserve_aspect_ratio, use_tensor_inputs) self.assertShapeEqual(y, ops.convert_to_tensor(y_tf)) def testPreserveAspectRatioMultipleImages(self): x_shape = [10, 100, 80, 10] x = 
np.random.uniform(size=x_shape) for preserve_aspect_ratio in [True, False]: with self.subTest(preserve_aspect_ratio=preserve_aspect_ratio): expect_shape = [10, 250, 200, 10] if preserve_aspect_ratio \ else [10, 250, 250, 10] self._assertResizeCheckShape( x, x_shape, [250, 250], expect_shape, preserve_aspect_ratio=preserve_aspect_ratio) def testPreserveAspectRatioNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertResizeEqual(x, x_shape, x, x_shape) def testPreserveAspectRatioSmaller(self): x_shape = [100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10]) def testPreserveAspectRatioSmallerMultipleImages(self): x_shape = [10, 100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10]) def testPreserveAspectRatioLarger(self): x_shape = [100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10]) def testPreserveAspectRatioSameRatio(self): x_shape = [1920, 1080, 3] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3]) def testPreserveAspectRatioSquare(self): x_shape = [299, 299, 3] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3]) def testLargeDim(self): with self.session(): with self.assertRaises(errors.InvalidArgumentError): x = np.ones((5, 1, 1, 2)) v = image_ops.resize_images_v2(x, [1610637938, 1610637938], image_ops.ResizeMethod.BILINEAR) _ = self.evaluate(v) class ResizeImagesTest(test_util.TensorFlowTestCase, parameterized.TestCase): METHODS = [ image_ops.ResizeMethodV1.BILINEAR, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA ] TYPES = [ np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64 ] def _assertShapeInference(self, pre_shape, size, post_shape): # Try single image resize single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_images(single_image, size) self.assertEqual(y.get_shape().as_list(), post_shape) # Try batch images resize with known batch size images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape) y = image_ops.resize_images(images, size) self.assertEqual(y.get_shape().as_list(), [99] + post_shape) # Try batch images resize with unknown batch size images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape) y = image_ops.resize_images(images, size) self.assertEqual(y.get_shape().as_list(), [None] + post_shape) def shouldRunOnGPU(self, method, nptype): if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and nptype in [np.float32, np.float64]): return True else: return False @test_util.disable_xla("align_corners=False not supported by XLA") def testNoOp(self): img_shape = [1, 6, 4, 1] single_shape = [6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. 
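# ---------------------------------------------------------------------------
# Illustrative sketch of the preserve_aspect_ratio behaviour checked above,
# assuming the public TF 2.x `tf.image.resize`: a 100x80 image asked to fill
# [250, 250] is scaled by the smaller factor (2.5), so the result is 250x200,
# matching the [10, 250, 200, 10] expectation in
# testPreserveAspectRatioMultipleImages.
import numpy as np
import tensorflow as tf

x = tf.constant(np.random.uniform(size=(100, 80, 3)).astype(np.float32))
y = tf.image.resize(x, [250, 250], preserve_aspect_ratio=True)
assert y.shape.as_list() == [250, 200, 3]
# ---------------------------------------------------------------------------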
data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] target_height = 6 target_width = 4 for nptype in self.TYPES: img_np = np.array(data, dtype=nptype).reshape(img_shape) for method in self.METHODS: with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [target_height, target_width], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) self.assertAllClose(resized, img_np, atol=1e-5) # Resizing with a single image must leave the shape unchanged also. with self.cached_session(): img_single = img_np.reshape(single_shape) image = constant_op.constant(img_single, shape=single_shape) y = image_ops.resize_images(image, [target_height, target_width], self.METHODS[0]) yshape = array_ops.shape(y) newshape = self.evaluate(yshape) self.assertAllEqual(single_shape, newshape) def testTensorArguments(self): img_shape = [1, 6, 4, 1] single_shape = [6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] def resize_func(t, new_size, method): return image_ops.resize_images(t, new_size, method) img_np = np.array(data, dtype=np.uint8).reshape(img_shape) for method in self.METHODS: with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = resize_func(image, [6, 4], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) self.assertAllClose(resized, img_np, atol=1e-5) # Resizing with a single image must leave the shape unchanged also. with self.cached_session(): img_single = img_np.reshape(single_shape) image = constant_op.constant(img_single, shape=single_shape) y = resize_func(image, [6, 4], self.METHODS[0]) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(single_shape, newshape) self.assertAllClose(resized, img_single, atol=1e-5) # Incorrect shape. with self.assertRaises(ValueError): new_size = constant_op.constant(4) _ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): new_size = constant_op.constant([4]) _ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): new_size = constant_op.constant([1, 2, 3]) _ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR) # Incorrect dtypes. with self.assertRaises(ValueError): new_size = constant_op.constant([6.0, 4]) _ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): _ = resize_func(image, [6, 4.0], image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): _ = resize_func(image, [None, 4], image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): _ = resize_func(image, [6, None], image_ops.ResizeMethodV1.BILINEAR) def testReturnDtypeV1(self): # Shape inference in V1. 
with ops.Graph().as_default(): target_shapes = [[6, 4], [3, 2], [ array_ops.placeholder(dtypes.int32), array_ops.placeholder(dtypes.int32) ]] for nptype in self.TYPES: image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1]) for method in self.METHODS: for target_shape in target_shapes: y = image_ops.resize_images(image, target_shape, method) if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or target_shape == image.shape[1:3]): expected_dtype = image.dtype else: expected_dtype = dtypes.float32 self.assertEqual(y.dtype, expected_dtype) @parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)]) def testReturnDtypeV2(self, run_func_eagerly): if not context.executing_eagerly() and run_func_eagerly: # Skip running tf.function eagerly in V1 mode. self.skipTest("Skip test that runs tf.function eagerly in V1 mode.") else: @def_function.function def test_dtype(image, target_shape, target_method): y = image_ops.resize_images(image, target_shape, target_method) if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or target_shape == image.shape[1:3]): expected_dtype = image.dtype else: expected_dtype = dtypes.float32 self.assertEqual(y.dtype, expected_dtype) target_shapes = [[6, 4], [3, 2], [tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32), tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]] for nptype in self.TYPES: image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype) for method in self.METHODS: for target_shape in target_shapes: with test_util.run_functions_eagerly(run_func_eagerly): test_dtype.get_concrete_function(image, target_shape, method) @test_util.disable_xla("align_corners=False not supported by XLA") def testSumTensor(self): img_shape = [1, 6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] # Test size where width is specified as a tensor which is a sum # of two tensors. width_1 = constant_op.constant(1) width_2 = constant_op.constant(3) width = math_ops.add(width_1, width_2) height = constant_op.constant(6) img_np = np.array(data, dtype=np.uint8).reshape(img_shape) for method in self.METHODS: with self.cached_session() as sess: image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [height, width], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) self.assertAllClose(resized, img_np, atol=1e-5) @test_util.disable_xla("align_corners=False not supported by XLA") def testResizeDown(self): # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] expected_data = [127, 64, 64, 127, 50, 100] target_height = 3 target_width = 2 # Test out 3-D and 4-D image shapes. 
img_shapes = [[1, 6, 4, 1], [6, 4, 1]] target_shapes = [[1, target_height, target_width, 1], [target_height, target_width, 1]] for target_shape, img_shape in zip(target_shapes, img_shapes): for nptype in self.TYPES: img_np = np.array(data, dtype=nptype).reshape(img_shape) for method in self.METHODS: if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype): with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [target_height, target_width], method) expected = np.array(expected_data).reshape(target_shape) resized = self.evaluate(y) self.assertAllClose(resized, expected, atol=1e-5) @test_util.disable_xla("align_corners=False not supported by XLA") def testResizeUpAlignCornersFalse(self): img_shape = [1, 3, 2, 1] data = [64, 32, 32, 64, 50, 100] target_height = 6 target_width = 4 expected_data = {} expected_data[image_ops.ResizeMethodV1.BILINEAR] = [ 64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0, 41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0, 100.0 ] expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [ 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0, 100.0 ] expected_data[image_ops.ResizeMethodV1.AREA] = [ 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0, 100.0 ] for nptype in self.TYPES: for method in [ image_ops.ResizeMethodV1.BILINEAR, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, image_ops.ResizeMethodV1.AREA ]: with self.cached_session(): img_np = np.array(data, dtype=nptype).reshape(img_shape) image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images( image, [target_height, target_width], method, align_corners=False) resized = self.evaluate(y) expected = np.array(expected_data[method]).reshape( [1, target_height, target_width, 1]) self.assertAllClose(resized, expected, atol=1e-05) def testResizeUpAlignCornersTrue(self): img_shape = [1, 3, 2, 1] data = [6, 3, 3, 6, 6, 9] target_height = 5 target_width = 4 expected_data = {} expected_data[image_ops.ResizeMethodV1.BILINEAR] = [ 6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5, 6.5, 7.5, 6.0, 7.0, 8.0, 9.0 ] expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [ 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0, 9.0, 9.0, 6.0, 6.0, 9.0, 9.0 ] # TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when # align_corners=True. 
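# ---------------------------------------------------------------------------
# Illustrative sketch of the align_corners flag exercised by the V1 tests in
# this class, assuming the public `tf.compat.v1.image` API (the expected_data
# tables in these tests give the exact per-method values): with
# align_corners=True the corner pixels of input and output are aligned, so
# upsampling uses a different interpolation grid than align_corners=False.
import numpy as np
import tensorflow as tf

img = tf.constant(
    np.array([64, 32, 32, 64, 50, 100], dtype=np.float32).reshape(1, 3, 2, 1))
false_corners = tf.compat.v1.image.resize_images(
    img, [6, 4], method=tf.compat.v1.image.ResizeMethod.BILINEAR,
    align_corners=False)
true_corners = tf.compat.v1.image.resize_images(
    img, [6, 4], method=tf.compat.v1.image.ResizeMethod.BILINEAR,
    align_corners=True)
print(np.allclose(false_corners, true_corners))  # expected: False
# ---------------------------------------------------------------------------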
expected_data[image_ops.ResizeMethodV1.AREA] = [ 6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0, 9.0 ] for nptype in self.TYPES: for method in [ image_ops.ResizeMethodV1.BILINEAR, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, image_ops.ResizeMethodV1.AREA ]: with self.cached_session(): img_np = np.array(data, dtype=nptype).reshape(img_shape) image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images( image, [target_height, target_width], method, align_corners=True) resized = self.evaluate(y) expected = np.array(expected_data[method]).reshape( [1, target_height, target_width, 1]) self.assertAllClose(resized, expected, atol=1e-05) def testResizeUpBicubic(self): img_shape = [1, 6, 6, 1] data = [ 128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100 ] img_np = np.array(data, dtype=np.uint8).reshape(img_shape) target_height = 8 target_width = 8 expected_data = [ 128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136, 55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100, 105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69, 75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105 ] with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [target_height, target_width], image_ops.ResizeMethodV1.BICUBIC) resized = self.evaluate(y) expected = np.array(expected_data).reshape( [1, target_height, target_width, 1]) self.assertAllClose(resized, expected, atol=1) def testResizeDownArea(self): img_shape = [1, 6, 6, 1] data = [ 128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5, 10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30 ] img_np = np.array(data, dtype=np.uint8).reshape(img_shape) target_height = 4 target_width = 4 expected_data = [ 73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21 ] with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [target_height, target_width], image_ops.ResizeMethodV1.AREA) expected = np.array(expected_data).reshape( [1, target_height, target_width, 1]) resized = self.evaluate(y) self.assertAllClose(resized, expected, atol=1) @test_util.disable_xla("align_corners=False not supported by XLA") def testCompareNearestNeighbor(self): if test.is_gpu_available(): input_shape = [1, 5, 6, 3] target_height = 8 target_width = 12 for nptype in [np.float32, np.float64]: for align_corners in [True, False]: img_np = np.arange( 0, np.prod(input_shape), dtype=nptype).reshape(input_shape) with self.cached_session(): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images( image, new_size, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, align_corners=align_corners) gpu_val = self.evaluate(out_op) with self.cached_session(use_gpu=False): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images( image, new_size, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, align_corners=align_corners) cpu_val = self.evaluate(out_op) self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5) def testCompareBilinear(self): if test.is_gpu_available(): input_shape = [1, 5, 6, 3] target_height = 8 target_width = 12 for nptype in [np.float32, 
np.float64]: for align_corners in [True, False]: img_np = np.arange( 0, np.prod(input_shape), dtype=nptype).reshape(input_shape) value = {} for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images( image, new_size, image_ops.ResizeMethodV1.BILINEAR, align_corners=align_corners) value[use_gpu] = self.evaluate(out_op) self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5) def testShapeInference(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None]) self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None]) self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None]) self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None]) self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None]) self._assertShapeInference([None, None, None], [55, 66], [55, 66, None]) def testNameScope(self): # Testing name scope requires placeholders and a graph. 
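# ---------------------------------------------------------------------------
# Illustrative sketch, a v2 analogue of the CPU/GPU consistency check in
# testCompareBilinear above, assuming the public TF 2.x API; it only does the
# comparison when a GPU is visible, and only prints rather than asserting.
import numpy as np
import tensorflow as tf

if tf.config.list_physical_devices('GPU'):
  img = tf.constant(np.arange(90, dtype=np.float32).reshape(1, 5, 6, 3))
  with tf.device('/CPU:0'):
    cpu_val = tf.image.resize(img, [8, 12], method='bilinear').numpy()
  with tf.device('/GPU:0'):
    gpu_val = tf.image.resize(img, [8, 12], method='bilinear').numpy()
  print(np.allclose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5))  # expected: True
# ---------------------------------------------------------------------------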
with ops.Graph().as_default(): img_shape = [1, 3, 2, 1] with self.cached_session(): single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3]) y = image_ops.resize_images(single_image, [55, 66]) self.assertTrue(y.op.name.startswith("resize")) def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio, use_tensor_inputs): if use_tensor_inputs: target_max = ops.convert_to_tensor([max_h, max_w]) x_tensor = ops.convert_to_tensor(x) else: target_max = [max_h, max_w] x_tensor = x y = image_ops.resize_images( x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio) with self.cached_session(): return self.evaluate(y) def _assertResizeEqual(self, x, x_shape, y, y_shape, preserve_aspect_ratio=True, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageCall(x, target_height, target_width, preserve_aspect_ratio, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertResizeCheckShape(self, x, x_shape, target_shape, y_shape, preserve_aspect_ratio=True, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width = target_shape x = np.array(x).reshape(x_shape) y = np.zeros(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageCall(x, target_height, target_width, preserve_aspect_ratio, use_tensor_inputs) self.assertShapeEqual(y, ops.convert_to_tensor(y_tf)) def testPreserveAspectRatioMultipleImages(self): x_shape = [10, 100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10], preserve_aspect_ratio=False) def testPreserveAspectRatioNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertResizeEqual(x, x_shape, x, x_shape) def testPreserveAspectRatioSmaller(self): x_shape = [100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10]) def testPreserveAspectRatioSmallerMultipleImages(self): x_shape = [10, 100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10]) def testPreserveAspectRatioLarger(self): x_shape = [100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10]) def testPreserveAspectRatioSameRatio(self): x_shape = [1920, 1080, 3] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3]) def testPreserveAspectRatioSquare(self): x_shape = [299, 299, 3] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3]) class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase): def _ResizeImageWithPad(self, x, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = ops.convert_to_tensor(x) else: x_tensor = x with self.cached_session(): return self.evaluate( image_ops.resize_image_with_pad_v1(x_tensor, target_height, target_width)) def _assertReturns(self, x, x_shape, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = 
np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageWithPad(x, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): self._ResizeImageWithPad(x, target_height, target_width, use_tensor_inputs) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_image_with_pad_v1(image, height, width) self.assertEqual(y.get_shape().as_list(), post_shape) def testShapeInference(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): # Test with 3-D tensors. self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None]) self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None]) self._assertShapeInference([None, None, None], 55, 66, [55, 66, None]) self._assertShapeInference(None, 55, 66, [55, 66, None]) # Test with 4-D tensors. self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None]) self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None]) self._assertShapeInference([5, None, None, None], 55, 66, [5, 55, 66, None]) self._assertShapeInference([None, None, None, None], 55, 66, [None, 55, 66, None]) def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertReturns(x, x_shape, x, x_shape) def testPad(self): # Reduce vertical dimension x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 1, 3, 0] y_shape = [1, 4, 1] self._assertReturns(x, x_shape, y, y_shape) # Reduce horizontal dimension x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [1, 3, 0, 0] y_shape = [2, 2, 1] self._assertReturns(x, x_shape, y, y_shape) x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [1, 3] y_shape = [1, 2, 1] self._assertReturns(x, x_shape, y, y_shape) # half_pixel_centers not supported by XLA @test_util.for_all_test_methods(test_util.disable_xla, "b/127616992") class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase): def _ResizeImageWithPad(self, x, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = ops.convert_to_tensor(x) else: x_tensor = x with 
self.cached_session(): return self.evaluate( image_ops.resize_image_with_pad_v2(x_tensor, target_height, target_width)) def _assertReturns(self, x, x_shape, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageWithPad(x, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): self._ResizeImageWithPad(x, target_height, target_width, use_tensor_inputs) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_image_with_pad_v1(image, height, width) self.assertEqual(y.get_shape().as_list(), post_shape) def testShapeInference(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): # Test with 3-D tensors. self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None]) self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None]) self._assertShapeInference([None, None, None], 55, 66, [55, 66, None]) self._assertShapeInference(None, 55, 66, [55, 66, None]) # Test with 4-D tensors. 
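# ---------------------------------------------------------------------------
# Illustrative sketch of resize-with-pad, assuming the public TF 2.x
# `tf.image.resize_with_pad` (the V2 op this class exercises): the image is
# scaled to fit inside the target box while keeping its aspect ratio, then
# zero-padded evenly to the exact target size.
import numpy as np
import tensorflow as tf

img = tf.constant(np.ones((100, 50, 3), dtype=np.float32))
out = tf.image.resize_with_pad(img, 100, 100)
assert out.shape.as_list() == [100, 100, 3]
# The left and right quarters are zero padding; the middle is the image.
print(float(tf.reduce_max(out[:, :25, :])))   # expected: 0.0
print(float(tf.reduce_min(out[:, 25:75, :])))  # expected: close to 1.0
# ---------------------------------------------------------------------------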
self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3]) self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None]) self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None]) self._assertShapeInference([5, None, None, None], 55, 66, [5, 55, 66, None]) self._assertShapeInference([None, None, None, None], 55, 66, [None, 55, 66, None]) def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertReturns(x, x_shape, x, x_shape) def testPad(self): # Reduce vertical dimension x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 3.5, 5.5, 0] y_shape = [1, 4, 1] self._assertReturns(x, x_shape, y, y_shape) # Reduce horizontal dimension x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [3.5, 5.5, 0, 0] y_shape = [2, 2, 1] self._assertReturns(x, x_shape, y, y_shape) x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [3.5, 5.5] y_shape = [1, 2, 1] self._assertReturns(x, x_shape, y, y_shape) class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase): def _ResizeImageWithCropOrPad(self, x, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = ops.convert_to_tensor(x) else: x_tensor = x @def_function.function def resize_crop_or_pad(*args): return image_ops.resize_image_with_crop_or_pad(*args) with self.cached_session(): return self.evaluate( resize_crop_or_pad(x_tensor, target_height, target_width)) def _assertReturns(self, x, x_shape, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): self._ResizeImageWithCropOrPad(x, target_height, target_width, use_tensor_inputs) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_image_with_crop_or_pad(image, height, width) self.assertEqual(y.get_shape().as_list(), post_shape) def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertReturns(x, x_shape, x, x_shape) def testPad(self): # Pad even along col. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0] y_shape = [2, 6, 1] self._assertReturns(x, x_shape, y, y_shape) # Pad odd along col. 
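    # (Aside -- an illustrative sketch of the centering convention encoded in
    # the expected values of this test, inferred from the arrays below rather
    # than from the library internals: when the total padding is odd, the
    # extra column/row goes on the right/bottom, so widening 4 -> 7 pads one
    # column on the left and two on the right.)
    before = (7 - 4) // 2
    after = (7 - 4) - before
    assert (before, after) == (1, 2)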
x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0] y_shape = [2, 7, 1] self._assertReturns(x, x_shape, y, y_shape) # Pad even along row. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0] y_shape = [4, 4, 1] self._assertReturns(x, x_shape, y, y_shape) # Pad odd along row. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0] y_shape = [5, 4, 1] self._assertReturns(x, x_shape, y, y_shape) def testCrop(self): # Crop even along col. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [2, 3, 6, 7] y_shape = [2, 2, 1] self._assertReturns(x, x_shape, y, y_shape) # Crop odd along col. x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] x_shape = [2, 6, 1] y = [2, 3, 4, 8, 9, 10] y_shape = [2, 3, 1] self._assertReturns(x, x_shape, y, y_shape) # Crop even along row. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [4, 2, 1] y = [3, 4, 5, 6] y_shape = [2, 2, 1] self._assertReturns(x, x_shape, y, y_shape) # Crop odd along row. x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] x_shape = [8, 2, 1] y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12] y_shape = [5, 2, 1] self._assertReturns(x, x_shape, y, y_shape) def testCropAndPad(self): # Pad along row but crop along col. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 0, 2, 3, 6, 7, 0, 0] y_shape = [4, 2, 1] self._assertReturns(x, x_shape, y, y_shape) # Crop along row but pad along col. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [4, 2, 1] y = [0, 3, 4, 0, 0, 5, 6, 0] y_shape = [2, 4, 1] self._assertReturns(x, x_shape, y, y_shape) def testShapeInference(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None]) self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None]) self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None]) self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None]) self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None]) self._assertShapeInference([None, None, None], 55, 66, [55, 66, None]) self._assertShapeInference(None, 55, 66, [55, 66, None]) def testNon3DInput(self): # Input image is not 3D x = [0] * 15 target_height, target_width = [4, 4] for x_shape in ([3, 5],): self._assertRaises(x, x_shape, target_height, target_width, "must have either 3 or 4 dimensions.") for x_shape in ([1, 3, 5, 1, 1],): self._assertRaises(x, x_shape, target_height, target_width, "must have either 3 or 4 dimensions.") def testZeroLengthInput(self): # Input image has 0-length dimension(s). 
target_height, target_width = [1, 1] x = [] for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]): self._assertRaises( x, x_shape, target_height, target_width, "inner 3 dims of 'image.shape' must be > 0", use_tensor_inputs_options=[False]) # The original error message does not contain back slashes. However, they # are added by either the assert op or the runtime. If this behavior # changes in the future, the match string will also needs to be changed. self._assertRaises( x, x_shape, target_height, target_width, "inner 3 dims of \\'image.shape\\' must be > 0", use_tensor_inputs_options=[True]) def testBadParams(self): x_shape = [4, 4, 1] x = np.zeros(x_shape) # target_height <= 0 target_height, target_width = [0, 5] self._assertRaises(x, x_shape, target_height, target_width, "target_height must be > 0") # target_width <= 0 target_height, target_width = [5, 0] self._assertRaises(x, x_shape, target_height, target_width, "target_width must be > 0") def testNameScope(self): # Testing name scope requires placeholders and a graph. with ops.Graph().as_default(): image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3]) y = image_ops.resize_image_with_crop_or_pad(image, 55, 66) self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad")) def simple_color_ramp(): """Build a simple color ramp RGB image.""" w, h = 256, 200 i = np.arange(h)[:, None] j = np.arange(w) image = np.empty((h, w, 3), dtype=np.uint8) image[:, :, 0] = i image[:, :, 1] = j image[:, :, 2] = (i + j) >> 1 return image class JpegTest(test_util.TensorFlowTestCase): # TODO(irving): Add self.assertAverageLess or similar to test_util def averageError(self, image0, image1): self.assertEqual(image0.shape, image1.shape) image0 = image0.astype(int) # Avoid overflow return np.abs(image0 - image1).sum() / np.prod(image0.shape) def testExisting(self): # Read a real jpeg and verify shape path = ("tensorflow/core/lib/jpeg/testdata/" "jpeg_merge_test1.jpg") with self.cached_session(): jpeg0 = io_ops.read_file(path) image0 = image_ops.decode_jpeg(jpeg0) image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0)) jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1]) self.assertEqual(len(jpeg0), 3771) self.assertEqual(image0.shape, (256, 128, 3)) self.assertLess(self.averageError(image0, image1), 1.4) def testCmyk(self): # Confirm that CMYK reads in as RGB base = "tensorflow/core/lib/jpeg/testdata" rgb_path = os.path.join(base, "jpeg_merge_test1.jpg") cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg") shape = 256, 128, 3 for channels in 3, 0: with self.cached_session(): rgb = image_ops.decode_jpeg( io_ops.read_file(rgb_path), channels=channels) cmyk = image_ops.decode_jpeg( io_ops.read_file(cmyk_path), channels=channels) rgb, cmyk = self.evaluate([rgb, cmyk]) self.assertEqual(rgb.shape, shape) self.assertEqual(cmyk.shape, shape) error = self.averageError(rgb, cmyk) self.assertLess(error, 4) def testCropAndDecodeJpeg(self): with self.cached_session() as sess: # Encode it, then decode it, then encode it base = "tensorflow/core/lib/jpeg/testdata" jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg")) h, w, _ = 256, 128, 3 crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5], [h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]] for crop_window in crop_windows: # Explicit two stages: decode + crop. image1 = image_ops.decode_jpeg(jpeg0) y, x, h, w = crop_window image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w) # Combined decode+crop. 
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window, channels=3) # Combined decode+crop should have the same shape inference on image # sizes. image1_shape = image1_crop.get_shape().as_list() image2_shape = image2.get_shape().as_list() self.assertAllEqual(image1_shape, image2_shape) # CropAndDecode should be equal to DecodeJpeg+Crop. image1_crop, image2 = self.evaluate([image1_crop, image2]) self.assertAllEqual(image1_crop, image2) def testCropAndDecodeJpegWithInvalidCropWindow(self): with self.cached_session() as sess: # Encode it, then decode it, then encode it base = "tensorflow/core/lib/jpeg/testdata" jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg")) h, w, _ = 256, 128, 3 # Invalid crop windows. crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11], [11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0], [0, 0, h + 1, w], [0, 0, h, w + 1]] for crop_window in crop_windows: with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), "Invalid JPEG data or crop window"): result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window) self.evaluate(result) def testSynthetic(self): with self.cached_session(): # Encode it, then decode it, then encode it image0 = constant_op.constant(simple_color_ramp()) jpeg0 = image_ops.encode_jpeg(image0) image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE") image2 = image_ops.decode_jpeg( image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE") jpeg0, image0, image1, image2 = self.evaluate( [jpeg0, image0, image1, image2]) # The decoded-encoded image should be similar to the input self.assertLess(self.averageError(image0, image1), 0.6) # We should be very close to a fixpoint self.assertLess(self.averageError(image1, image2), 0.02) # Smooth ramps compress well (input size is 153600) self.assertGreaterEqual(len(jpeg0), 5000) self.assertLessEqual(len(jpeg0), 6000) def testSyntheticFasterAlgorithm(self): with self.cached_session(): # Encode it, then decode it, then encode it image0 = constant_op.constant(simple_color_ramp()) jpeg0 = image_ops.encode_jpeg(image0) image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST") image2 = image_ops.decode_jpeg( image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST") jpeg0, image0, image1, image2 = self.evaluate( [jpeg0, image0, image1, image2]) # The decoded-encoded image should be similar to the input, but # note this is worse than the slower algorithm because it is # less accurate. self.assertLess(self.averageError(image0, image1), 0.95) # Repeated compression / decompression will have a higher error # with a lossier algorithm. self.assertLess(self.averageError(image1, image2), 1.05) # Smooth ramps compress well (input size is 153600) self.assertGreaterEqual(len(jpeg0), 5000) self.assertLessEqual(len(jpeg0), 6000) def testDefaultDCTMethodIsIntegerFast(self): with self.cached_session(): # Compare decoding with both dct_option=INTEGER_FAST and # default. They should be the same. image0 = constant_op.constant(simple_color_ramp()) jpeg0 = image_ops.encode_jpeg(image0) image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST") image2 = image_ops.decode_jpeg(jpeg0) image1, image2 = self.evaluate([image1, image2]) # The images should be the same. self.assertAllClose(image1, image2) def testShape(self): # Shape function requires placeholders and a graph. 
with ops.Graph().as_default(): with self.cached_session(): jpeg = constant_op.constant("nonsense") for channels in 0, 1, 3: image = image_ops.decode_jpeg(jpeg, channels=channels) self.assertEqual(image.get_shape().as_list(), [None, None, channels or None]) def testExtractJpegShape(self): # Read a real jpeg and verify shape. path = ("tensorflow/core/lib/jpeg/testdata/" "jpeg_merge_test1.jpg") with self.cached_session(): jpeg = io_ops.read_file(path) # Extract shape without decoding. image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg)) self.assertAllEqual(image_shape, [256, 128, 3]) def testExtractJpegShapeforCmyk(self): # Read a cmyk jpeg image, and verify its shape. path = ("tensorflow/core/lib/jpeg/testdata/" "jpeg_merge_test1_cmyk.jpg") with self.cached_session(): jpeg = io_ops.read_file(path) image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg)) # Cmyk jpeg image has 4 channels. self.assertAllEqual(image_shape, [256, 128, 4]) def testRandomJpegQuality(self): # Previous implementation of random_jpeg_quality had a bug. # This unit test tests the fixed version, but due to forward compatibility # this test can only be done when fixed version is used. # Test jpeg quality dynamic randomization. with ops.Graph().as_default(), self.test_session(): np.random.seed(7) path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg") jpeg = io_ops.read_file(path) image = image_ops.decode_jpeg(jpeg) random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100) with self.cached_session() as sess: # Test randomization. random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)] are_images_equal = [] for i in range(1, len(random_jpeg_images)): # Most of them should be different if randomization is occurring # correctly. are_images_equal.append( np.array_equal(random_jpeg_images[0], random_jpeg_images[i])) self.assertFalse(all(are_images_equal)) # TODO(b/162345082): stateless random op generates different random number # with xla_gpu. Update tests such that there is a single ground truth result # to test against. def testStatelessRandomJpegQuality(self): # Test deterministic randomness in jpeg quality by checking that the same # sequence of jpeg quality adjustments are returned each round given the # same seed. with test_util.use_gpu(): path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg") jpeg = io_ops.read_file(path) image = image_ops.decode_jpeg(jpeg) jpeg_quality = (40, 100) seeds_list = [(1, 2), (3, 4)] iterations = 2 random_jpeg_images_all = [[] for _ in range(iterations)] for random_jpeg_images in random_jpeg_images_all: for seed in seeds_list: distorted_jpeg = image_ops.stateless_random_jpeg_quality( image, jpeg_quality[0], jpeg_quality[1], seed=seed) # Verify that the random jpeg image is different from the original # jpeg image. self.assertNotAllEqual(image, distorted_jpeg) random_jpeg_images.append(self.evaluate(distorted_jpeg)) # Verify that the results are identical given the same seed. for i in range(1, iterations): self.assertAllEqual(random_jpeg_images_all[0], random_jpeg_images_all[i]) def testAdjustJpegQuality(self): # Test if image_ops.adjust_jpeg_quality works when jpeq quality # is an int (not tensor) for backward compatibility. 
with ops.Graph().as_default(), self.test_session(): np.random.seed(7) jpeg_quality = np.random.randint(40, 100) path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg") jpeg = io_ops.read_file(path) image = image_ops.decode_jpeg(jpeg) adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality( image, jpeg_quality) with self.cached_session() as sess: sess.run(adjust_jpeg_quality_image) def testAdjustJpegQualityShape(self): with self.cached_session(): image = constant_op.constant( np.arange(24, dtype=np.uint8).reshape([2, 4, 3])) adjusted_image = image_ops.adjust_jpeg_quality(image, 80) adjusted_image.shape.assert_is_compatible_with([None, None, 3]) class PngTest(test_util.TensorFlowTestCase): def testExisting(self): # Read some real PNGs, converting to different channel numbers prefix = "tensorflow/core/lib/png/testdata/" inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"), (3, "lena_palette.png"), (4, "lena_palette_trns.png")) for channels_in, filename in inputs: for channels in 0, 1, 3, 4: with self.cached_session(): png0 = io_ops.read_file(prefix + filename) image0 = image_ops.decode_png(png0, channels=channels) png0, image0 = self.evaluate([png0, image0]) self.assertEqual(image0.shape, (26, 51, channels or channels_in)) if channels == channels_in: image1 = image_ops.decode_png(image_ops.encode_png(image0)) self.assertAllEqual(image0, self.evaluate(image1)) def testSynthetic(self): with self.cached_session(): # Encode it, then decode it image0 = constant_op.constant(simple_color_ramp()) png0 = image_ops.encode_png(image0, compression=7) image1 = image_ops.decode_png(png0) png0, image0, image1 = self.evaluate([png0, image0, image1]) # PNG is lossless self.assertAllEqual(image0, image1) # Smooth ramps compress well, but not too well self.assertGreaterEqual(len(png0), 400) self.assertLessEqual(len(png0), 750) def testSyntheticUint16(self): with self.cached_session(): # Encode it, then decode it image0 = constant_op.constant(simple_color_ramp(), dtype=dtypes.uint16) png0 = image_ops.encode_png(image0, compression=7) image1 = image_ops.decode_png(png0, dtype=dtypes.uint16) png0, image0, image1 = self.evaluate([png0, image0, image1]) # PNG is lossless self.assertAllEqual(image0, image1) # Smooth ramps compress well, but not too well self.assertGreaterEqual(len(png0), 800) self.assertLessEqual(len(png0), 1500) def testSyntheticTwoChannel(self): with self.cached_session(): # Strip the b channel from an rgb image to get a two-channel image. gray_alpha = simple_color_ramp()[:, :, 0:2] image0 = constant_op.constant(gray_alpha) png0 = image_ops.encode_png(image0, compression=7) image1 = image_ops.decode_png(png0) png0, image0, image1 = self.evaluate([png0, image0, image1]) self.assertEqual(2, image0.shape[-1]) self.assertAllEqual(image0, image1) def testSyntheticTwoChannelUint16(self): with self.cached_session(): # Strip the b channel from an rgb image to get a two-channel image. gray_alpha = simple_color_ramp()[:, :, 0:2] image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16) png0 = image_ops.encode_png(image0, compression=7) image1 = image_ops.decode_png(png0, dtype=dtypes.uint16) png0, image0, image1 = self.evaluate([png0, image0, image1]) self.assertEqual(2, image0.shape[-1]) self.assertAllEqual(image0, image1) def testShape(self): # Shape function requires placeholders and a graph. 
    with ops.Graph().as_default():
      with self.cached_session():
        png = constant_op.constant("nonsense")
        for channels in 0, 1, 3:
          image = image_ops.decode_png(png, channels=channels)
          self.assertEqual(image.get_shape().as_list(),
                           [None, None, channels or None])


class GifTest(test_util.TensorFlowTestCase):

  def _testValid(self, filename):
    # Read some real GIFs
    prefix = "tensorflow/core/lib/gif/testdata/"
    WIDTH = 20
    HEIGHT = 40
    STRIDE = 5
    shape = (12, HEIGHT, WIDTH, 3)

    with self.cached_session():
      gif0 = io_ops.read_file(prefix + filename)
      image0 = image_ops.decode_gif(gif0)
      gif0, image0 = self.evaluate([gif0, image0])
      self.assertEqual(image0.shape, shape)

      for frame_idx, frame in enumerate(image0):
        gt = np.zeros(shape[1:], dtype=np.uint8)
        start = frame_idx * STRIDE
        end = (frame_idx + 1) * STRIDE
        if end <= WIDTH:
          gt[:, start:end, :] = 255
        else:
          start -= WIDTH
          end -= WIDTH
          gt[start:end, :, :] = 255
        self.assertAllClose(frame, gt)

  def testValid(self):
    self._testValid("scan.gif")
    self._testValid("optimized.gif")

  def testShape(self):
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      with self.cached_session():
        gif = constant_op.constant("nonsense")
        image = image_ops.decode_gif(gif)
        self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])

  def testAnimatedGif(self):
    # Test that all frames in the animated GIF file are properly decoded.
    with self.cached_session():
      base = "tensorflow/core/lib/gif/testdata"
      gif = io_ops.read_file(os.path.join(base, "pendulum_sm.gif"))
      gt_frame0 = io_ops.read_file(
          os.path.join(base, "pendulum_sm_frame0.png"))
      gt_frame1 = io_ops.read_file(
          os.path.join(base, "pendulum_sm_frame1.png"))
      gt_frame2 = io_ops.read_file(
          os.path.join(base, "pendulum_sm_frame2.png"))

      image = image_ops.decode_gif(gif)
      frame0 = image_ops.decode_png(gt_frame0)
      frame1 = image_ops.decode_png(gt_frame1)
      frame2 = image_ops.decode_png(gt_frame2)
      image, frame0, frame1, frame2 = self.evaluate(
          [image, frame0, frame1, frame2])

      # Compare decoded gif frames with ground-truth data.
      self.assertAllEqual(image[0], frame0)
      self.assertAllEqual(image[1], frame1)
      self.assertAllEqual(image[2], frame2)


class ConvertImageTest(test_util.TensorFlowTestCase):

  def _convert(self, original, original_dtype, output_dtype, expected):
    x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
    y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())

    with self.cached_session():
      image = constant_op.constant(x_np)
      y = image_ops.convert_image_dtype(image, output_dtype)
      self.assertTrue(y.dtype == output_dtype)
      self.assertAllClose(y, y_np, atol=1e-5)
      if output_dtype in [
          dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
      ]:
        y_saturate = image_ops.convert_image_dtype(
            image, output_dtype, saturate=True)
        self.assertTrue(y_saturate.dtype == output_dtype)
        self.assertAllClose(y_saturate, y_np, atol=1e-5)

  def testNoConvert(self):
    # Tests that use Tensor.op require a graph; it is set up below, after a
    # brief aside on the expected dtype scaling.
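    # (Aside -- an illustrative sketch of the scaling convention exercised by
    # the conversion tests below in this class, not a statement about the
    # library internals: integer-to-integer conversions rescale by the integer
    # ratio of the two types' maximum values, and integer-to-float conversions
    # divide by the source type's maximum.)
    assert 255 * (32767 // 255) == 255 * 128  # uint8 max -> int16, as tested
    assert abs(255 * (1.0 / 255.0) - 1.0) < 1e-7  # uint8 max -> float32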
    with ops.Graph().as_default():
      # Make sure converting to the same data type creates only an identity op
      with self.cached_session():
        image = constant_op.constant([1], dtype=dtypes.uint8)
        image_ops.convert_image_dtype(image, dtypes.uint8)
        y = image_ops.convert_image_dtype(image, dtypes.uint8)
        self.assertEqual(y.op.type, "Identity")
        self.assertEqual(y.op.inputs[0], image)

  def testConvertBetweenInteger(self):
    # Make sure converting between integer types scales appropriately
    with self.cached_session():
      self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
      self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
      self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
      self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])

  def testConvertBetweenFloat(self):
    # Make sure converting between float types does nothing interesting
    with self.cached_session():
      self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
                    [-1.0, 0, 1.0, 200000])
      self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
                    [-1.0, 0, 1.0, 200000])

  def testConvertBetweenIntegerAndFloat(self):
    # Make sure converting from and to a float type scales appropriately
    with self.cached_session():
      self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
                    [0, 1.0 / 255.0, 1])
      self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
                    [0, 1, 255])

  def testConvertBetweenInt16AndInt8(self):
    with self.cached_session():
      # uint8, uint16
      self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
      self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
      # int8, uint16
      self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
      self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
      # int16, uint16
      self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16,
                    [0, 255 * 128])
      self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16,
                    [0, 255 * 256])


class TotalVariationTest(test_util.TensorFlowTestCase):
  """Tests the function total_variation() in image_ops.

  We test a few small handmade examples, as well as some larger examples
  using an equivalent numpy implementation of the total_variation() function.

  We do NOT test for overflows and invalid / edge-case arguments.
  """

  def _test(self, x_np, y_np):
    """Test that the TensorFlow implementation of total_variation(x_np)
    calculates the values in y_np.

    Note that these may be float-numbers so we only test for approximate
    equality within some narrow error-bound.
    """
    # Create a TensorFlow session.
    with self.cached_session():
      # Add a constant to the TensorFlow graph that holds the input.
      x_tf = constant_op.constant(x_np, shape=x_np.shape)

      # Add ops for calculating the total variation using TensorFlow.
      y = image_ops.total_variation(images=x_tf)

      # Run the TensorFlow session to calculate the result.
      y_tf = self.evaluate(y)

      # Assert that the results are as expected within
      # some small error-bound in case they are float-values.
      self.assertAllClose(y_tf, y_np)

  def _total_variation_np(self, x_np):
    """Calculate the total variation of x_np using numpy.

    This implements the same function as TensorFlow but using numpy instead.

    Args:
      x_np: Numpy array with 3 or 4 dimensions.
    """
    dim = len(x_np.shape)

    if dim == 3:
      # Calculate differences for neighboring pixel-values using slices.
      dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
      dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]

      # Sum over all axes.
      sum_axis = None
    elif dim == 4:
      # Calculate differences for neighboring pixel-values using slices.
      dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
      dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]

      # Only sum over the last 3 axes.
      sum_axis = (1, 2, 3)
    else:
      # This should not occur in this test-code.
      pass

    tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
              np.sum(np.abs(dif2), axis=sum_axis)

    return tot_var

  def _test_tensorflow_vs_numpy(self, x_np):
    """Test the TensorFlow implementation against a numpy implementation.

    Args:
      x_np: Numpy array with 3 or 4 dimensions.
    """
    # Calculate the y-values using the numpy implementation.
    y_np = self._total_variation_np(x_np)
    self._test(x_np, y_np)

  def _generateArray(self, shape):
    """Generate an array of the given shape for use in testing.

    The numbers are calculated as the cumulative sum, which causes the
    difference between neighboring numbers to vary."""
    # Flattened length of the array.
    flat_len = np.prod(shape)

    a = np.array(range(flat_len), dtype=int)
    a = np.cumsum(a)
    a = a.reshape(shape)

    return a

  # TODO(b/133851381): re-enable this test.
  def disabledtestTotalVariationNumpy(self):
    """Test the TensorFlow implementation against a numpy implementation.

    The two implementations are very similar so it is possible that both
    have the same bug, which would not be detected by this test. It is
    therefore necessary to test with manually crafted data as well."""

    # Generate a test-array.
    # This is an 'image' with 100x80 pixels and 3 color channels.
    a = self._generateArray(shape=(100, 80, 3))

    # Test the TensorFlow implementation vs. numpy implementation.
    # We use a numpy implementation to check the results that are
    # calculated using TensorFlow are correct.
    self._test_tensorflow_vs_numpy(a)
    self._test_tensorflow_vs_numpy(a + 1)
    self._test_tensorflow_vs_numpy(-a)
    self._test_tensorflow_vs_numpy(1.1 * a)

    # Expand to a 4-dim array.
    b = a[np.newaxis, :]

    # Combine several variations of the image into a single 4-dim array.
    multi = np.vstack((b, b + 1, -b, 1.1 * b))

    # Test that the TensorFlow function can also handle 4-dim arrays.
    self._test_tensorflow_vs_numpy(multi)

  def testTotalVariationHandmade(self):
    """Test the total variation for a few handmade examples."""

    # We create an image that is 2x2 pixels with 3 color channels.
    # The image is very small so we can check the result by hand.

    # Red color channel.
    # The following are the sum of absolute differences between the pixels.
    # sum row dif = (4-1) + (7-2) = 3 + 5 = 8
    # sum col dif = (2-1) + (7-4) = 1 + 3 = 4
    r = [[1, 2], [4, 7]]

    # Green color channel.
    # sum row dif = 18 + 29 = 47
    # sum col dif = 7 + 18 = 25
    g = [[11, 18], [29, 47]]

    # Blue color channel.
    # sum row dif = 120 + 193 = 313
    # sum col dif = 47 + 120 = 167
    b = [[73, 120], [193, 313]]

    # Combine the 3 color channels into a single 3-dim array.
    # The shape is (2, 2, 3) corresponding to (height, width, color).
    a = np.dstack((r, g, b))

    # Total variation for this image.
    # Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
    tot_var = 564

    # Calculate the total variation using TensorFlow and assert it is correct.
    self._test(a, tot_var)

    # If we add 1 to all pixel-values then the total variation is unchanged.
    self._test(a + 1, tot_var)

    # If we negate all pixel-values then the total variation is unchanged.
    self._test(-a, tot_var)  # pylint: disable=invalid-unary-operand-type

    # Scale the pixel-values by a float. This scales the total variation as
    # well.
    b = 1.1 * a
    self._test(b, 1.1 * tot_var)

    # Scale by another float (right after a quick NumPy cross-check of
    # tot_var).
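    # Quick NumPy cross-check of tot_var: an illustrative sketch that reuses
    # `a` and `tot_var` from above. Total variation is just the sum of
    # absolute differences between vertically and horizontally neighboring
    # pixels.
    tot_var_np = (np.sum(np.abs(np.diff(a, axis=0))) +
                  np.sum(np.abs(np.diff(a, axis=1))))
    assert tot_var_np == tot_var  # 368 + 196 == 564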
    c = 1.2 * a
    self._test(c, 1.2 * tot_var)

    # Combine these 3 images into a single array of shape (3, 2, 2, 3)
    # where the first dimension is for the image-number.
    multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))

    # Check that TensorFlow correctly calculates the total variation
    # for each image individually and returns the correct array.
    self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))


class FormatTest(test_util.TensorFlowTestCase):

  def testFormats(self):
    prefix = "tensorflow/core/lib"
    paths = ("png/testdata/lena_gray.png",
             "jpeg/testdata/jpeg_merge_test1.jpg",
             "gif/testdata/lena.gif")
    decoders = {
        "jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
        "png": functools.partial(image_ops.decode_png, channels=3),
        "gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
    }
    with self.cached_session():
      for path in paths:
        contents = self.evaluate(io_ops.read_file(os.path.join(prefix, path)))
        images = {}
        for name, decode in decoders.items():
          image = self.evaluate(decode(contents))
          self.assertEqual(image.ndim, 3)
          for prev_name, prev in images.items():
            print("path %s, names %s %s, shapes %s %s" %
                  (path, name, prev_name, image.shape, prev.shape))
            self.assertAllEqual(image, prev)
          images[name] = image

  def testError(self):
    path = "tensorflow/core/lib/gif/testdata/scan.gif"
    with self.cached_session():
      for decode in image_ops.decode_jpeg, image_ops.decode_png:
        with self.assertRaisesOpError(r"Got 12 frames"):
          decode(io_ops.read_file(path)).eval()


class CombinedNonMaxSuppressionTest(test_util.TensorFlowTestCase):

  # NOTE(b/142795960): parameterized tests do not work well with tf.tensor
  # inputs. Due to failures, creating another test `testInvalidTensorInput`
  # which is identical to this one except that the input here is a scalar as
  # opposed to a tensor.
  def testInvalidPyInput(self):
    boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
    scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
    max_output_size_per_class = 5
    max_total_size = 2**31
    with self.assertRaisesRegex(
        (TypeError, ValueError),
        "type int64 that does not match expected type of int32|"
        "Tensor conversion requested dtype int32 for Tensor with dtype int64"):
      image_ops.combined_non_max_suppression(
          boxes=boxes_np,
          scores=scores_np,
          max_output_size_per_class=max_output_size_per_class,
          max_total_size=max_total_size)

  # NOTE(b/142795960): parameterized tests do not work well with tf.tensor
  # inputs. Due to failures, creating this test, which is identical to
  # `testInvalidPyInput` except that the input is a tensor here as opposed
  # to a scalar. (See the note on 2**31 just below.)
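  # Reference note (an illustrative check, not used by the tests themselves):
  # max_total_size is expected to be an int32 -- see the error messages
  # asserted above and below -- and 2**31 is the smallest positive integer
  # that does not fit in int32, so it is inferred as int64 and rejected.
  assert 2**31 == np.iinfo(np.int32).max + 1  # int32 max is 2**31 - 1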
  def testInvalidTensorInput(self):
    boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
    scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
    max_output_size_per_class = 5
    max_total_size = ops.convert_to_tensor(2**31)
    with self.assertRaisesRegex(
        (TypeError, ValueError),
        "type int64 that does not match expected type of int32|"
        "Tensor conversion requested dtype int32 for Tensor with dtype int64"):
      image_ops.combined_non_max_suppression(
          boxes=boxes_np,
          scores=scores_np,
          max_output_size_per_class=max_output_size_per_class,
          max_total_size=max_total_size)


class NonMaxSuppressionTest(test_util.TensorFlowTestCase):

  def testNonMaxSuppression(self):
    boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    max_output_size_np = 3
    iou_threshold_np = 0.5
    with self.cached_session():
      boxes = constant_op.constant(boxes_np)
      scores = constant_op.constant(scores_np)
      max_output_size = constant_op.constant(max_output_size_np)
      iou_threshold = constant_op.constant(iou_threshold_np)
      selected_indices = image_ops.non_max_suppression(
          boxes, scores, max_output_size, iou_threshold)
      self.assertAllClose(selected_indices, [3, 0, 5])

  def testInvalidShape(self):

    def nms_func(box, score, max_output_size, iou_thres):
      return image_ops.non_max_suppression(box, score, max_output_size,
                                           iou_thres)

    max_output_size = 3
    iou_thres = 0.5

    # The boxes should be 2D of shape [num_boxes, 4].
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
      scores = constant_op.constant([0.9])
      nms_func(boxes, scores, max_output_size, iou_thres)

    # The box dimension must be 4 (but is 3 here).
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([[0.0, 0, 1.0]])
      scores = constant_op.constant([0.9])
      nms_func(boxes, scores, max_output_size, iou_thres)

    # The boxes are of shape [num_boxes, 4] and the scores are of shape
    # [num_boxes], so an error will be thrown because 1 != 2.
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0],
                                    [0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([0.9])
      nms_func(boxes, scores, max_output_size, iou_thres)

    # The scores should be 1D of shape [num_boxes].
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([[0.9]])
      nms_func(boxes, scores, max_output_size, iou_thres)

    # The max output size should be a scalar (0-D).
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([0.9])
      nms_func(boxes, scores, [[max_output_size]], iou_thres)

    # The iou_threshold should be a scalar (0-D); this is checked below,
    # right after a short aside on the IoU values behind these tests.
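    # (Aside -- a minimal NumPy-free sketch of the pairwise IoU that drives
    # the selections asserted in this class; it reuses the [y1, x1, y2, x2]
    # box layout of the test data and is an editor's illustration, not the
    # library implementation.)
    def iou(box_a, box_b):
      (ay1, ax1, ay2, ax2), (by1, bx1, by2, bx2) = box_a, box_b
      inter_h = max(0.0, min(ay2, by2) - max(ay1, by1))
      inter_w = max(0.0, min(ax2, bx2) - max(ax1, bx1))
      inter = inter_h * inter_w
      union = ((ay2 - ay1) * (ax2 - ax1) + (by2 - by1) * (bx2 - bx1) - inter)
      return inter / union

    # Boxes 0 and 1 of the clusters used above overlap heavily (IoU ~ 0.82),
    # which is why box 0 suppresses box 1 at an iou_threshold of 0.5.
    assert abs(iou([0, 0, 1, 1], [0, 0.1, 1, 1.1]) - 0.9 / 1.1) < 1e-6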
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]]) scores = constant_op.constant([0.9]) nms_func(boxes, scores, max_output_size, [[iou_thres]]) @test_util.xla_allow_fallback( "non_max_suppression with dynamic output shape unsupported.") def testTensors(self): with context.eager_mode(): boxes_tensor = constant_op.constant([[6.625, 6.688, 272., 158.5], [6.625, 6.75, 270.5, 158.4], [5.375, 5., 272., 157.5]]) scores_tensor = constant_op.constant([0.84, 0.7944, 0.7715]) max_output_size = 100 iou_threshold = 0.5 score_threshold = 0.3 soft_nms_sigma = 0.25 pad_to_max_output_size = False # gen_image_ops.non_max_suppression_v5. for dtype in [np.float16, np.float32]: boxes = math_ops.cast(boxes_tensor, dtype=dtype) scores = math_ops.cast(scores_tensor, dtype=dtype) _, _, num_selected = gen_image_ops.non_max_suppression_v5( boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma, pad_to_max_output_size) self.assertEqual(num_selected.numpy(), 1) @test_util.xla_allow_fallback( "non_max_suppression with dynamic output shape unsupported.") def testDataTypes(self): # Test case for GitHub issue 20199. boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3] max_output_size_np = 3 iou_threshold_np = 0.5 score_threshold_np = float("-inf") # Note: There are multiple versions of non_max_suppression v2, v3, v4. # gen_image_ops.non_max_suppression_v2: for dtype in [np.float16, np.float32]: with self.cached_session(): boxes = constant_op.constant(boxes_np, dtype=dtype) scores = constant_op.constant(scores_np, dtype=dtype) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype) selected_indices = gen_image_ops.non_max_suppression_v2( boxes, scores, max_output_size, iou_threshold) selected_indices = self.evaluate(selected_indices) self.assertAllClose(selected_indices, [3, 0, 5]) # gen_image_ops.non_max_suppression_v3 for dtype in [np.float16, np.float32]: with self.cached_session(): boxes = constant_op.constant(boxes_np, dtype=dtype) scores = constant_op.constant(scores_np, dtype=dtype) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype) score_threshold = constant_op.constant(score_threshold_np, dtype=dtype) selected_indices = gen_image_ops.non_max_suppression_v3( boxes, scores, max_output_size, iou_threshold, score_threshold) selected_indices = self.evaluate(selected_indices) self.assertAllClose(selected_indices, [3, 0, 5]) # gen_image_ops.non_max_suppression_v4. for dtype in [np.float16, np.float32]: with self.cached_session(): boxes = constant_op.constant(boxes_np, dtype=dtype) scores = constant_op.constant(scores_np, dtype=dtype) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype) score_threshold = constant_op.constant(score_threshold_np, dtype=dtype) selected_indices, _ = gen_image_ops.non_max_suppression_v4( boxes, scores, max_output_size, iou_threshold, score_threshold) selected_indices = self.evaluate(selected_indices) self.assertAllClose(selected_indices, [3, 0, 5]) # gen_image_ops.non_max_suppression_v5. 
soft_nms_sigma_np = float(0.0) for dtype in [np.float16, np.float32]: with self.cached_session(): boxes = constant_op.constant(boxes_np, dtype=dtype) scores = constant_op.constant(scores_np, dtype=dtype) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype) score_threshold = constant_op.constant(score_threshold_np, dtype=dtype) soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype) selected_indices, _, _ = gen_image_ops.non_max_suppression_v5( boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma) selected_indices = self.evaluate(selected_indices) self.assertAllClose(selected_indices, [3, 0, 5]) def testZeroIOUThreshold(self): boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] scores_np = [1., 1., 1., 1., 1., 1.] max_output_size_np = 3 iou_threshold_np = 0.0 with self.cached_session(): boxes = constant_op.constant(boxes_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np) selected_indices = image_ops.non_max_suppression( boxes, scores, max_output_size, iou_threshold) self.assertAllClose(selected_indices, [0, 3, 5]) class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase): @test_util.xla_allow_fallback( "non_max_suppression with dynamic output shape unsupported.") def testSelectFromThreeClustersWithSoftNMS(self): boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3] max_output_size_np = 6 iou_threshold_np = 0.5 score_threshold_np = 0.0 soft_nms_sigma_np = 0.5 boxes = constant_op.constant(boxes_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np) score_threshold = constant_op.constant(score_threshold_np) soft_nms_sigma = constant_op.constant(soft_nms_sigma_np) selected_indices, selected_scores = \ image_ops.non_max_suppression_with_scores( boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma) selected_indices, selected_scores = self.evaluate( [selected_indices, selected_scores]) self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2]) self.assertAllClose(selected_scores, [0.95, 0.9, 0.384, 0.3, 0.256, 0.197], rtol=1e-2, atol=1e-2) class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase, parameterized.TestCase): @test_util.disable_xla( "b/141236442: " "non_max_suppression with dynamic output shape unsupported.") def testSelectFromThreeClustersV1(self): with ops.Graph().as_default(): boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3] max_output_size_np = 5 iou_threshold_np = 0.5 boxes = constant_op.constant(boxes_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np) selected_indices_padded, num_valid_padded = \ image_ops.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, pad_to_max_output_size=True) selected_indices, num_valid = image_ops.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, pad_to_max_output_size=False) # The output shape of the padded operation must be fully defined. 
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True) self.assertEqual(selected_indices.shape.is_fully_defined(), False) with self.cached_session(): self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0]) self.assertEqual(num_valid_padded.eval(), 3) self.assertAllClose(selected_indices, [3, 0, 5]) self.assertEqual(num_valid.eval(), 3) @parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)]) @test_util.disable_xla( "b/141236442: " "non_max_suppression with dynamic output shape unsupported.") def testSelectFromThreeClustersV2(self, run_func_eagerly): if not context.executing_eagerly() and run_func_eagerly: # Skip running tf.function eagerly in V1 mode. self.skipTest("Skip test that runs tf.function eagerly in V1 mode.") else: @def_function.function def func(boxes, scores, max_output_size, iou_threshold): boxes = constant_op.constant(boxes_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np) yp, nvp = image_ops.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, pad_to_max_output_size=True) y, n = image_ops.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, pad_to_max_output_size=False) # The output shape of the padded operation must be fully defined. self.assertEqual(yp.shape.is_fully_defined(), True) self.assertEqual(y.shape.is_fully_defined(), False) return yp, nvp, y, n boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3] max_output_size_np = 5 iou_threshold_np = 0.5 selected_indices_padded, num_valid_padded, selected_indices, num_valid = \ func(boxes_np, scores_np, max_output_size_np, iou_threshold_np) with self.cached_session(): with test_util.run_functions_eagerly(run_func_eagerly): self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0]) self.assertEqual(self.evaluate(num_valid_padded), 3) self.assertAllClose(selected_indices, [3, 0, 5]) self.assertEqual(self.evaluate(num_valid), 3) @test_util.xla_allow_fallback( "non_max_suppression with dynamic output shape unsupported.") def testSelectFromContinuousOverLapV1(self): with ops.Graph().as_default(): boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4], [0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]] scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3] max_output_size_np = 3 iou_threshold_np = 0.5 score_threshold_np = 0.1 boxes = constant_op.constant(boxes_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np) score_threshold = constant_op.constant(score_threshold_np) selected_indices, num_valid = image_ops.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold) # The output shape of the padded operation must be fully defined. self.assertEqual(selected_indices.shape.is_fully_defined(), False) with self.cached_session(): self.assertAllClose(selected_indices, [0, 2, 4]) self.assertEqual(num_valid.eval(), 3) @parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)]) @test_util.xla_allow_fallback( "non_max_suppression with dynamic output shape unsupported.") def testSelectFromContinuousOverLapV2(self, run_func_eagerly): if not context.executing_eagerly() and run_func_eagerly: # Skip running tf.function eagerly in V1 mode. 
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.") else: @def_function.function def func(boxes, scores, max_output_size, iou_threshold, score_threshold): boxes = constant_op.constant(boxes) scores = constant_op.constant(scores) max_output_size = constant_op.constant(max_output_size) iou_threshold = constant_op.constant(iou_threshold) score_threshold = constant_op.constant(score_threshold) y, nv = image_ops.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold) # The output shape of the padded operation must be fully defined. self.assertEqual(y.shape.is_fully_defined(), False) return y, nv boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4], [0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]] scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3] max_output_size_np = 3 iou_threshold_np = 0.5 score_threshold_np = 0.1 selected_indices, num_valid = func(boxes_np, scores_np, max_output_size_np, iou_threshold_np, score_threshold_np) with self.cached_session(): with test_util.run_functions_eagerly(run_func_eagerly): self.assertAllClose(selected_indices, [0, 2, 4]) self.assertEqual(self.evaluate(num_valid), 3) def testInvalidDtype(self): boxes_np = [[4.0, 6.0, 3.0, 6.0], [2.0, 1.0, 5.0, 4.0], [9.0, 0.0, 9.0, 9.0]] scores = [5.0, 6.0, 5.0] max_output_size = 2**31 with self.assertRaisesRegex( (TypeError, ValueError), "type int64 that does not match type int32"): boxes = constant_op.constant(boxes_np) image_ops.non_max_suppression_padded(boxes, scores, max_output_size) class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase): def testSelectOneFromThree(self): overlaps_np = [ [1.0, 0.7, 0.2], [0.7, 1.0, 0.0], [0.2, 0.0, 1.0], ] scores_np = [0.7, 0.9, 0.1] max_output_size_np = 3 overlaps = constant_op.constant(overlaps_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) overlap_threshold = 0.6 score_threshold = 0.4 selected_indices = image_ops.non_max_suppression_with_overlaps( overlaps, scores, max_output_size, overlap_threshold, score_threshold) with self.cached_session(): self.assertAllClose(selected_indices, [1]) class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase): """Tests utility function used by ssim() and psnr().""" def testWrongDims(self): # Shape function requires placeholders and a graph. with ops.Graph().as_default(): img = array_ops.placeholder(dtype=dtypes.float32) img_np = np.array((2, 2)) with self.cached_session() as sess: _, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img) with self.assertRaises(errors.InvalidArgumentError): sess.run(checks, {img: img_np}) def testShapeMismatch(self): # Shape function requires placeholders and a graph. 
with ops.Graph().as_default(): img1 = array_ops.placeholder(dtype=dtypes.float32) img2 = array_ops.placeholder(dtype=dtypes.float32) img1_np = np.array([1, 2, 2, 1]) img2_np = np.array([1, 3, 3, 1]) with self.cached_session() as sess: _, _, checks = image_ops_impl._verify_compatible_image_shapes( img1, img2) with self.assertRaises(errors.InvalidArgumentError): sess.run(checks, {img1: img1_np, img2: img2_np}) class PSNRTest(test_util.TensorFlowTestCase): """Tests for PSNR.""" def _LoadTestImage(self, sess, filename): content = io_ops.read_file(os.path.join( "tensorflow/core/lib/psnr/testdata", filename)) im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE") im = image_ops.convert_image_dtype(im, dtypes.float32) im, = self.evaluate([im]) return np.expand_dims(im, axis=0) def _LoadTestImages(self): with self.cached_session() as sess: q20 = self._LoadTestImage(sess, "cat_q20.jpg") q72 = self._LoadTestImage(sess, "cat_q72.jpg") q95 = self._LoadTestImage(sess, "cat_q95.jpg") return q20, q72, q95 def _PSNR_NumPy(self, orig, target, max_value): """Numpy implementation of PSNR.""" mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1)) return 20 * np.log10(max_value) - 10 * np.log10(mse) def _RandomImage(self, shape, max_val): """Returns an image or image batch with given shape.""" return np.random.rand(*shape).astype(np.float32) * max_val def testPSNRSingleImage(self): image1 = self._RandomImage((8, 8, 1), 1) image2 = self._RandomImage((8, 8, 1), 1) psnr = self._PSNR_NumPy(image1, image2, 1) with self.cached_session(): tf_image1 = constant_op.constant(image1, shape=image1.shape, dtype=dtypes.float32) tf_image2 = constant_op.constant(image2, shape=image2.shape, dtype=dtypes.float32) tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr")) self.assertAllClose(psnr, tf_psnr, atol=0.001) def testPSNRMultiImage(self): image1 = self._RandomImage((10, 8, 8, 1), 1) image2 = self._RandomImage((10, 8, 8, 1), 1) psnr = self._PSNR_NumPy(image1, image2, 1) with self.cached_session(): tf_image1 = constant_op.constant(image1, shape=image1.shape, dtype=dtypes.float32) tf_image2 = constant_op.constant(image2, shape=image2.shape, dtype=dtypes.float32) tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1, "psnr")) self.assertAllClose(psnr, tf_psnr, atol=0.001) def testGoldenPSNR(self): q20, q72, q95 = self._LoadTestImages() # Verify NumPy implementation first. # Golden values are generated using GNU Octave's psnr() function. psnr1 = self._PSNR_NumPy(q20, q72, 1) self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype)) psnr2 = self._PSNR_NumPy(q20, q95, 1) self.assertNear(29.994, psnr2, 0.001) psnr3 = self._PSNR_NumPy(q72, q95, 1) self.assertNear(35.302, psnr3, 0.001) # Test TensorFlow implementation. 
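    # (A worked instance of the _PSNR_NumPy formula above, added as an
    # illustrative sketch before the TensorFlow checks: with max_value = 1.0
    # and a mean squared error of 1e-3, PSNR = 20*log10(1.0) - 10*log10(1e-3)
    # = 30 dB.)
    target = np.full((1, 1, 1), np.sqrt(1e-3))
    psnr_30db = self._PSNR_NumPy(np.zeros((1, 1, 1)), target, 1.0)
    assert abs(psnr_30db - 30.0) < 1e-6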
with self.cached_session(): tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32) tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32) tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32) tf_psnr1 = self.evaluate(image_ops.psnr(tf_q20, tf_q72, 1, "psnr1")) tf_psnr2 = self.evaluate(image_ops.psnr(tf_q20, tf_q95, 1, "psnr2")) tf_psnr3 = self.evaluate(image_ops.psnr(tf_q72, tf_q95, 1, "psnr3")) self.assertAllClose(psnr1, tf_psnr1, atol=0.001) self.assertAllClose(psnr2, tf_psnr2, atol=0.001) self.assertAllClose(psnr3, tf_psnr3, atol=0.001) def testInfinity(self): q20, _, _ = self._LoadTestImages() psnr = self._PSNR_NumPy(q20, q20, 1) with self.cached_session(): tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32) tf_psnr = self.evaluate(image_ops.psnr(tf_q20, tf_q20, 1, "psnr")) self.assertAllClose(psnr, tf_psnr, atol=0.001) def testInt(self): img1 = self._RandomImage((10, 8, 8, 1), 255) img2 = self._RandomImage((10, 8, 8, 1), 255) img1 = constant_op.constant(img1, dtypes.uint8) img2 = constant_op.constant(img2, dtypes.uint8) psnr_uint8 = image_ops.psnr(img1, img2, 255) img1 = image_ops.convert_image_dtype(img1, dtypes.float32) img2 = image_ops.convert_image_dtype(img2, dtypes.float32) psnr_float32 = image_ops.psnr(img1, img2, 1.0) with self.cached_session(): self.assertAllClose( self.evaluate(psnr_uint8), self.evaluate(psnr_float32), atol=0.001) class SSIMTest(test_util.TensorFlowTestCase): """Tests for SSIM.""" _filenames = ["checkerboard1.png", "checkerboard2.png", "checkerboard3.png",] _ssim = np.asarray([[1.000000, 0.230880, 0.231153], [0.230880, 1.000000, 0.996828], [0.231153, 0.996828, 1.000000]]) def _LoadTestImage(self, sess, filename): content = io_ops.read_file(os.path.join( "tensorflow/core/lib/ssim/testdata", filename)) im = image_ops.decode_png(content) im = image_ops.convert_image_dtype(im, dtypes.float32) im, = self.evaluate([im]) return np.expand_dims(im, axis=0) def _LoadTestImages(self): with self.cached_session() as sess: return [self._LoadTestImage(sess, f) for f in self._filenames] def _RandomImage(self, shape, max_val): """Returns an image or image batch with given shape.""" return np.random.rand(*shape).astype(np.float32) * max_val def testAgainstMatlab(self): """Tests against values produced by Matlab.""" img = self._LoadTestImages() expected = self._ssim[np.triu_indices(3)] def ssim_func(x): return image_ops.ssim( *x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): scores = [ self.evaluate(ssim_func(t)) for t in itertools.combinations_with_replacement(img, 2) ] self.assertAllClose(expected, np.squeeze(scores), atol=1e-4) def testBatch(self): img = self._LoadTestImages() expected = self._ssim[np.triu_indices(3, k=1)] img1, img2 = zip(*itertools.combinations(img, 2)) img1 = np.concatenate(img1) img2 = np.concatenate(img2) ssim = image_ops.ssim( constant_op.constant(img1), constant_op.constant(img2), 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4) def testBatchNumpyInputs(self): img = self._LoadTestImages() expected = self._ssim[np.triu_indices(3, k=1)] img1, img2 = zip(*itertools.combinations(img, 2)) img1 = np.concatenate(img1) img2 = np.concatenate(img2) with self.cached_session(): img1 = self.evaluate(constant_op.constant(img1)) img2 = self.evaluate(constant_op.constant(img2)) ssim = image_ops.ssim( img1, img2, 1.0, filter_size=11, 
filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4) def testBroadcast(self): img = self._LoadTestImages()[:2] expected = self._ssim[:2, :2] img = constant_op.constant(np.concatenate(img)) img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2. img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1. ssim = image_ops.ssim( img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4) def testNegative(self): """Tests against negative SSIM index.""" step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0) img1 = np.tile(step, (16, 1)) img2 = np.fliplr(img1) img1 = img1.reshape((1, 16, 16, 1)) img2 = img2.reshape((1, 16, 16, 1)) ssim = image_ops.ssim( constant_op.constant(img1), constant_op.constant(img2), 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): self.assertLess(self.evaluate(ssim), 0) def testInt(self): img1 = self._RandomImage((1, 16, 16, 3), 255) img2 = self._RandomImage((1, 16, 16, 3), 255) img1 = constant_op.constant(img1, dtypes.uint8) img2 = constant_op.constant(img2, dtypes.uint8) ssim_uint8 = image_ops.ssim( img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) img1 = image_ops.convert_image_dtype(img1, dtypes.float32) img2 = image_ops.convert_image_dtype(img2, dtypes.float32) ssim_float32 = image_ops.ssim( img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): self.assertAllClose( self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001) class MultiscaleSSIMTest(test_util.TensorFlowTestCase): """Tests for MS-SSIM.""" _filenames = ["checkerboard1.png", "checkerboard2.png", "checkerboard3.png",] _msssim = np.asarray([[1.000000, 0.091016, 0.091025], [0.091016, 1.000000, 0.999567], [0.091025, 0.999567, 1.000000]]) def _LoadTestImage(self, sess, filename): content = io_ops.read_file(os.path.join( "tensorflow/core/lib/ssim/testdata", filename)) im = image_ops.decode_png(content) im = image_ops.convert_image_dtype(im, dtypes.float32) im, = self.evaluate([im]) return np.expand_dims(im, axis=0) def _LoadTestImages(self): with self.cached_session() as sess: return [self._LoadTestImage(sess, f) for f in self._filenames] def _RandomImage(self, shape, max_val): """Returns an image or image batch with given shape.""" return np.random.rand(*shape).astype(np.float32) * max_val def testAgainstMatlab(self): """Tests against MS-SSIM computed with Matlab implementation. For color images, MS-SSIM scores are averaged over color channels. 
""" img = self._LoadTestImages() expected = self._msssim[np.triu_indices(3)] def ssim_func(x): return image_ops.ssim_multiscale( *x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): scores = [ self.evaluate(ssim_func(t)) for t in itertools.combinations_with_replacement(img, 2) ] self.assertAllClose(expected, np.squeeze(scores), atol=1e-4) def testUnweightedIsDifferentiable(self): img = self._LoadTestImages() @def_function.function def msssim_func(x1, x2, scalar): return image_ops.ssim_multiscale( x1 * scalar, x2 * scalar, max_val=1.0, power_factors=(1, 1, 1, 1, 1), filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) scalar = constant_op.constant(1.0, dtype=dtypes.float32) with backprop.GradientTape() as tape: tape.watch(scalar) y = msssim_func(img[0], img[1], scalar) grad = tape.gradient(y, scalar) np_grads = self.evaluate(grad) self.assertTrue(np.isfinite(np_grads).all()) def testUnweightedIsDifferentiableEager(self): if not context.executing_eagerly(): self.skipTest("Eager mode only") img = self._LoadTestImages() def msssim_func(x1, x2, scalar): return image_ops.ssim_multiscale( x1 * scalar, x2 * scalar, max_val=1.0, power_factors=(1, 1, 1, 1, 1), filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) scalar = constant_op.constant(1.0, dtype=dtypes.float32) with backprop.GradientTape() as tape: tape.watch(scalar) y = msssim_func(img[0], img[1], scalar) grad = tape.gradient(y, scalar) np_grads = self.evaluate(grad) self.assertTrue(np.isfinite(np_grads).all()) def testBatch(self): """Tests MS-SSIM computed in batch.""" img = self._LoadTestImages() expected = self._msssim[np.triu_indices(3, k=1)] img1, img2 = zip(*itertools.combinations(img, 2)) img1 = np.concatenate(img1) img2 = np.concatenate(img2) msssim = image_ops.ssim_multiscale( constant_op.constant(img1), constant_op.constant(img2), 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): self.assertAllClose(expected, self.evaluate(msssim), 1e-4) def testBroadcast(self): """Tests MS-SSIM broadcasting.""" img = self._LoadTestImages()[:2] expected = self._msssim[:2, :2] img = constant_op.constant(np.concatenate(img)) img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2. img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1. score_tensor = image_ops.ssim_multiscale( img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4) def testRange(self): """Tests against low MS-SSIM score. MS-SSIM is a geometric mean of SSIM and CS scores of various scales. If any of the value is negative so that the geometric mean is not well-defined, then treat the MS-SSIM score as zero. 
""" with self.cached_session() as sess: img1 = self._LoadTestImage(sess, "checkerboard1.png") img2 = self._LoadTestImage(sess, "checkerboard3.png") images = [img1, img2, np.zeros_like(img1), np.full_like(img1, fill_value=255)] images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images] msssim_ops = [ image_ops.ssim_multiscale( x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) for x, y in itertools.combinations(images, 2) ] msssim = self.evaluate(msssim_ops) msssim = np.squeeze(msssim) self.assertTrue(np.all(msssim >= 0.0)) self.assertTrue(np.all(msssim <= 1.0)) def testInt(self): img1 = self._RandomImage((1, 180, 240, 3), 255) img2 = self._RandomImage((1, 180, 240, 3), 255) img1 = constant_op.constant(img1, dtypes.uint8) img2 = constant_op.constant(img2, dtypes.uint8) ssim_uint8 = image_ops.ssim_multiscale( img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) img1 = image_ops.convert_image_dtype(img1, dtypes.float32) img2 = image_ops.convert_image_dtype(img2, dtypes.float32) ssim_float32 = image_ops.ssim_multiscale( img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(): self.assertAllClose( self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001) def testNumpyInput(self): """Test case for GitHub issue 28241.""" image = np.random.random([512, 512, 1]) score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0) with self.cached_session(): _ = self.evaluate(score_tensor) class ImageGradientsTest(test_util.TensorFlowTestCase): def testImageGradients(self): shape = [1, 2, 4, 1] img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]]) img = array_ops.reshape(img, shape) expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape) expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape) dy, dx = image_ops.image_gradients(img) with self.cached_session(): actual_dy = self.evaluate(dy) actual_dx = self.evaluate(dx) self.assertAllClose(expected_dy, actual_dy) self.assertAllClose(expected_dx, actual_dx) def testImageGradientsMultiChannelBatch(self): batch = [[[[1, 2], [2, 5], [3, 3]], [[8, 4], [5, 1], [9, 8]]], [[[5, 3], [7, 9], [1, 6]], [[1, 2], [6, 3], [6, 3]]]] expected_dy = [[[[7, 2], [3, -4], [6, 5]], [[0, 0], [0, 0], [0, 0]]], [[[-4, -1], [-1, -6], [5, -3]], [[0, 0], [0, 0], [0, 0]]]] expected_dx = [[[[1, 3], [1, -2], [0, 0]], [[-3, -3], [4, 7], [0, 0]]], [[[2, 6], [-6, -3], [0, 0]], [[5, 1], [0, 0], [0, 0]]]] batch = constant_op.constant(batch) assert batch.get_shape().as_list() == [2, 2, 3, 2] dy, dx = image_ops.image_gradients(batch) with self.cached_session(): actual_dy = self.evaluate(dy) actual_dx = self.evaluate(dx) self.assertAllClose(expected_dy, actual_dy) self.assertAllClose(expected_dx, actual_dx) def testImageGradientsBadShape(self): # [2 x 4] image but missing batch and depth dimensions. 
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]]) with self.assertRaises(ValueError): image_ops.image_gradients(img) class SobelEdgesTest(test_util.TensorFlowTestCase): def disabled_testSobelEdges1x2x3x1(self): img = constant_op.constant([[1, 3, 6], [4, 1, 5]], dtype=dtypes.float32, shape=[1, 2, 3, 1]) expected = np.reshape([[[0, 0], [0, 12], [0, 0]], [[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2]) sobel = image_ops.sobel_edges(img) with self.cached_session(): actual_sobel = self.evaluate(sobel) self.assertAllClose(expected, actual_sobel) def testSobelEdges5x3x4x2(self): batch_size = 5 plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]], [1, 3, 4, 1]) two_channel = np.concatenate([plane, plane], axis=3) batch = np.concatenate([two_channel] * batch_size, axis=0) img = constant_op.constant(batch, dtype=dtypes.float32, shape=[batch_size, 3, 4, 2]) expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]], [[6, 0], [0, 6], [-6, 10], [-6, 0]], [[0, 0], [0, 0], [0, 10], [0, 0]]], [1, 3, 4, 1, 2]) expected_two_channel = np.concatenate( [expected_plane, expected_plane], axis=3) expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0) sobel = image_ops.sobel_edges(img) with self.cached_session(): actual_sobel = self.evaluate(sobel) self.assertAllClose(expected_batch, actual_sobel) @test_util.run_all_in_graph_and_eager_modes class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase): _FORWARD_COMPATIBILITY_HORIZONS = [ (2020, 1, 1), (2020, 7, 14), (2525, 1, 1), # future behavior ] def testBmpChannels(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): with test_util.use_gpu(): base = "tensorflow/core/lib/bmp/testdata" # `rgba_transparent.bmp` has 4 channels with transparent pixels. # Test consistency between `decode_image` and `decode_bmp` functions. bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp")) image0 = image_ops.decode_image(bmp0, channels=4) image1 = image_ops.decode_bmp(bmp0, channels=4) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) # Test that 3 channels is returned with user request of `channels=3` # even though image has 4 channels. # Note that this operation simply drops 4th channel information. This # is the same behavior as `decode_png`. # e.g. pixel values [25, 25, 25, 100] becomes [25, 25, 25]. bmp1 = io_ops.read_file(os.path.join(base, "rgb_small.bmp")) image2 = image_ops.decode_bmp(bmp0, channels=3) image3 = image_ops.decode_bmp(bmp1) image2, image3 = self.evaluate([image2, image3]) self.assertAllEqual(image2, image3) # Test that 4 channels is returned with user request of `channels=4` # even though image has 3 channels. Alpha channel should be set to # UINT8_MAX. bmp3 = io_ops.read_file(os.path.join(base, "rgb_small_255.bmp")) bmp4 = io_ops.read_file(os.path.join(base, "rgba_small_255.bmp")) image4 = image_ops.decode_bmp(bmp3, channels=4) image5 = image_ops.decode_bmp(bmp4) image4, image5 = self.evaluate([image4, image5]) self.assertAllEqual(image4, image5) # Test that 3 channels is returned with user request of `channels=3` # even though image has 1 channel (grayscale). 
bmp6 = io_ops.read_file(os.path.join(base, "grayscale_small.bmp")) bmp7 = io_ops.read_file( os.path.join(base, "grayscale_small_3channels.bmp")) image6 = image_ops.decode_bmp(bmp6, channels=3) image7 = image_ops.decode_bmp(bmp7) image6, image7 = self.evaluate([image6, image7]) self.assertAllEqual(image6, image7) # Test that 4 channels is returned with user request of `channels=4` # even though image has 1 channel (grayscale). Alpha channel should be # set to UINT8_MAX. bmp9 = io_ops.read_file( os.path.join(base, "grayscale_small_4channels.bmp")) image8 = image_ops.decode_bmp(bmp6, channels=4) image9 = image_ops.decode_bmp(bmp9) image8, image9 = self.evaluate([image8, image9]) self.assertAllEqual(image8, image9) def testJpegUint16(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): with self.cached_session(): base = "tensorflow/core/lib/jpeg/testdata" jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg")) image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16) image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0), dtypes.uint16) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testPngUint16(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): with self.cached_session(): base = "tensorflow/core/lib/png/testdata" png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png")) image0 = image_ops.decode_image(png0, dtype=dtypes.uint16) image1 = image_ops.convert_image_dtype( image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) # NumPy conversions should happen before x = np.random.randint(256, size=(4, 4, 3), dtype=np.uint16) x_str = image_ops_impl.encode_png(x) x_dec = image_ops_impl.decode_image( x_str, channels=3, dtype=dtypes.uint16) self.assertAllEqual(x, x_dec) def testGifUint16(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): with self.cached_session(): base = "tensorflow/core/lib/gif/testdata" gif0 = io_ops.read_file(os.path.join(base, "scan.gif")) image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16) image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0), dtypes.uint16) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testBmpUint16(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): with self.cached_session(): base = "tensorflow/core/lib/bmp/testdata" bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp")) image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16) image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0), dtypes.uint16) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testJpegFloat32(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): with self.cached_session(): base = "tensorflow/core/lib/jpeg/testdata" jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg")) image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32) image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0), dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testPngFloat32(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with 
compat.forward_compatibility_horizon(*horizon): with self.cached_session(): base = "tensorflow/core/lib/png/testdata" png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png")) image0 = image_ops.decode_image(png0, dtype=dtypes.float32) image1 = image_ops.convert_image_dtype( image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testGifFloat32(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): with self.cached_session(): base = "tensorflow/core/lib/gif/testdata" gif0 = io_ops.read_file(os.path.join(base, "scan.gif")) image0 = image_ops.decode_image(gif0, dtype=dtypes.float32) image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0), dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testBmpFloat32(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): with self.cached_session(): base = "tensorflow/core/lib/bmp/testdata" bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp")) image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32) image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0), dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testExpandAnimations(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): with self.cached_session(): base = "tensorflow/core/lib/gif/testdata" gif0 = io_ops.read_file(os.path.join(base, "scan.gif")) # Test `expand_animations=False` case. image0 = image_ops.decode_image( gif0, dtype=dtypes.float32, expand_animations=False) # image_ops.decode_png() handles GIFs and returns 3D tensors animation = image_ops.decode_gif(gif0) first_frame = array_ops.gather(animation, 0) image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertLen(image0.shape, 3) self.assertAllEqual(list(image0.shape), [40, 20, 3]) self.assertAllEqual(image0, image1) # Test `expand_animations=True` case. 
image2 = image_ops.decode_image(gif0, dtype=dtypes.float32) image3 = image_ops.convert_image_dtype(animation, dtypes.float32) image2, image3 = self.evaluate([image2, image3]) self.assertLen(image2.shape, 4) self.assertAllEqual(list(image2.shape), [12, 40, 20, 3]) self.assertAllEqual(image2, image3) def testImageCropAndResize(self): if test_util.is_gpu_available(): op = image_ops_impl.crop_and_resize_v2( image=array_ops.zeros((2, 1, 1, 1)), boxes=[[1.0e+40, 0, 0, 0]], box_indices=[1], crop_size=[1, 1]) self.evaluate(op) else: message = "Boxes contains at least one element that is not finite" with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError), message): op = image_ops_impl.crop_and_resize_v2( image=array_ops.zeros((2, 1, 1, 1)), boxes=[[1.0e+40, 0, 0, 0]], box_indices=[1], crop_size=[1, 1]) self.evaluate(op) def testImageCropAndResizeWithInvalidInput(self): with self.session(): with self.assertRaises((errors.InvalidArgumentError, ValueError)): op = image_ops_impl.crop_and_resize_v2( image=np.ones((1, 1, 1, 1)), boxes=np.ones((11, 4)), box_indices=np.ones((11)), crop_size=[2065374891, 1145309325]) self.evaluate(op) @parameterized.named_parameters( ("_jpeg", "JPEG", "jpeg_merge_test1.jpg"), ("_png", "PNG", "lena_rgba.png"), ("_gif", "GIF", "scan.gif"), ) def testWrongOpBmp(self, img_format, filename): base_folder = "tensorflow/core/lib" base_path = os.path.join(base_folder, img_format.lower(), "testdata") err_msg = "Trying to decode " + img_format + " format using DecodeBmp op" with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): img_bytes = io_ops.read_file(os.path.join(base_path, filename)) img = image_ops.decode_bmp(img_bytes) self.evaluate(img) @parameterized.named_parameters( ("_jpeg", image_ops.decode_jpeg, "DecodeJpeg"), ("_png", image_ops.decode_png, "DecodePng"), ("_gif", image_ops.decode_gif, "DecodeGif"), ) def testWrongOp(self, decode_op, op_used): base = "tensorflow/core/lib/bmp/testdata" bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp")) err_msg = ("Trying to decode BMP format using a wrong op. Use `decode_bmp` " "or `decode_image` instead. Op used: ") + op_used with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): img = decode_op(bmp0) self.evaluate(img) @parameterized.named_parameters( ("_png", "PNG", "lena_rgba.png"), ("_gif", "GIF", "scan.gif"), ("_bmp", "BMP", "rgba_small.bmp"), ) def testWrongOpJpeg(self, img_format, filename): base_folder = "tensorflow/core/lib" base_path = os.path.join(base_folder, img_format.lower(), "testdata") err_msg = ("DecodeAndCropJpeg operation can run on JPEG only, but " "detected ") + img_format with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), err_msg): img_bytes = io_ops.read_file(os.path.join(base_path, filename)) img = image_ops.decode_and_crop_jpeg(img_bytes, [1, 1, 2, 2]) self.evaluate(img) def testGifFramesWithDiffSize(self): """Test decoding an animated GIF. This test verifies that `decode_image` op can decode animated GIFs whose first frame does not fill the canvas. The unoccupied areas should be filled with zeros (black). `squares.gif` is animated with two images of different sizes. It alternates between a smaller image of size 10 x 10 and a larger image of size 16 x 16. Because it starts animating with the smaller image, the first frame does not fill the canvas. (Canvas size is equal to max frame width x max frame height.) `red_black.gif` has just a single image in a GIF format. 
It is the same image as the smaller image (size 10 x 10) of the two images in `squares.gif`. The only difference is that its background (canvas - smaller image) is pre-filled with zeros (black); it is the groundtruth. """ base = "tensorflow/core/lib/gif/testdata" gif_bytes0 = io_ops.read_file(os.path.join(base, "squares.gif")) image0 = image_ops.decode_image(gif_bytes0, dtype=dtypes.float32, expand_animations=False) gif_bytes1 = io_ops.read_file(os.path.join(base, "red_black.gif")) image1 = image_ops.decode_image(gif_bytes1, dtype=dtypes.float32) image1_0 = array_ops.gather(image1, 0) image0, image1_0 = self.evaluate([image0, image1_0]) self.assertAllEqual(image0, image1_0) if __name__ == "__main__": googletest.main()
tensorflow/tensorflow
tensorflow/python/ops/image_ops_test.py
Python
apache-2.0
250,893
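For readers skimming the PSNR and MS-SSIM tests in the row above, here is a small, self-contained NumPy sketch of the quantities they exercise. These are not the private helpers of the test classes (those are defined earlier in that file); the function names are illustrative, and the unweighted geometric mean is a simplification of the behaviour described in testRange, since the real ssim_multiscale op weights each scale by its power_factors.

import numpy as np

def psnr_reference(a, b, max_val):
    # Standard definition the PSNR tests rely on: 10 * log10(max_val**2 / MSE),
    # with the mean squared error taken per image over height, width, channels.
    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    mse = ((a - b) ** 2).mean(axis=(-3, -2, -1))
    return 10.0 * np.log10(float(max_val) ** 2 / mse)

def msssim_clamped_mean(scale_scores):
    # testRange describes MS-SSIM as a geometric mean of per-scale SSIM /
    # contrast-structure scores, treated as zero when any factor is negative.
    # Clamping negative factors to zero before the product forces the overall
    # score to zero in exactly that case.
    scores = np.maximum(np.asarray(scale_scores, dtype=np.float64), 0.0)
    return np.prod(scores) ** (1.0 / len(scores))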
"""Entry point for launching an IPython kernel. This is separate from the ipykernel package so we can avoid doing imports until after removing the cwd from sys.path. """ import sys if __name__ == '__main__': # Remove the CWD from sys.path while we load stuff. # This is added back by InteractiveShellApp.init_path() if sys.path[0] == '': del sys.path[0] from ipykernel import kernelapp as app app.launch_new_instance()
unnikrishnankgs/va
venv/lib/python3.5/site-packages/ipykernel_launcher.py
Python
bsd-2-clause
451
import datetime from decimal import Decimal as D from django.test import TestCase from django.core import exceptions from django.utils.timezone import utc from django_dynamic_fixture import G from oscar.apps.voucher.models import Voucher from oscar.apps.order.models import Order from oscar.core.compat import get_user_model START_DATETIME = datetime.datetime(2011, 1, 1).replace(tzinfo=utc) END_DATETIME = datetime.datetime(2012, 1, 1).replace(tzinfo=utc) User = get_user_model() class TestSavingAVoucher(TestCase): def test_saves_code_as_uppercase(self): voucher = Voucher(code='lower', start_datetime=START_DATETIME, end_datetime=END_DATETIME) voucher.save() self.assertEqual('LOWER', voucher.code) def test_verifies_dates_are_sensible(self): with self.assertRaises(exceptions.ValidationError): voucher = Voucher.objects.create( code='lower', start_datetime=END_DATETIME, end_datetime=START_DATETIME) voucher.clean() class TestAVoucher(TestCase): def setUp(self): self.voucher = Voucher(start_datetime=START_DATETIME, end_datetime=END_DATETIME) def test_is_active_between_start_and_end_dates(self): test = datetime.datetime(2011, 6, 10).replace(tzinfo=utc) self.assertTrue(self.voucher.is_active(test)) def test_is_active_on_end_date(self): self.assertTrue(self.voucher.is_active(END_DATETIME)) def test_is_active_on_start_date(self): self.assertTrue(self.voucher.is_active(START_DATETIME)) def test_is_inactive_outside_of_start_and_end_dates(self): test = datetime.datetime(2012, 3, 10).replace(tzinfo=utc) self.assertFalse(self.voucher.is_active(test)) def test_increments_total_discount_when_recording_usage(self): voucher = G(Voucher) voucher.record_discount({'discount': D('10.00')}) self.assertEqual(voucher.total_discount, D('10.00')) voucher.record_discount({'discount': D('10.00')}) self.assertEqual(voucher.total_discount, D('20.00')) class TestMultiuseVoucher(TestCase): def setUp(self): self.voucher = G(Voucher, usage=Voucher.MULTI_USE) def test_is_available_to_same_user_multiple_times(self): user, order = G(User), G(Order) for i in range(10): self.voucher.record_usage(order, user) is_voucher_available_to_user, __ = self.voucher.is_available_to_user(user=user) self.assertTrue(is_voucher_available_to_user) class TestOncePerCustomerVoucher(TestCase): def setUp(self): self.voucher = G(Voucher, usage=Voucher.ONCE_PER_CUSTOMER) def test_is_available_to_a_user_once(self): user, order = G(User), G(Order) is_voucher_available_to_user, __ = self.voucher.is_available_to_user(user=user) self.assertTrue(is_voucher_available_to_user) self.voucher.record_usage(order, user) is_voucher_available_to_user, __ = self.voucher.is_available_to_user(user=user) self.assertFalse(is_voucher_available_to_user) def test_is_available_to_different_users(self): users, order = [G(User), G(User)], G(Order) for user in users: is_voucher_available_to_user, __ = self.voucher.is_available_to_user(user=user) self.assertTrue(is_voucher_available_to_user) self.voucher.record_usage(order, user) is_voucher_available_to_user, __ = self.voucher.is_available_to_user(user=user) self.assertFalse(is_voucher_available_to_user)
jinnykoo/wuyisj.com
tests/unit/voucher/model_tests.py
Python
bsd-3-clause
3,630
import pyvision as pv
import scipy as sp

if __name__ == '__main__':
    im = pv.Image(sp.zeros((128,128)))

    pts = [pv.Point(48,55),pv.Point(80,55)]
    im.annotatePoints(pts)

    elipse = pv.CenteredRect(64,64,96,96)
    im.annotateEllipse(elipse)

    im.annotateLabel(pv.Point(40,36),"MMM")
    im.annotateLabel(pv.Point(72,36),"MMM")
    im.annotateLabel(pv.Point(58,64),"db")

    im.annotatePolygon([pv.Point(48,90),
                        pv.Point(80,90),pv.Point(64,100)])

    im.show(delay=0)
svohara/pyvision
samples/WACV2012_Tutorial/tutorials/TutorialAnnotations.py
Python
bsd-3-clause
508
# encoding: utf-8 """ med.py Created by Thomas Mangin on 2009-11-05. Copyright (c) 2009-2015 Exa Networks. All rights reserved. """ from struct import pack from struct import unpack from exabgp.bgp.message.update.attribute.attribute import Attribute # ====================================================================== MED (4) # @Attribute.register() class MED (Attribute): ID = Attribute.CODE.MED FLAG = Attribute.Flag.OPTIONAL CACHING = True __slots__ = ['med','_packed'] def __init__ (self, med, packed=None): self.med = med self._packed = self._attribute(packed if packed is not None else pack('!L',med)) def __eq__ (self, other): return \ self.ID == other.ID and \ self.FLAG == other.FLAG and \ self.med == other.med def __ne__ (self, other): return not self.__eq__(other) def pack (self, negotiated=None): return self._packed def __len__ (self): return 4 def __repr__ (self): return str(self.med) def __hash__ (self): return hash(self.med) @classmethod def unpack (cls, data, negotiated): return cls(unpack('!L',data)[0])
earies/exabgp
lib/exabgp/bgp/message/update/attribute/med.py
Python
bsd-3-clause
1,087
#!/usr/bin/env python __authors__ = "Ian Goodfellow" __copyright__ = "Copyright 2012, Universite de Montreal" __credits__ = ["Ian Goodfellow"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" """ Usage: python show_samples <path_to_a_saved_DBM.pkl> Displays a batch of data from the DBM's training set. Then interactively allows the user to run Gibbs steps starting from that seed data to see how the DBM's MCMC sampling changes the data. """ from pylearn2.utils import serial import sys from pylearn2.config import yaml_parse from pylearn2.gui.patch_viewer import PatchViewer import time from theano import function from theano.sandbox.rng_mrg import MRG_RandomStreams import numpy as np from pylearn2.expr.basic import is_binary rows = 10 cols = 10 m = rows * cols _, model_path = sys.argv print 'Loading model...' model = serial.load(model_path) model.set_batch_size(m) dataset_yaml_src = model.dataset_yaml_src print 'Loading data (used for setting up visualization and seeding gibbs chain) ...' dataset = yaml_parse.load(dataset_yaml_src) vis_batch = dataset.get_batch_topo(m) _, patch_rows, patch_cols, channels = vis_batch.shape assert _ == m mapback = hasattr(dataset, 'mapback_for_viewer') pv = PatchViewer((rows,cols*(1+mapback)), (patch_rows,patch_cols), is_color = (channels==3)) def show(): display_batch = dataset.adjust_for_viewer(vis_batch) if display_batch.ndim == 2: display_batch = dataset.get_topological_view(display_batch) if mapback: design_vis_batch = vis_batch if design_vis_batch.ndim != 2: design_vis_batch = dataset.get_design_matrix(design_vis_batch) mapped_batch_design = dataset.mapback_for_viewer(design_vis_batch) mapped_batch = dataset.get_topological_view(mapped_batch_design) for i in xrange(rows): row_start = cols * i for j in xrange(cols): pv.add_patch(display_batch[row_start+j,:,:,:], rescale = False) if mapback: pv.add_patch(mapped_batch[row_start+j,:,:,:], rescale = False) pv.show() if hasattr(model.visible_layer, 'beta'): beta = model.visible_layer.beta.get_value() #model.visible_layer.beta.set_value(beta * 100.) print 'beta: ',(beta.min(), beta.mean(), beta.max()) print 'showing seed data...' show() print 'How many Gibbs steps should I run with the seed data clamped? (negative = ignore seed data) ' x = int(input()) # Make shared variables representing the sampling state of the model layer_to_state = model.make_layer_to_state(m) # Seed the sampling with the data batch vis_sample = layer_to_state[model.visible_layer] def validate_all_samples(): # Run some checks on the samples, this should help catch any bugs layers = [ model.visible_layer ] + model.hidden_layers def check_batch_size(l): if isinstance(l, (list, tuple)): map(check_batch_size, l) else: assert l.get_value().shape[0] == m for layer in layers: state = layer_to_state[layer] space = layer.get_total_state_space() space.validate(state) if 'DenseMaxPool' in str(type(layer)): p, h = state p = p.get_value() h = h.get_value() assert np.all(p == h) assert is_binary(p) if 'BinaryVisLayer' in str(type(layer)): v = state.get_value() assert is_binary(v) if 'Softmax' in str(type(layer)): y = state.get_value() assert is_binary(y) s = y.sum(axis=1) assert np.all(s == 1 ) if 'Ising' in str(type(layer)): s = state.get_value() assert is_binary((s + 1.) / 2.) 
validate_all_samples() if x >= 0: if vis_sample.ndim == 4: vis_sample.set_value(vis_batch) else: vis_sample.set_value(dataset.get_design_matrix(vis_batch)) validate_all_samples() theano_rng = MRG_RandomStreams(2012+9+18) if x > 0: sampling_updates = model.get_sampling_updates(layer_to_state, theano_rng, layer_to_clamp = { model.visible_layer : True }, num_steps = x) t1 = time.time() sample_func = function([], updates=sampling_updates) t2 = time.time() print 'Clamped sampling function compilation took',t2-t1 sample_func() # Now compile the full sampling update sampling_updates = model.get_sampling_updates(layer_to_state, theano_rng) assert layer_to_state[model.visible_layer] in sampling_updates t1 = time.time() sample_func = function([], updates=sampling_updates) t2 = time.time() print 'Sampling function compilation took',t2-t1 while True: print 'Displaying samples. How many steps to take next? (q to quit, ENTER=1)' while True: x = raw_input() if x == 'q': quit() if x == '': x = 1 break else: try: x = int(x) break except: print 'Invalid input, try again' for i in xrange(x): print i sample_func() validate_all_samples() vis_batch = vis_sample.get_value() show() if 'Softmax' in str(type(model.hidden_layers[-1])): state = layer_to_state[model.hidden_layers[-1]] value = state.get_value() y = np.argmax(value, axis=1) assert y.ndim == 1 for i in xrange(0, y.shape[0], cols): print y[i:i+cols]
skearnes/pylearn2
pylearn2/scripts/dbm/show_samples.py
Python
bsd-3-clause
5,434
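The show_samples script above follows a clamp-then-run pattern: it first compiles a sampling function with the visible layer clamped to the seed batch, then a second, unclamped function for free-running Gibbs steps. Below is a condensed sketch of that pattern which reuses only calls that already appear in the script; the wrapper name and its arguments are placeholders, not pylearn2 API.

from theano import function

def make_samplers(model, layer_to_state, theano_rng, burn_in_steps):
    # Burn-in: resample the hidden layers while the visible layer stays
    # clamped to the seed data.
    clamped_updates = model.get_sampling_updates(
        layer_to_state, theano_rng,
        layer_to_clamp={model.visible_layer: True},
        num_steps=burn_in_steps)
    burn_in = function([], updates=clamped_updates)

    # Free-running: every layer, including the visible one, is resampled
    # on each call.
    free_updates = model.get_sampling_updates(layer_to_state, theano_rng)
    step = function([], updates=free_updates)

    return burn_in, step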
# Django settings for protected_downloads project. import os.path DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@domain.com'), ) MANAGERS = ADMINS PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(PROJECT_ROOT, 'download.db'), } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '' # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a # trailing slash. # Examples: "http://foo.com/media/", "/media/". ADMIN_MEDIA_PREFIX = '/media/' # Make this unique, and don't share it with anybody. SECRET_KEY = 'n309^dwk=@+g72ko--8vjyz&1v0u%xf#*0=wzr=2n#f3hb0a=l' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.load_template_source', 'django.template.loaders.app_directories.load_template_source', # 'django.template.loaders.eggs.load_template_source', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ) ROOT_URLCONF = 'protected_downloads.urls' TEMPLATE_DIRS = ( os.path.join(PROJECT_ROOT, 'templates'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.admin', 'download', 'sendfile', ) # SENDFILE settings SENDFILE_BACKEND = 'sendfile.backends.development' #SENDFILE_BACKEND = 'sendfile.backends.xsendfile' #SENDFILE_BACKEND = 'sendfile.backends.nginx' SENDFILE_ROOT = os.path.join(PROJECT_ROOT, 'protected') SENDFILE_URL = '/protected'
nathanielvarona/django-sendfile
examples/protected_downloads/settings.py
Python
bsd-3-clause
2,706
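The SENDFILE_* settings above only configure the transport; the protected files are actually served from a view that calls django-sendfile's sendfile() helper. A minimal sketch of such a view under these settings follows; the view name, URL parameter and permission policy are placeholders, and a real project (like the download app listed in INSTALLED_APPS) would add its own per-object checks.

import os.path

from django.conf import settings
from django.contrib.auth.decorators import login_required
from sendfile import sendfile

@login_required
def download(request, filename):
    # Map the requested name onto SENDFILE_ROOT; basename() is only a crude
    # traversal guard for this sketch.
    path = os.path.join(settings.SENDFILE_ROOT, os.path.basename(filename))
    # sendfile() picks the transport from SENDFILE_BACKEND: it streams the
    # file itself with the development backend, or sets X-Sendfile /
    # X-Accel-Redirect headers for the xsendfile / nginx backends.
    return sendfile(request, path, attachment=True)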
# -*- coding: utf-8 -*- """ sphinx.websupport.search.xapiansearch ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xapian search adapter. :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import xapian from sphinx.util.osutil import ensuredir from sphinx.websupport.search import BaseSearch class XapianSearch(BaseSearch): # Adapted from the GSOC 2009 webapp project. # Xapian metadata constants DOC_PATH = 0 DOC_TITLE = 1 def __init__(self, db_path): self.db_path = db_path def init_indexing(self, changed=[]): ensuredir(self.db_path) self.database = xapian.WritableDatabase(self.db_path, xapian.DB_CREATE_OR_OPEN) self.indexer = xapian.TermGenerator() stemmer = xapian.Stem("english") self.indexer.set_stemmer(stemmer) def finish_indexing(self): # Ensure the db lock is removed. del self.database def add_document(self, path, title, text): self.database.begin_transaction() # sphinx_page_path is used to easily retrieve documents by path. sphinx_page_path = '"sphinxpagepath%s"' % path.replace('/', '_') # Delete the old document if it exists. self.database.delete_document(sphinx_page_path) doc = xapian.Document() doc.set_data(text) doc.add_value(self.DOC_PATH, path) doc.add_value(self.DOC_TITLE, title) self.indexer.set_document(doc) self.indexer.index_text(text) doc.add_term(sphinx_page_path) for word in text.split(): doc.add_posting(word, 1) self.database.add_document(doc) self.database.commit_transaction() def handle_query(self, q): database = xapian.Database(self.db_path) enquire = xapian.Enquire(database) qp = xapian.QueryParser() stemmer = xapian.Stem("english") qp.set_stemmer(stemmer) qp.set_database(database) qp.set_stemming_strategy(xapian.QueryParser.STEM_SOME) query = qp.parse_query(q) # Find the top 100 results for the query. enquire.set_query(query) matches = enquire.get_mset(0, 100) results = [] for m in matches: context = self.extract_context(m.document.get_data()) results.append((m.document.get_value(self.DOC_PATH), m.document.get_value(self.DOC_TITLE), ''.join(context))) return results
WhySoGeeky/DroidPot
venv/lib/python2.7/site-packages/sphinx/websupport/search/xapiansearch.py
Python
mit
2,581
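The adapter above follows the BaseSearch lifecycle: init_indexing(), one add_document() call per page, finish_indexing(), and handle_query() at search time. A minimal driver showing that sequence is sketched below; the database path and page texts are made up, and it assumes the xapian bindings are installed.

from sphinx.websupport.search.xapiansearch import XapianSearch

search = XapianSearch('/tmp/websupport-xapian')

search.init_indexing()
search.add_document('contents', 'Contents',
                    'This page lists every chapter of the documentation.')
search.add_document('intro', 'Introduction',
                    'The introduction explains how the web support package works.')
search.finish_indexing()

results = search.handle_query('documentation')
# `results` is a list of (path, title, context) tuples for the best matches.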
#!/usr/bin/python

# Author: Chris Liechti
# Contact: cliechti@gmx.net
# Revision: $Revision: 4156 $
# Date: $Date: 2005-12-08 05:43:13 +0100 (Thu, 08 Dec 2005) $
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates S5 (X)HTML slideshow documents from standalone '
               'reStructuredText sources. ' + default_description)

publish_cmdline(writer_name='s5', description=description)
alon/polinax
libs/external_libs/docutils-0.4/tools/rst2s5.py
Python
gpl-2.0
684
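The rst2s5 front end above is driven entirely from the command line by publish_cmdline(); a typical invocation, with example file names, is sketched here as a comment (writer options such as themes are listed by --help).

#   python rst2s5.py slides.txt slides.html
#
# publish_cmdline() reads the reStructuredText source and the HTML output
# path from the command line and writes an S5 slideshow.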
# -*- coding: utf-8 -*- # Copyright 2008 Jaap Karssenberg <jaap.karssenberg@gmail.com> '''Test cases for the zim.notebook module.''' import tests import os from zim.fs import File, Dir from zim.config import ConfigManager, XDG_CONFIG_HOME from zim.notebook import * from zim.index import * import zim.errors from zim.formats import ParseTree class TestNotebookInfo(tests.TestCase): def runTest(self): for location, uri in ( (File('file:///foo/bar'), 'file:///foo/bar'), ('file:///foo/bar', 'file:///foo/bar'), ('zim+file:///foo?bar', 'zim+file:///foo?bar'), # specifically ensure the "?" does not get url encoded ): info = NotebookInfo(location) self.assertEqual(info.uri, uri) @tests.slowTest class TestNotebookInfoList(tests.TestCase): def setUp(self): config = ConfigManager() list = config.get_config_file('notebooks.list') file = list.file if file.exists(): file.remove() def runTest(self): root = Dir(self.create_tmp_dir(u'some_utf8_here_\u0421\u0430\u0439')) # Start empty - see this is no issue list = get_notebook_list() self.assertTrue(isinstance(list, NotebookInfoList)) self.assertTrue(len(list) == 0) info = list.get_by_name('foo') self.assertIsNone(info) # Now create it dir = root.subdir('/notebook') init_notebook(dir, name='foo') # And put it in the list and resolve it by name list = get_notebook_list() list.append(NotebookInfo(dir.uri, name='foo')) list.write() self.assertTrue(len(list) == 1) self.assertTrue(isinstance(list[0], NotebookInfo)) info = list.get_by_name('foo') self.assertEqual(info.uri, dir.uri) self.assertEqual(info.name, 'foo') newlist = get_notebook_list() # just to be sure re-laoding works.. self.assertTrue(len(list) == 1) info = newlist.get_by_name('foo') self.assertEqual(info.uri, dir.uri) self.assertEqual(info.name, 'foo') # Add a second entry if os.name == 'nt': uri1 = 'file:///C:/foo/bar' else: uri1 = 'file:///foo/bar' list = get_notebook_list() self.assertTrue(len(list) == 1) list.append(NotebookInfo(uri1, interwiki='foobar')) # on purpose do not set name, should default to basename list.write() self.assertTrue(len(list) == 2) self.assertEqual(list[:], [NotebookInfo(dir.uri), NotebookInfo(uri1)]) # And check all works OK info = list.get_by_name('foo') self.assertEqual(info.uri, dir.uri) nb, path = build_notebook(info) self.assertIsInstance(nb, Notebook) self.assertIsNone(path) for name in ('bar', 'Bar'): info = list.get_by_name(name) self.assertEqual(info.uri, uri1) self.assertRaises(FileNotFoundError, build_notebook, info) # path should not exist # Test default list.set_default(uri1) list.write() list = get_notebook_list() self.assertIsNotNone(list.default) self.assertEqual(list.default.uri, uri1) # Check interwiki parsing - included here since it interacts with the notebook list self.assertEqual(interwiki_link('wp?Foo'), 'http://en.wikipedia.org/wiki/Foo') self.assertEqual(interwiki_link('foo?Foo'), 'zim+' + dir.uri + '?Foo') self.assertEqual(interwiki_link('foobar?Foo'), 'zim+' + uri1 + '?Foo') # interwiki key self.assertEqual(interwiki_link('FooBar?Foo'), 'zim+' + uri1 + '?Foo') # interwiki key self.assertEqual(interwiki_link('bar?Foo'), 'zim+' + uri1 + '?Foo') # name self.assertEqual(interwiki_link('Bar?Foo'), 'zim+' + uri1 + '?Foo') # name # Check backward compatibility file = File('tests/data/notebook-list-old-format.list') list = NotebookInfoList(file) self.assertEqual(list[:], [ NotebookInfo(Dir(path).uri) for path in ('~/Notes', '/home/user/code/zim.debug', '/home/user/Foo Bar') ]) self.assertEqual(list.default, 
NotebookInfo(Dir('/home/user/code/zim.debug').uri) ) @tests.slowTest class TestResolveNotebook(tests.TestCase): def setUp(self): config = ConfigManager() list = config.get_config_file('notebooks.list') file = list.file if file.exists(): file.remove() def runTest(self): # First test some paths for input, uri in ( ('file:///foo/bar', 'file:///foo/bar'), ('~/bar', Dir('~/bar').uri), ): info = resolve_notebook(input) self.assertEqual(info.uri, uri) # Then test with (empty) notebook list info = resolve_notebook('foobar') self.assertIsNone(info) # add an entry and show we get it dir = Dir(self.create_tmp_dir()).subdir('foo') init_notebook(dir, name='foo') list = get_notebook_list() list.append(NotebookInfo(dir.uri, name='foo')) list.write() info = resolve_notebook('foo') self.assertIsNotNone(info) self.assertEqual(info.uri, dir.uri) @tests.slowTest class TestBuildNotebook(tests.TestCase): # Test including automount ! def setUp(self): self.tmpdir = Dir(self.get_tmp_name()) self.notebookdir = self.tmpdir.subdir('notebook') script = self.tmpdir.file('mount.py') script.write('''\ import os import sys notebook = sys.argv[1] os.mkdir(notebook) os.mkdir(notebook + '/foo') for path in ( notebook + "/notebook.zim", notebook + "/foo/bar.txt" ): fh = open(path, 'w') fh.write("") fh.close() ''') automount = XDG_CONFIG_HOME.file('zim/automount.conf') assert not automount.exists() automount.write('''\ [Path %s] mount=%s %s ''' % (self.notebookdir.path, script.path, self.notebookdir.path)) #~ def tearDown(self): #~ automount = XDG_CONFIG_HOME.file('zim/automount.conf') #~ automount.remove() def runTest(self): def mockconstructor(dir): return dir for uri, path in ( (self.notebookdir.uri, None), (self.notebookdir.file('notebook.zim').uri, None), (self.notebookdir.file('foo/bar.txt').uri, Path('foo:bar')), #~ ('zim+' + tmpdir.uri + '?aaa:bbb:ccc', Path('aaa:bbb:ccc')), ): #~ print ">>", uri info = NotebookInfo(uri) nb, p = build_notebook(info, notebookclass=mockconstructor) self.assertEqual(nb, self.notebookdir) self.assertEqual(p, path) info = NotebookInfo(self.notebookdir.file('nonexistingfile.txt')) self.assertRaises(FileNotFoundError, build_notebook, info) class TestNotebook(tests.TestCase): def setUp(self): path = self.get_tmp_name() self.notebook = tests.new_notebook(fakedir=path) def testAPI(self): '''Test various notebook methods''' # TODO now do the same with multiple stores self.assertEqual( self.notebook.get_store(':foo'), self.notebook._stores['']) self.assertTrue( isinstance(self.notebook.get_home_page(), Page)) page1 = self.notebook.get_page(Path('Tree:foo')) page2 = self.notebook.get_page(Path('Tree:foo')) self.assertTrue(page1.valid) self.assertTrue(id(page2) == id(page1)) # check usage of weakref self.notebook.flush_page_cache(Path('Tree:foo')) page3 = self.notebook.get_page(Path('Tree:foo')) self.assertTrue(id(page3) != id(page1)) self.assertFalse(page1.valid) page = self.notebook.get_page(Path('Test:foo')) text = page.dump('plain') newtext = ['Some new content\n'] assert newtext != text self.assertEqual(page.dump('plain'), text) page.parse('plain', newtext) self.assertEqual(page.dump('plain'), newtext) self.assertTrue(page.modified) re = self.notebook.revert_page(page) self.assertFalse(re) # no return value self.assertEqual(page.dump('plain'), text) # object reverted self.assertFalse(page.modified) self.notebook.flush_page_cache(page) page = self.notebook.get_page(page) # new object self.assertEqual(page.dump('plain'), text) page.parse('plain', newtext) self.assertEqual(page.dump('plain'), 
newtext) self.notebook.store_page(page) self.notebook.flush_page_cache(page) page = self.notebook.get_page(page) # new object self.assertEqual(page.dump('plain'), newtext) pages = list(self.notebook.get_pagelist(Path(':'))) self.assertTrue(len(pages) > 0) for page in pages: self.assertTrue(isinstance(page, Page)) index = set() for page in self.notebook.walk(): self.assertTrue(isinstance(page, Page)) index.add(page.name) self.assertTrue(index.issuperset(self.notebook.testdata_manifest)) def testManipulate(self): '''Test renaming, moving and deleting pages in the notebook''' # check test setup OK for path in (Path('Test:BAR'), Path('NewPage')): page = self.notebook.get_page(path) self.assertFalse(page.haschildren) self.assertFalse(page.hascontent) self.assertFalse(page.exists()) for path in (Path('Test:foo'), Path('TaskList')): page = self.notebook.get_page(path) self.assertTrue(page.haschildren or page.hascontent) self.assertTrue(page.exists()) # check errors self.assertRaises(PageExistsError, self.notebook.move_page, Path('Test:foo'), Path('TaskList')) self.notebook.index.update_async() self.assertTrue(self.notebook.index.updating) self.assertRaises(IndexBusyError, self.notebook.move_page, Path('Test:foo'), Path('Test:BAR')) self.notebook.index.ensure_update() # non-existing page - just check no errors here self.notebook.move_page(Path('NewPage'), Path('Test:NewPage')), self.notebook.index.ensure_update() # Test actual moving for oldpath, newpath in ( (Path('Test:foo'), Path('Test:BAR')), (Path('TaskList'), Path('NewPage:Foo:Bar:Baz')), ): page = self.notebook.get_page(oldpath) text = page.dump('wiki') self.assertTrue(page.haschildren) self.notebook.move_page(oldpath, newpath) self.notebook.index.ensure_update() # newpath should exist and look like the old one page = self.notebook.get_page(newpath) self.assertTrue(page.haschildren) text = [l.replace('[[foo:bar]]', '[[+bar]]') for l in text] # fix one updated link self.assertEqual(page.dump('wiki'), text) # oldpath should be deleted page = self.notebook.get_page(oldpath) self.assertFalse(page.haschildren) self.assertFalse(page.hascontent) # Test moving a page below it's own namespace oldpath = Path('Test:Bar') newpath = Path('Test:Bar:newsubpage') page = self.notebook.get_page(oldpath) page.parse('wiki', 'Test 123') self.notebook.store_page(page) self.notebook.move_page(oldpath, newpath) self.notebook.index.ensure_update() page = self.notebook.get_page(newpath) self.assertEqual(page.dump('wiki'), ['Test 123\n']) page = self.notebook.get_page(oldpath) self.assertTrue(page.haschildren) self.assertFalse(page.hascontent) # Check delete and cleanup path = Path('AnotherNewPage:Foo:bar') page = self.notebook.get_page(path) page.parse('plain', 'foo bar\n') self.notebook.store_page(page) page = self.notebook.get_page(Path('SomePageWithLinks')) page.parse('wiki', '[[:AnotherNewPage:Foo:bar]]\n' '**bold** [[:AnotherNewPage]]\n' ) self.notebook.store_page(page) page = self.notebook.get_page(Path('AnotherNewPage')) self.assertTrue(page.haschildren) self.assertFalse(page.hascontent) nlinks = self.notebook.index.n_list_links_to_tree(page, LINK_DIR_BACKWARD) self.assertEqual(nlinks, 2) self.notebook.delete_page(Path('AnotherNewPage:Foo:bar')) page = self.notebook.get_page(path) self.assertFalse(page.haschildren) self.assertFalse(page.hascontent) self.assertRaises(ValueError, self.notebook.index.n_list_links_to_tree, page, LINK_DIR_BACKWARD) # if links are removed and placeholder is cleaned up the # page doesn't exist anymore in the index so we get this 
error page = self.notebook.get_page(Path('SomePageWithLinks')) content = page.dump('wiki') self.assertEqual(''.join(content), ':AnotherNewPage:Foo:bar\n' '**bold** [[:AnotherNewPage]]\n' ) self.notebook.delete_page(path) # now should fail silently page = self.notebook.get_page(Path('AnotherNewPage')) self.assertFalse(page.haschildren) self.assertFalse(page.hascontent) nlinks = self.notebook.index.n_list_links_to_tree(page, LINK_DIR_BACKWARD) self.assertEqual(nlinks, 1) self.notebook.delete_page(page) self.assertRaises(ValueError, self.notebook.index.n_list_links_to_tree, page, LINK_DIR_BACKWARD) # if links are removed and placeholder is cleaned up the # page doesn't exist anymore in the index so we get this error page = self.notebook.get_page(Path('SomePageWithLinks')) content = page.dump('wiki') self.assertEqual(''.join(content), ':AnotherNewPage:Foo:bar\n' '**bold** :AnotherNewPage\n' ) # Try trashing try: self.notebook.trash_page(Path('TrashMe')) except TrashNotSupportedError: print 'trashing not supported' #~ print '\n==== DB ====' #~ self.notebook.index.ensure_update() #~ cursor = self.notebook.index.db.cursor() #~ cursor.execute('select * from pages') #~ for row in cursor: #~ print row #~ cursor.execute('select * from links') #~ for row in cursor: #~ print row # Try rename page = self.notebook.get_page(Path('Test:wiki')) self.assertTrue(page.hascontent) copy = page # we now have a copy of the page object - this is an important # part of the test - see if caching of page objects doesn't bite self.notebook.index.ensure_update() self.notebook.rename_page(Path('Test:wiki'), 'foo') page = self.notebook.get_page(Path('Test:wiki')) self.assertFalse(page.hascontent) page = self.notebook.get_page(Path('Test:foo')) # If we get an error here because notebook resolves Test:Foo # probably the index did not clean up placeholders correctly self.assertTrue(page.hascontent) self.assertFalse(copy.valid) self.notebook.index.ensure_update() self.notebook.rename_page(Path('Test:foo'), 'Foo') page = self.notebook.get_page(Path('Test:foo')) self.assertFalse(page.hascontent) page = self.notebook.get_page(Path('Test:Foo')) self.assertTrue(page.hascontent) def testUpdateLinks(self): '''Test logic for updating links on move''' # creating relative paths for source, href, link in ( ('Foo:Bar', 'Foo:Bar', 'Bar'), ('Foo:Bar', 'Foo:Bar:Baz', '+Baz'), ('Foo:Bar:Baz', 'Foo:Dus', 'Foo:Dus'), ('Foo:Bar:Baz', 'Foo:Bar:Dus', 'Dus'), ('Foo:Bar', 'Dus:Ja', 'Dus:Ja'), ('Foo:Bar', 'Foo:Ja', 'Ja'), ('Foo:Bar:Baz', 'Foo:Bar', 'Bar'), ('Foo:Bar:Baz', 'Foo', 'Foo'), ('Foo:Bar:Baz', 'Bar', ':Bar'), # conflict with anchor ): #~ print '>', source, href, link self.assertEqual( self.notebook.relative_link(Path(source), Path(href)), link) # update the page that was moved itself # moving from Dus:Baz to foo:bar:Baz or renaming to Dus:Bar text = u'''\ http://foo.org # urls are untouched [[:Hmmm:OK]] # link way outside move [[Baz:Ja]] # relative link that does not need change on move, but does on rename [[Ja]] # relative link that needs updating on move, but not on rename [[Ja|Grrr]] # relative link that needs updating on move, but not on rename - with name [[:foo:bar:Dus]] # Link that could be made relative, but isn't ''' wanted1 = u'''\ http://foo.org # urls are untouched [[:Hmmm:OK]] # link way outside move [[Baz:Ja]] # relative link that does not need change on move, but does on rename [[Dus:Ja]] # relative link that needs updating on move, but not on rename [[Dus:Ja|Grrr]] # relative link that needs updating on move, but not on 
rename - with name [[:foo:bar:Dus]] # Link that could be made relative, but isn't ''' wanted2 = u'''\ http://foo.org # urls are untouched [[:Hmmm:OK]] # link way outside move [[+Ja]] # relative link that does not need change on move, but does on rename [[Ja]] # relative link that needs updating on move, but not on rename [[Ja|Grrr]] # relative link that needs updating on move, but not on rename - with name [[:foo:bar:Dus]] # Link that could be made relative, but isn't ''' # "move" Dus:Baz -> foo:bar:Baz page = self.notebook.get_page(Path('foo:bar:Baz')) page.parse('wiki', text) self.notebook._update_links_from(page, Path('Dus:Baz'), page, Path('Dus:Baz')) self.assertEqual(u''.join(page.dump('wiki')), wanted1) print '--' # "rename" Dus:Baz -> Dus:Bar page = self.notebook.get_page(Path('Dus:Bar')) page.parse('wiki', text) self.notebook._update_links_from(page, Path('Dus:Baz'), page, Path('Dus:Baz')) self.assertEqual(u''.join(page.dump('wiki')), wanted2) # updating links to the page that was moved # moving from Dus:Baz to foo:bar:Baz or renaming to Dus:Bar - updating links in Dus:Ja text = u'''\ http://foo.org # urls are untouched [[:Hmmm:OK]] # link way outside move [[Baz:Ja]] # relative link that needs updating [[Baz:Ja|Grr]] # relative link that needs updating - with name [[Dus:Foo]] # relative link that does not need updating [[:Dus:Baz]] # absolute link that needs updating [[:Dus:Baz:Hmm]] # absolute link that needs updating [[:Dus:Baz:Hmm:Ja]] # absolute link that needs updating ''' wanted1 = u'''\ http://foo.org # urls are untouched [[:Hmmm:OK]] # link way outside move [[foo:bar:Baz:Ja]] # relative link that needs updating [[foo:bar:Baz:Ja|Grr]] # relative link that needs updating - with name [[Dus:Foo]] # relative link that does not need updating [[foo:bar:Baz]] # absolute link that needs updating [[foo:bar:Baz:Hmm]] # absolute link that needs updating [[foo:bar:Baz:Hmm:Ja]] # absolute link that needs updating ''' wanted2 = u'''\ http://foo.org # urls are untouched [[:Hmmm:OK]] # link way outside move [[Bar:Ja]] # relative link that needs updating [[Bar:Ja|Grr]] # relative link that needs updating - with name [[Dus:Foo]] # relative link that does not need updating [[Bar]] # absolute link that needs updating [[Bar:Hmm]] # absolute link that needs updating [[Bar:Hmm:Ja]] # absolute link that needs updating ''' page = self.notebook.get_page(Path('Dus:Ja')) page.parse('wiki', text) self.notebook._update_links_in_page(page, Path('Dus:Baz'), Path('foo:bar:Baz')) self.assertEqual(u''.join(page.dump('wiki')), wanted1) page = self.notebook.get_page(Path('Dus:Ja')) page.parse('wiki', text) self.notebook._update_links_in_page(page, Path('Dus:Baz'), Path('Dus:Bar')) self.assertEqual(u''.join(page.dump('wiki')), wanted2) # now test actual move on full notebook def links(source, href): #~ print '====' for link in self.notebook.index.list_links(source, LINK_DIR_FORWARD): #~ print 'FOUND LINK', link if link.href == href: return True else: return False path = Path('Linking:Dus:Ja') newpath = Path('Linking:Hmm:Ok') self.assertTrue(links(path, Path('Linking:Dus'))) self.assertTrue(links(path, Path('Linking:Foo:Bar'))) self.assertTrue(links(Path('Linking:Foo:Bar'), path)) self.assertFalse(links(newpath, Path('Linking:Dus'))) self.assertFalse(links(newpath, Path('Linking:Foo:Bar'))) self.assertFalse(links(Path('Linking:Foo:Bar'), newpath)) self.notebook.move_page(path, newpath, update_links=True) self.assertFalse(links(path, Path('Linking:Dus'))) self.assertFalse(links(path, Path('Linking:Foo:Bar'))) 
self.assertFalse(links(Path('Linking:Foo:Bar'), path)) self.assertTrue(links(newpath, Path('Linking:Dus'))) self.assertTrue(links(newpath, Path('Linking:Foo:Bar'))) self.assertTrue(links(Path('Linking:Foo:Bar'), newpath)) def testResolvePath(self): '''Test notebook.resolve_path()''' # cleaning absolute paths for name, wanted in ( ('foo:::bar', 'foo:bar'), ('::foo:bar:', 'foo:bar'), (':foo', 'foo'), (':Bar', 'Bar'), (':Foo (Bar)', 'Foo (Bar)'), # TODO more ambigous test cases ): self.assertEqual( self.notebook.resolve_path(name), Path(wanted) ) # resolving relative paths for name, ns, wanted in ( ('foo:bar', 'Test:xxx', 'Test:foo:bar'), ('test', 'Test:xxx', 'Test'), ('+test', 'Test:xxx', 'Test:xxx:test'), ('foo', 'Test:xxx', 'Test:foo'), ('+foo', 'Test:xxx', 'Test:xxx:foo'), ('Test', 'TaskList:bar', 'Test'), ('test:me', 'TaskList:bar', 'Test:me'), ): self.assertEqual( self.notebook.resolve_path(name, Path(ns)), Path(wanted) ) self.assertRaises(PageNameError, self.notebook.resolve_path, ':::') self.assertRaises(PageNameError, self.notebook.resolve_path, '/foo') self.assertRaises(PageNameError, self.notebook.resolve_path, ':foo:(bar)') def testResolveFile(self): '''Test notebook.resolve_file()''' path = Path('Foo:Bar') dir = self.notebook.dir self.notebook.config['Notebook']['document_root'] = './notebook_document_root' self.notebook.do_properties_changed() # parse config doc_root = self.notebook.document_root self.assertEqual(doc_root, dir.subdir('notebook_document_root')) for link, wanted, cleaned in ( ('~/test.txt', File('~/test.txt'), '~/test.txt'), (r'~\test.txt', File('~/test.txt'), '~/test.txt'), ('file:///test.txt', File('file:///test.txt'), None), ('file:/test.txt', File('file:///test.txt'), None), ('file://localhost/test.txt', File('file:///test.txt'), None), ('/test.txt', doc_root.file('test.txt'), '/test.txt'), ('../../notebook_document_root/test.txt', doc_root.file('test.txt'), '/test.txt'), ('./test.txt', dir.file('Foo/Bar/test.txt'), './test.txt'), (r'.\test.txt', dir.file('Foo/Bar/test.txt'), './test.txt'), ('../test.txt', dir.file('Foo/test.txt'), '../test.txt'), (r'..\test.txt', dir.file('Foo/test.txt'), '../test.txt'), ('../Bar/Baz/test.txt', dir.file('Foo/Bar/Baz/test.txt'), './Baz/test.txt'), (r'C:\foo\bar', File('file:///C:/foo/bar'), None), (r'Z:\foo\bar', File('file:///Z:/foo/bar'), None), ): #~ print link, '>>', self.notebook.resolve_file(link, path) self.assertEqual( self.notebook.resolve_file(link, path), wanted) self.assertEqual( self.notebook.relative_filepath(wanted, path), cleaned) # check relative path without Path self.assertEqual( self.notebook.relative_filepath(doc_root.file('foo.txt')), '/foo.txt') self.assertEqual( self.notebook.relative_filepath(dir.file('foo.txt')), './foo.txt') # def testResolveLink(self): # '''Test page.resolve_link()''' # page = self.notebook.get_page(':Test:foo') # for link, wanted in ( #~ (':foo:bar', ('page', ':foo:bar')), # ('foo:bar', ('page', ':Test:foo:bar')), # ('Test', ('page', ':Test')), # ('Test:non-existent', ('page', ':Test:non-existent')), # ('user@domain.com', ('mailto', 'mailto:user@domain.com')), # ('mailto:user@domain.com', ('mailto', 'mailto:user@domain.com')), # ('http://zim-wiki.org', ('http', 'http://zim-wiki.org')), # ('foo://zim-wiki.org', ('foo', 'foo://zim-wiki.org')), #~ ('file://'), #~ ('/foo/bar', ('file', '/foo/bar')), #~ ('man?test', ('man', 'test')), # ): self.assertEqual(self.notebook.resolve_link(link, page), wanted) #~ def testResolveName(self): #~ '''Test store.resolve_name().''' #~ print 
'\n'+'='*10+'\nSTORE: %s' % self.store #~ #~ # First make sure basic list function is working #~ def list_pages(name): #~ for page in self.store.get_pages(name): #~ yield page.basename #~ self.assertTrue('Test' in list_pages('')) #~ self.assertTrue('foo' in list_pages(':Test')) #~ self.assertTrue('bar' in list_pages(':Test:foo')) #~ self.assertFalse('Dus' in list_pages(':Test:foo')) #~ #~ # Now test the resolving algorithm - only testing low level #~ # function in store, so path "anchor" does not work, search #~ # is strictly right to left through the namespace, if any #~ for link, namespace, name in ( #~ ('BAR','Test:foo','Test:foo:bar'), #~ ('test',None,'Test'), #~ ('test','Test:foo:bar','Test'), #~ ('FOO:Dus','Test:foo:bar','Test:foo:Dus'), #~ # FIXME more ambigous test data #~ ): #~ print '-'*10+'\nLINK %s (%s)' % (link, namespace) #~ r = self.store.resolve_name(link, namespace=namespace) #~ print 'RESULT %s' % r #~ self.assertEqual(r, name) class TestPath(tests.TestCase): '''Test path object''' def generator(self, name): return Path(name) def runTest(self): '''Test Path object''' for name, namespace, basename in [ ('Test:foo', 'Test', 'foo'), ('Test', '', 'Test'), ]: path = self.generator(name) # test basic properties self.assertEqual(path.name, name) self.assertEqual(path.basename, basename) self.assertEqual(path.namespace, namespace) self.assertTrue(path.name in path.__repr__()) # TODO test operators on paths > < + - >= <= == != class TestPage(TestPath): '''Test page object''' def setUp(self): self.notebook = tests.new_notebook() def generator(self, name): return self.notebook.get_page(Path(name)) def testMain(self): '''Test Page object''' TestPath.runTest(self) tree = ParseTree().fromstring('''\ <zim-tree> <link href='foo:bar'>foo:bar</link> <link href='bar'>bar</link> <tag name='baz'>@baz</tag> </zim-tree> ''' ) page = Page(Path('Foo')) page.readonly = False page.set_parsetree(tree) links = list(page.get_links()) self.assertEqual(links, [ ('page', 'foo:bar', {}), ('page', 'bar', {}), ] ) tags = list(page.get_tags()) self.assertEqual(tags, [ ('@baz', {'name': 'baz'}), ]) self.assertEqual(page.get_parsetree().tostring(), tree.tostring()) # ensure we didn't change the tree # TODO test get / set parse tree with and without source tree = ParseTree().fromstring('<zim-tree></zim-tree>') self.assertFalse(tree.hascontent) page.set_parsetree(tree) self.assertFalse(page.hascontent) def testShouldAutochangeHeading(self): page = Page(Path("Foo")) page.readonly = False tree = ParseTree().fromstring('<zim-tree></zim-tree>') tree.set_heading("Foo") page.set_parsetree(tree) self.assertTrue(page.heading_matches_pagename()) tree.set_heading("Bar") page.set_parsetree(tree) self.assertFalse(page.heading_matches_pagename()) class TestIndexPage(tests.TestCase): def setUp(self): self.notebook = tests.new_notebook() self.notebook.index.update() def runTest(self): '''Test index page generation''' indexpage = IndexPage(self.notebook, Path(':')) tree = indexpage.get_parsetree() self.assertTrue(tree) links = [link[1] for link in indexpage.get_links()] self.assertTrue(len(links) > 1) #~ print links self.assertTrue('Test:foo' in links) class TestNewNotebook(tests.TestCase): def setUp(self): self.notebook = Notebook(index=Index(dbfile=':memory:')) self.notebook.add_store(Path(':'), 'memory') # Explicitly not run index.update() here def runTest(self): '''Try populating a notebook from scratch''' # Based on bug lp:511481 - should reproduce bug with updating links to child pages notebook = self.notebook index = 
self.notebook.index for name, text in ( ('page1', 'Foo bar\n'), ('page1:child', 'I have backlinks !\n'), ('page2', '[[page1:child]] !\n'), ('page3', 'Hmm\n'), ): path = Path(name) page = self.notebook.get_page(path) page.parse('wiki', text) notebook.store_page(page) for name, forw, backw in ( ('page1', 0, 0), ('page1:child', 0, 1), ('page2', 1, 0), ('page3', 0, 0), ('page3:page1', 0, 0), ('page3:page1:child', 0, 0), ): path = Path(name) #~ print path, \ #~ list(index.list_links(path, LINK_DIR_FORWARD)), \ #~ list(index.list_links(path, LINK_DIR_BACKWARD)) self.assertEqual( index.n_list_links(path, LINK_DIR_FORWARD), forw) self.assertEqual( index.n_list_links(path, LINK_DIR_BACKWARD), backw) notebook.move_page(Path('page1'), Path('page3:page1')) for name, forw, backw in ( ('page1', 0, 0), ('page1:child', 0, 0), ('page2', 1, 0), ('page3', 0, 0), ('page3:page1', 0, 0), ('page3:page1:child', 0, 1), ): path = Path(name) #~ print path, \ #~ list(index.list_links(path, LINK_DIR_FORWARD)), \ #~ list(index.list_links(path, LINK_DIR_BACKWARD)) self.assertEqual( index.n_list_links(path, LINK_DIR_FORWARD), forw) self.assertEqual( index.n_list_links(path, LINK_DIR_BACKWARD), backw) text = ''.join(notebook.get_page(Path('page3:page1:child')).dump('wiki')) self.assertEqual(text, 'I have backlinks !\n')
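# Illustrative sketch (not part of the test suite above): the store / index /
# move flow that TestNewNotebook exercises, written out as plain API calls.
# The page names and wiki text are made up for the example; the notebook
# object would come from tests.new_notebook() or a Notebook(...) constructed
# as in the setUp methods above.
#~ notebook = tests.new_notebook()
#~ page = notebook.get_page(Path('Example:Page'))      # hypothetical page name
#~ page.parse('wiki', 'Link to [[Example:Other]]\n')   # parse wiki text into a tree
#~ notebook.store_page(page)                           # persist and update the index
#~ notebook.index.n_list_links(Path('Example:Page'), LINK_DIR_FORWARD)  # count forward links
#~ notebook.move_page(Path('Example:Page'), Path('Moved:Page'))  # backlinks get updated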
fabricehong/zim-desktop
tests/notebook.py
Python
gpl-2.0
27,661
# -*- test-case-name: openid.test.test_consumer -*- """OpenID support for Relying Parties (aka Consumers). This module documents the main interface with the OpenID consumer library. The only part of the library which has to be used and isn't documented in full here is the store required to create an C{L{Consumer}} instance. More on the abstract store type and concrete implementations of it that are provided in the documentation for the C{L{__init__<Consumer.__init__>}} method of the C{L{Consumer}} class. OVERVIEW ======== The OpenID identity verification process most commonly uses the following steps, as visible to the user of this library: 1. The user enters their OpenID into a field on the consumer's site, and hits a login button. 2. The consumer site discovers the user's OpenID provider using the Yadis protocol. 3. The consumer site sends the browser a redirect to the OpenID provider. This is the authentication request as described in the OpenID specification. 4. The OpenID provider's site sends the browser a redirect back to the consumer site. This redirect contains the provider's response to the authentication request. The most important part of the flow to note is the consumer's site must handle two separate HTTP requests in order to perform the full identity check. LIBRARY DESIGN ============== This consumer library is designed with that flow in mind. The goal is to make it as easy as possible to perform the above steps securely. At a high level, there are two important parts in the consumer library. The first important part is this module, which contains the interface to actually use this library. The second is the C{L{openid.store.interface}} module, which describes the interface to use if you need to create a custom method for storing the state this library needs to maintain between requests. In general, the second part is less important for users of the library to know about, as several implementations are provided which cover a wide variety of situations in which consumers may use the library. This module contains a class, C{L{Consumer}}, with methods corresponding to the actions necessary in each of steps 2, 3, and 4 described in the overview. Use of this library should be as easy as creating an C{L{Consumer}} instance and calling the methods appropriate for the action the site wants to take. SESSIONS, STORES, AND STATELESS MODE ==================================== The C{L{Consumer}} object keeps track of two types of state: 1. State of the user's current authentication attempt. Things like the identity URL, the list of endpoints discovered for that URL, and in case where some endpoints are unreachable, the list of endpoints already tried. This state needs to be held from Consumer.begin() to Consumer.complete(), but it is only applicable to a single session with a single user agent, and at the end of the authentication process (i.e. when an OP replies with either C{id_res} or C{cancel}) it may be discarded. 2. State of relationships with servers, i.e. shared secrets (associations) with servers and nonces seen on signed messages. This information should persist from one session to the next and should not be bound to a particular user-agent. These two types of storage are reflected in the first two arguments of Consumer's constructor, C{session} and C{store}. C{session} is a dict-like object and we hope your web framework provides you with one of these bound to the user agent. C{store} is an instance of L{openid.store.interface.OpenIDStore}. 
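For example, a consumer might be constructed like this (illustrative
only: the session object normally comes from your web framework, and
the store directory shown here is made up):

    >>> from openid.consumer.consumer import Consumer
    >>> from openid.store.filestore import FileOpenIDStore
    >>> session = {}                                  # dict-like, per user agent
    >>> store = FileOpenIDStore('/path/to/openid-store')
    >>> consumer = Consumer(session, store)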
Since the store does hold secrets shared between your application and the OpenID provider, you should be careful about how you use it in a shared hosting environment. If the filesystem or database permissions of your web host allow strangers to read from them, do not store your data there! If you have no safe place to store your data, construct your consumer with C{None} for the store, and it will operate only in stateless mode. Stateless mode may be slower, put more load on the OpenID provider, and trusts the provider to keep you safe from replay attacks. Several store implementation are provided, and the interface is fully documented so that custom stores can be used as well. See the documentation for the C{L{Consumer}} class for more information on the interface for stores. The implementations that are provided allow the consumer site to store the necessary data in several different ways, including several SQL databases and normal files on disk. IMMEDIATE MODE ============== In the flow described above, the user may need to confirm to the OpenID provider that it's ok to disclose his or her identity. The provider may draw pages asking for information from the user before it redirects the browser back to the consumer's site. This is generally transparent to the consumer site, so it is typically ignored as an implementation detail. There can be times, however, where the consumer site wants to get a response immediately. When this is the case, the consumer can put the library in immediate mode. In immediate mode, there is an extra response possible from the server, which is essentially the server reporting that it doesn't have enough information to answer the question yet. USING THIS LIBRARY ================== Integrating this library into an application is usually a relatively straightforward process. The process should basically follow this plan: Add an OpenID login field somewhere on your site. When an OpenID is entered in that field and the form is submitted, it should make a request to the your site which includes that OpenID URL. First, the application should L{instantiate a Consumer<Consumer.__init__>} with a session for per-user state and store for shared state. using the store of choice. Next, the application should call the 'C{L{begin<Consumer.begin>}}' method on the C{L{Consumer}} instance. This method takes the OpenID URL. The C{L{begin<Consumer.begin>}} method returns an C{L{AuthRequest}} object. Next, the application should call the C{L{redirectURL<AuthRequest.redirectURL>}} method on the C{L{AuthRequest}} object. The parameter C{return_to} is the URL that the OpenID server will send the user back to after attempting to verify his or her identity. The C{realm} parameter is the URL (or URL pattern) that identifies your web site to the user when he or she is authorizing it. Send a redirect to the resulting URL to the user's browser. That's the first half of the authentication process. The second half of the process is done after the user's OpenID Provider sends the user's browser a redirect back to your site to complete their login. When that happens, the user will contact your site at the URL given as the C{return_to} URL to the C{L{redirectURL<AuthRequest.redirectURL>}} call made above. The request will have several query parameters added to the URL by the OpenID provider as the information necessary to finish the request. 
Get an C{L{Consumer}} instance with the same session and store as before and call its C{L{complete<Consumer.complete>}} method, passing in all the received query arguments. There are multiple possible return types possible from that method. These indicate the whether or not the login was successful, and include any additional information appropriate for their type. @var SUCCESS: constant used as the status for L{SuccessResponse<openid.consumer.consumer.SuccessResponse>} objects. @var FAILURE: constant used as the status for L{FailureResponse<openid.consumer.consumer.FailureResponse>} objects. @var CANCEL: constant used as the status for L{CancelResponse<openid.consumer.consumer.CancelResponse>} objects. @var SETUP_NEEDED: constant used as the status for L{SetupNeededResponse<openid.consumer.consumer.SetupNeededResponse>} objects. """ import cgi import copy from urlparse import urlparse, urldefrag from openid import fetchers from openid.consumer.discover import discover, OpenIDServiceEndpoint, \ DiscoveryFailure, OPENID_1_0_TYPE, OPENID_1_1_TYPE, OPENID_2_0_TYPE from openid.message import Message, OPENID_NS, OPENID2_NS, OPENID1_NS, \ IDENTIFIER_SELECT, no_default, BARE_NS from openid import cryptutil from openid import oidutil from openid.association import Association, default_negotiator, \ SessionNegotiator from openid.dh import DiffieHellman from openid.store.nonce import mkNonce, split as splitNonce from openid.yadis.manager import Discovery __all__ = ['AuthRequest', 'Consumer', 'SuccessResponse', 'SetupNeededResponse', 'CancelResponse', 'FailureResponse', 'SUCCESS', 'FAILURE', 'CANCEL', 'SETUP_NEEDED', ] def makeKVPost(request_message, server_url): """Make a Direct Request to an OpenID Provider and return the result as a Message object. @raises openid.fetchers.HTTPFetchingError: if an error is encountered in making the HTTP post. @rtype: L{openid.message.Message} """ # XXX: TESTME resp = fetchers.fetch(server_url, body=request_message.toURLEncoded()) # Process response in separate function that can be shared by async code. return _httpResponseToMessage(resp, server_url) def _httpResponseToMessage(response, server_url): """Adapt a POST response to a Message. @type response: L{openid.fetchers.HTTPResponse} @param response: Result of a POST to an OpenID endpoint. @rtype: L{openid.message.Message} @raises openid.fetchers.HTTPFetchingError: if the server returned a status of other than 200 or 400. @raises ServerError: if the server returned an OpenID error. """ # Should this function be named Message.fromHTTPResponse instead? response_message = Message.fromKVForm(response.body) if response.status == 400: raise ServerError.fromMessage(response_message) elif response.status != 200: fmt = 'bad status code from server %s: %s' error_message = fmt % (server_url, response.status) raise fetchers.HTTPFetchingError(error_message) return response_message class Consumer(object): """An OpenID consumer implementation that performs discovery and does session management. @ivar consumer: an instance of an object implementing the OpenID protocol, but doing no discovery or session management. @type consumer: GenericConsumer @ivar session: A dictionary-like object representing the user's session data. This is used for keeping state of the OpenID transaction when the user is redirected to the server. @cvar session_key_prefix: A string that is prepended to session keys to ensure that they are unique. This variable may be changed to suit your application. 
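For example, a hypothetical application-specific prefix could be set
like this (shown only to illustrate overriding the default):

    >>> Consumer.session_key_prefix = 'myapp_openid_'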
""" session_key_prefix = "_openid_consumer_" _token = 'last_token' _discover = staticmethod(discover) def __init__(self, session, store, consumer_class=None): """Initialize a Consumer instance. You should create a new instance of the Consumer object with every HTTP request that handles OpenID transactions. @param session: See L{the session instance variable<openid.consumer.consumer.Consumer.session>} @param store: an object that implements the interface in C{L{openid.store.interface.OpenIDStore}}. Several implementations are provided, to cover common database environments. @type store: C{L{openid.store.interface.OpenIDStore}} @see: L{openid.store.interface} @see: L{openid.store} """ self.session = session if consumer_class is None: consumer_class = GenericConsumer self.consumer = consumer_class(store) self._token_key = self.session_key_prefix + self._token def begin(self, user_url, anonymous=False): """Start the OpenID authentication process. See steps 1-2 in the overview at the top of this file. @param user_url: Identity URL given by the user. This method performs a textual transformation of the URL to try and make sure it is normalized. For example, a user_url of example.com will be normalized to http://example.com/ normalizing and resolving any redirects the server might issue. @type user_url: unicode @param anonymous: Whether to make an anonymous request of the OpenID provider. Such a request does not ask for an authorization assertion for an OpenID identifier, but may be used with extensions to pass other data. e.g. "I don't care who you are, but I'd like to know your time zone." @type anonymous: bool @returns: An object containing the discovered information will be returned, with a method for building a redirect URL to the server, as described in step 3 of the overview. This object may also be used to add extension arguments to the request, using its L{addExtensionArg<openid.consumer.consumer.AuthRequest.addExtensionArg>} method. @returntype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @raises openid.consumer.discover.DiscoveryFailure: when I fail to find an OpenID server for this URL. If the C{yadis} package is available, L{openid.consumer.discover.DiscoveryFailure} is an alias for C{yadis.discover.DiscoveryFailure}. """ disco = Discovery(self.session, user_url, self.session_key_prefix) try: service = disco.getNextService(self._discover) except fetchers.HTTPFetchingError, why: raise DiscoveryFailure( 'Error fetching XRDS document: %s' % (why[0],), None) if service is None: raise DiscoveryFailure( 'No usable OpenID services found for %s' % (user_url,), None) else: return self.beginWithoutDiscovery(service, anonymous) def beginWithoutDiscovery(self, service, anonymous=False): """Start OpenID verification without doing OpenID server discovery. This method is used internally by Consumer.begin after discovery is performed, and exists to provide an interface for library users needing to perform their own discovery. @param service: an OpenID service endpoint descriptor. This object and factories for it are found in the L{openid.consumer.discover} module. @type service: L{OpenIDServiceEndpoint<openid.consumer.discover.OpenIDServiceEndpoint>} @returns: an OpenID authentication request object. 
@rtype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @See: Openid.consumer.consumer.Consumer.begin @see: openid.consumer.discover """ auth_req = self.consumer.begin(service) self.session[self._token_key] = auth_req.endpoint try: auth_req.setAnonymous(anonymous) except ValueError, why: raise ProtocolError(str(why)) return auth_req def complete(self, query, return_to): """Called to interpret the server's response to an OpenID request. It is called in step 4 of the flow described in the consumer overview. @param query: A dictionary of the query parameters for this HTTP request. @param return_to: The return URL used to invoke the application. Extract the URL from your application's web request framework and specify it here to have it checked against the openid.return_to value in the response. If the return_to URL check fails, the status of the completion will be FAILURE. @returns: a subclass of Response. The type of response is indicated by the status attribute, which will be one of SUCCESS, CANCEL, FAILURE, or SETUP_NEEDED. @see: L{SuccessResponse<openid.consumer.consumer.SuccessResponse>} @see: L{CancelResponse<openid.consumer.consumer.CancelResponse>} @see: L{SetupNeededResponse<openid.consumer.consumer.SetupNeededResponse>} @see: L{FailureResponse<openid.consumer.consumer.FailureResponse>} """ endpoint = self.session.get(self._token_key) oidutil.log('Got endpoint from session: %r' % (endpoint,)) message = Message.fromPostArgs(query) response = self.consumer.complete(message, endpoint, return_to) try: del self.session[self._token_key] except KeyError: pass if (response.status in ['success', 'cancel'] and response.identity_url is not None): disco = Discovery(self.session, response.identity_url, self.session_key_prefix) # This is OK to do even if we did not do discovery in # the first place. disco.cleanup(force=True) return response def setAssociationPreference(self, association_preferences): """Set the order in which association types/sessions should be attempted. For instance, to only allow HMAC-SHA256 associations created with a DH-SHA256 association session: >>> consumer.setAssociationPreference([('HMAC-SHA256', 'DH-SHA256')]) Any association type/association type pair that is not in this list will not be attempted at all. @param association_preferences: The list of allowed (association type, association session type) pairs that should be allowed for this consumer to use, in order from most preferred to least preferred. 
@type association_preferences: [(str, str)] @returns: None @see: C{L{openid.association.SessionNegotiator}} """ self.consumer.negotiator = SessionNegotiator(association_preferences) class DiffieHellmanSHA1ConsumerSession(object): session_type = 'DH-SHA1' hash_func = staticmethod(cryptutil.sha1) secret_size = 20 allowed_assoc_types = ['HMAC-SHA1'] def __init__(self, dh=None): if dh is None: dh = DiffieHellman.fromDefaults() self.dh = dh def getRequest(self): cpub = cryptutil.longToBase64(self.dh.public) args = {'dh_consumer_public': cpub} if not self.dh.usingDefaultValues(): args.update({ 'dh_modulus': cryptutil.longToBase64(self.dh.modulus), 'dh_gen': cryptutil.longToBase64(self.dh.generator), }) return args def extractSecret(self, response): dh_server_public64 = response.getArg( OPENID_NS, 'dh_server_public', no_default) enc_mac_key64 = response.getArg(OPENID_NS, 'enc_mac_key', no_default) dh_server_public = cryptutil.base64ToLong(dh_server_public64) enc_mac_key = oidutil.fromBase64(enc_mac_key64) return self.dh.xorSecret(dh_server_public, enc_mac_key, self.hash_func) class DiffieHellmanSHA256ConsumerSession(DiffieHellmanSHA1ConsumerSession): session_type = 'DH-SHA256' hash_func = staticmethod(cryptutil.sha256) secret_size = 32 allowed_assoc_types = ['HMAC-SHA256'] class PlainTextConsumerSession(object): session_type = 'no-encryption' allowed_assoc_types = ['HMAC-SHA1', 'HMAC-SHA256'] def getRequest(self): return {} def extractSecret(self, response): mac_key64 = response.getArg(OPENID_NS, 'mac_key', no_default) return oidutil.fromBase64(mac_key64) class SetupNeededError(Exception): """Internally-used exception that indicates that an immediate-mode request cancelled.""" def __init__(self, user_setup_url=None): Exception.__init__(self, user_setup_url) self.user_setup_url = user_setup_url class ProtocolError(ValueError): """Exception that indicates that a message violated the protocol. It is raised and caught internally to this file.""" class TypeURIMismatch(ProtocolError): """A protocol error arising from type URIs mismatching """ def __init__(self, expected, endpoint): ProtocolError.__init__(self, expected, endpoint) self.expected = expected self.endpoint = endpoint def __str__(self): s = '<%s.%s: Required type %s not found in %s for endpoint %s>' % ( self.__class__.__module__, self.__class__.__name__, self.expected, self.endpoint.type_uris, self.endpoint) return s class ServerError(Exception): """Exception that is raised when the server returns a 400 response code to a direct request.""" def __init__(self, error_text, error_code, message): Exception.__init__(self, error_text) self.error_text = error_text self.error_code = error_code self.message = message def fromMessage(cls, message): """Generate a ServerError instance, extracting the error text and the error code from the message.""" error_text = message.getArg( OPENID_NS, 'error', '<no error message supplied>') error_code = message.getArg(OPENID_NS, 'error_code') return cls(error_text, error_code, message) fromMessage = classmethod(fromMessage) class GenericConsumer(object): """This is the implementation of the common logic for OpenID consumers. It is unaware of the application in which it is running. @ivar negotiator: An object that controls the kind of associations that the consumer makes. It defaults to C{L{openid.association.default_negotiator}}. Assign a different negotiator to it if you have specific requirements for how associations are made. 
@type negotiator: C{L{openid.association.SessionNegotiator}} """ # The name of the query parameter that gets added to the return_to # URL when using OpenID1. You can change this value if you want or # need a different name, but don't make it start with openid, # because it's not a standard protocol thing for OpenID1. For # OpenID2, the library will take care of the nonce using standard # OpenID query parameter names. openid1_nonce_query_arg_name = 'janrain_nonce' # Another query parameter that gets added to the return_to for # OpenID 1; if the user's session state is lost, use this claimed # identifier to do discovery when verifying the response. openid1_return_to_identifier_name = 'openid1_claimed_id' session_types = { 'DH-SHA1':DiffieHellmanSHA1ConsumerSession, 'DH-SHA256':DiffieHellmanSHA256ConsumerSession, 'no-encryption':PlainTextConsumerSession, } _discover = staticmethod(discover) def __init__(self, store): self.store = store self.negotiator = default_negotiator.copy() def begin(self, service_endpoint): """Create an AuthRequest object for the specified service_endpoint. This method will create an association if necessary.""" if self.store is None: assoc = None else: assoc = self._getAssociation(service_endpoint) request = AuthRequest(service_endpoint, assoc) request.return_to_args[self.openid1_nonce_query_arg_name] = mkNonce() if request.message.isOpenID1(): request.return_to_args[self.openid1_return_to_identifier_name] = \ request.endpoint.claimed_id return request def complete(self, message, endpoint, return_to): """Process the OpenID message, using the specified endpoint and return_to URL as context. This method will handle any OpenID message that is sent to the return_to URL. """ mode = message.getArg(OPENID_NS, 'mode', '<No mode set>') modeMethod = getattr(self, '_complete_' + mode, self._completeInvalid) return modeMethod(message, endpoint, return_to) def _complete_cancel(self, message, endpoint, _): return CancelResponse(endpoint) def _complete_error(self, message, endpoint, _): error = message.getArg(OPENID_NS, 'error') contact = message.getArg(OPENID_NS, 'contact') reference = message.getArg(OPENID_NS, 'reference') return FailureResponse(endpoint, error, contact=contact, reference=reference) def _complete_setup_needed(self, message, endpoint, _): if not message.isOpenID2(): return self._completeInvalid(message, endpoint, _) return SetupNeededResponse(endpoint) def _complete_id_res(self, message, endpoint, return_to): try: self._checkSetupNeeded(message) except SetupNeededError, why: return SetupNeededResponse(endpoint, why.user_setup_url) else: try: return self._doIdRes(message, endpoint, return_to) except (ProtocolError, DiscoveryFailure), why: return FailureResponse(endpoint, why[0]) def _completeInvalid(self, message, endpoint, _): mode = message.getArg(OPENID_NS, 'mode', '<No mode set>') return FailureResponse(endpoint, 'Invalid openid.mode: %r' % (mode,)) def _checkReturnTo(self, message, return_to): """Check an OpenID message and its openid.return_to value against a return_to URL from an application. Return True on success, False on failure. """ # Check the openid.return_to args against args in the original # message. try: self._verifyReturnToArgs(message.toPostArgs()) except ProtocolError, why: oidutil.log("Verifying return_to arguments: %s" % (why[0],)) return False # Check the return_to base URL against the one in the message. msg_return_to = message.getArg(OPENID_NS, 'return_to') # The URL scheme, authority, and path MUST be the same between # the two URLs. 
app_parts = urlparse(return_to) msg_parts = urlparse(msg_return_to) # (addressing scheme, network location, path) must be equal in # both URLs. for part in range(0, 3): if app_parts[part] != msg_parts[part]: return False return True _makeKVPost = staticmethod(makeKVPost) def _checkSetupNeeded(self, message): """Check an id_res message to see if it is a checkid_immediate cancel response. @raises SetupNeededError: if it is a checkid_immediate cancellation """ # In OpenID 1, we check to see if this is a cancel from # immediate mode by the presence of the user_setup_url # parameter. if message.isOpenID1(): user_setup_url = message.getArg(OPENID1_NS, 'user_setup_url') if user_setup_url is not None: raise SetupNeededError(user_setup_url) def _doIdRes(self, message, endpoint, return_to): """Handle id_res responses that are not cancellations of immediate mode requests. @param message: the response paramaters. @param endpoint: the discovered endpoint object. May be None. @raises ProtocolError: If the message contents are not well-formed according to the OpenID specification. This includes missing fields or not signing fields that should be signed. @raises DiscoveryFailure: If the subject of the id_res message does not match the supplied endpoint, and discovery on the identifier in the message fails (this should only happen when using OpenID 2) @returntype: L{Response} """ # Checks for presence of appropriate fields (and checks # signed list fields) self._idResCheckForFields(message) if not self._checkReturnTo(message, return_to): raise ProtocolError( "return_to does not match return URL. Expected %r, got %r" % (return_to, message.getArg(OPENID_NS, 'return_to'))) # Verify discovery information: endpoint = self._verifyDiscoveryResults(message, endpoint) oidutil.log("Received id_res response from %s using association %s" % (endpoint.server_url, message.getArg(OPENID_NS, 'assoc_handle'))) self._idResCheckSignature(message, endpoint.server_url) # Will raise a ProtocolError if the nonce is bad self._idResCheckNonce(message, endpoint) signed_list_str = message.getArg(OPENID_NS, 'signed', no_default) signed_list = signed_list_str.split(',') signed_fields = ["openid." + s for s in signed_list] return SuccessResponse(endpoint, message, signed_fields) def _idResGetNonceOpenID1(self, message, endpoint): """Extract the nonce from an OpenID 1 response. Return the nonce from the BARE_NS since we independently check the return_to arguments are the same as those in the response message. 
See the openid1_nonce_query_arg_name class variable @returns: The nonce as a string or None """ return message.getArg(BARE_NS, self.openid1_nonce_query_arg_name) def _idResCheckNonce(self, message, endpoint): if message.isOpenID1(): # This indicates that the nonce was generated by the consumer nonce = self._idResGetNonceOpenID1(message, endpoint) server_url = '' else: nonce = message.getArg(OPENID2_NS, 'response_nonce') server_url = endpoint.server_url if nonce is None: raise ProtocolError('Nonce missing from response') try: timestamp, salt = splitNonce(nonce) except ValueError, why: raise ProtocolError('Malformed nonce: %s' % (why[0],)) if (self.store is not None and not self.store.useNonce(server_url, timestamp, salt)): raise ProtocolError('Nonce already used or out of range') def _idResCheckSignature(self, message, server_url): assoc_handle = message.getArg(OPENID_NS, 'assoc_handle') if self.store is None: assoc = None else: assoc = self.store.getAssociation(server_url, assoc_handle) if assoc: if assoc.getExpiresIn() <= 0: # XXX: It might be a good idea sometimes to re-start the # authentication with a new association. Doing it # automatically opens the possibility for # denial-of-service by a server that just returns expired # associations (or really short-lived associations) raise ProtocolError( 'Association with %s expired' % (server_url,)) if not assoc.checkMessageSignature(message): raise ProtocolError('Bad signature') else: # It's not an association we know about. Stateless mode is our # only possible path for recovery. # XXX - async framework will not want to block on this call to # _checkAuth. if not self._checkAuth(message, server_url): raise ProtocolError('Server denied check_authentication') def _idResCheckForFields(self, message): # XXX: this should be handled by the code that processes the # response (that is, if a field is missing, we should not have # to explicitly check that it's present, just make sure that # the fields are actually being used by the rest of the code # in tests). Although, which fields are signed does need to be # checked somewhere. basic_fields = ['return_to', 'assoc_handle', 'sig', 'signed'] basic_sig_fields = ['return_to', 'identity'] require_fields = { OPENID2_NS: basic_fields + ['op_endpoint'], OPENID1_NS: basic_fields + ['identity'], } require_sigs = { OPENID2_NS: basic_sig_fields + ['response_nonce', 'claimed_id', 'assoc_handle',], OPENID1_NS: basic_sig_fields, } for field in require_fields[message.getOpenIDNamespace()]: if not message.hasKey(OPENID_NS, field): raise ProtocolError('Missing required field %r' % (field,)) signed_list_str = message.getArg(OPENID_NS, 'signed', no_default) signed_list = signed_list_str.split(',') for field in require_sigs[message.getOpenIDNamespace()]: # Field is present and not in signed list if message.hasKey(OPENID_NS, field) and field not in signed_list: raise ProtocolError('"%s" not signed' % (field,)) def _verifyReturnToArgs(query): """Verify that the arguments in the return_to URL are present in this response. 
""" message = Message.fromPostArgs(query) return_to = message.getArg(OPENID_NS, 'return_to') if return_to is None: raise ProtocolError('Response has no return_to') parsed_url = urlparse(return_to) rt_query = parsed_url[4] parsed_args = cgi.parse_qsl(rt_query) for rt_key, rt_value in parsed_args: try: value = query[rt_key] if rt_value != value: format = ("parameter %s value %r does not match " "return_to's value %r") raise ProtocolError(format % (rt_key, value, rt_value)) except KeyError: format = "return_to parameter %s absent from query %r" raise ProtocolError(format % (rt_key, query)) # Make sure all non-OpenID arguments in the response are also # in the signed return_to. bare_args = message.getArgs(BARE_NS) for pair in bare_args.iteritems(): if pair not in parsed_args: raise ProtocolError("Parameter %s not in return_to URL" % (pair[0],)) _verifyReturnToArgs = staticmethod(_verifyReturnToArgs) def _verifyDiscoveryResults(self, resp_msg, endpoint=None): """ Extract the information from an OpenID assertion message and verify it against the original @param endpoint: The endpoint that resulted from doing discovery @param resp_msg: The id_res message object @returns: the verified endpoint """ if resp_msg.getOpenIDNamespace() == OPENID2_NS: return self._verifyDiscoveryResultsOpenID2(resp_msg, endpoint) else: return self._verifyDiscoveryResultsOpenID1(resp_msg, endpoint) def _verifyDiscoveryResultsOpenID2(self, resp_msg, endpoint): to_match = OpenIDServiceEndpoint() to_match.type_uris = [OPENID_2_0_TYPE] to_match.claimed_id = resp_msg.getArg(OPENID2_NS, 'claimed_id') to_match.local_id = resp_msg.getArg(OPENID2_NS, 'identity') # Raises a KeyError when the op_endpoint is not present to_match.server_url = resp_msg.getArg( OPENID2_NS, 'op_endpoint', no_default) # claimed_id and identifier must both be present or both # be absent if (to_match.claimed_id is None and to_match.local_id is not None): raise ProtocolError( 'openid.identity is present without openid.claimed_id') elif (to_match.claimed_id is not None and to_match.local_id is None): raise ProtocolError( 'openid.claimed_id is present without openid.identity') # This is a response without identifiers, so there's really no # checking that we can do, so return an endpoint that's for # the specified `openid.op_endpoint' elif to_match.claimed_id is None: return OpenIDServiceEndpoint.fromOPEndpointURL(to_match.server_url) # The claimed ID doesn't match, so we have to do discovery # again. This covers not using sessions, OP identifier # endpoints and responses that didn't match the original # request. if not endpoint: oidutil.log('No pre-discovered information supplied.') endpoint = self._discoverAndVerify(to_match) else: # The claimed ID matches, so we use the endpoint that we # discovered in initiation. This should be the most common # case. try: self._verifyDiscoverySingle(endpoint, to_match) except ProtocolError, e: oidutil.log("Error attempting to use stored discovery information: " + str(e)) oidutil.log("Attempting discovery to verify endpoint") endpoint = self._discoverAndVerify(to_match) # The endpoint we return should have the claimed ID from the # message we just verified, fragment and all. 
if endpoint.claimed_id != to_match.claimed_id: endpoint = copy.copy(endpoint) endpoint.claimed_id = to_match.claimed_id return endpoint def _verifyDiscoveryResultsOpenID1(self, resp_msg, endpoint): claimed_id = resp_msg.getArg(BARE_NS, self.openid1_return_to_identifier_name) if endpoint is None and claimed_id is None: raise RuntimeError( 'When using OpenID 1, the claimed ID must be supplied, ' 'either by passing it through as a return_to parameter ' 'or by using a session, and supplied to the GenericConsumer ' 'as the argument to complete()') elif endpoint is not None and claimed_id is None: claimed_id = endpoint.claimed_id to_match = OpenIDServiceEndpoint() to_match.type_uris = [OPENID_1_1_TYPE] to_match.local_id = resp_msg.getArg(OPENID1_NS, 'identity') # Restore delegate information from the initiation phase to_match.claimed_id = claimed_id if to_match.local_id is None: raise ProtocolError('Missing required field openid.identity') to_match_1_0 = copy.copy(to_match) to_match_1_0.type_uris = [OPENID_1_0_TYPE] if endpoint is not None: try: try: oidutil.log("Calling _verifyDiscoverySingle") self._verifyDiscoverySingle(endpoint, to_match) except TypeURIMismatch: oidutil.log("Got TypeURIMismatch, trying 1.0 endpoint") self._verifyDiscoverySingle(endpoint, to_match_1_0) except ProtocolError, e: oidutil.log("Error attempting to use stored discovery information: " + str(e)) oidutil.log("Attempting discovery to verify endpoint") else: oidutil.log("Success: returning endpoint") return endpoint oidutil.log("Endpoint was None, using _discoverAndVerify") # Endpoint is either bad (failed verification) or None try: return self._discoverAndVerify(to_match) except TypeURIMismatch: return self._discoverAndVerify(to_match_1_0) except: import sys oidutil.log("From _discoverAndVerify: "+str(sys.exc_info()[1])) raise def _verifyDiscoverySingle(self, endpoint, to_match): """Verify that the given endpoint matches the information extracted from the OpenID assertion, and raise an exception if there is a mismatch. @type endpoint: openid.consumer.discover.OpenIDServiceEndpoint @type to_match: openid.consumer.discover.OpenIDServiceEndpoint @rtype: NoneType @raises ProtocolError: when the endpoint does not match the discovered information. """ # Every type URI that's in the to_match endpoint has to be # present in the discovered endpoint. for type_uri in to_match.type_uris: if not endpoint.usesExtension(type_uri): raise TypeURIMismatch(type_uri, endpoint) # Fragments do not influence discovery, so we can't compare a # claimed identifier with a fragment to discovered information. defragged_claimed_id, _ = urldefrag(to_match.claimed_id) if defragged_claimed_id != endpoint.claimed_id: raise ProtocolError( 'Claimed ID does not match (different subjects!), ' 'Expected %s, got %s' % (defragged_claimed_id, endpoint.claimed_id)) if to_match.getLocalID() != endpoint.getLocalID(): raise ProtocolError('local_id mismatch. Expected %s, got %s' % (to_match.getLocalID(), endpoint.getLocalID())) # If the server URL is None, this must be an OpenID 1 # response, because op_endpoint is a required parameter in # OpenID 2. In that case, we don't actually care what the # discovered server_url is, because signature checking or # check_auth should take care of that check for us. 
if to_match.server_url is None: assert to_match.preferredNamespace() == OPENID1_NS, ( """The code calling this must ensure that OpenID 2 responses have a non-none `openid.op_endpoint' and that it is set as the `server_url' attribute of the `to_match' endpoint.""") elif to_match.server_url != endpoint.server_url: raise ProtocolError('OP Endpoint mismatch. Expected %s, got %s' % (to_match.server_url, endpoint.server_url)) def _discoverAndVerify(self, to_match): """Given an endpoint object created from the information in an OpenID response, perform discovery and verify the discovery results, returning the matching endpoint that is the result of doing that discovery. @type to_match: openid.consumer.discover.OpenIDServiceEndpoint @param to_match: The endpoint whose information we're confirming @rtype: openid.consumer.discover.OpenIDServiceEndpoint @returns: The result of performing discovery on the claimed identifier in `to_match' @raises DiscoveryFailure: when discovery fails. """ oidutil.log('Performing discovery on %s' % (to_match.claimed_id,)) _, services = self._discover(to_match.claimed_id) if not services: raise DiscoveryFailure('No OpenID information found at %s' % (to_match.claimed_id,), None) return self._verifyDiscoveredServices(services, to_match) def _verifyDiscoveredServices(self, services, to_match): """See @L{_discoverAndVerify}""" # Search the services resulting from discovery to find one # that matches the information from the assertion failure_messages = [] for endpoint in services: try: self._verifyDiscoverySingle(endpoint, to_match) except ProtocolError, why: failure_messages.append(str(why)) else: # It matches, so discover verification has # succeeded. Return this endpoint. return endpoint else: oidutil.log('Discovery verification failure for %s' % (to_match.claimed_id,)) for failure_message in failure_messages: oidutil.log(' * Endpoint mismatch: ' + failure_message) raise DiscoveryFailure( 'No matching endpoint found after discovering %s' % (to_match.claimed_id,), None) def _checkAuth(self, message, server_url): """Make a check_authentication request to verify this message. @returns: True if the request is valid. @rtype: bool """ oidutil.log('Using OpenID check_authentication') request = self._createCheckAuthRequest(message) if request is None: return False try: response = self._makeKVPost(request, server_url) except (fetchers.HTTPFetchingError, ServerError), e: oidutil.log('check_authentication failed: %s' % (e[0],)) return False else: return self._processCheckAuthResponse(response, server_url) def _createCheckAuthRequest(self, message): """Generate a check_authentication request message given an id_res message. """ # Arguments that are always passed to the server and not # included in the signature. whitelist = ['assoc_handle', 'sig', 'signed', 'invalidate_handle'] check_args = {} for k in whitelist: val = message.getArg(OPENID_NS, k) if val is not None: check_args[k] = val signed = message.getArg(OPENID_NS, 'signed') if signed: for k in signed.split(','): val = message.getAliasedArg(k) # Signed value is missing if val is None: oidutil.log('Missing signed field %r' % (k,)) return None check_args[k] = val check_args['mode'] = 'check_authentication' return Message.fromOpenIDArgs(check_args) def _processCheckAuthResponse(self, response, server_url): """Process the response message from a check_authentication request, invalidating associations if requested. 
""" is_valid = response.getArg(OPENID_NS, 'is_valid', 'false') invalidate_handle = response.getArg(OPENID_NS, 'invalidate_handle') if invalidate_handle is not None: oidutil.log( 'Received "invalidate_handle" from server %s' % (server_url,)) if self.store is None: oidutil.log('Unexpectedly got invalidate_handle without ' 'a store!') else: self.store.removeAssociation(server_url, invalidate_handle) if is_valid == 'true': return True else: oidutil.log('Server responds that checkAuth call is not valid') return False def _getAssociation(self, endpoint): """Get an association for the endpoint's server_url. First try seeing if we have a good association in the store. If we do not, then attempt to negotiate an association with the server. If we negotiate a good association, it will get stored. @returns: A valid association for the endpoint's server_url or None @rtype: openid.association.Association or NoneType """ assoc = self.store.getAssociation(endpoint.server_url) if assoc is None or assoc.expiresIn <= 0: assoc = self._negotiateAssociation(endpoint) if assoc is not None: self.store.storeAssociation(endpoint.server_url, assoc) return assoc def _negotiateAssociation(self, endpoint): """Make association requests to the server, attempting to create a new association. @returns: a new association object @rtype: L{openid.association.Association} """ # Get our preferred session/association type from the negotiatior. assoc_type, session_type = self.negotiator.getAllowedType() try: assoc = self._requestAssociation( endpoint, assoc_type, session_type) except ServerError, why: supportedTypes = self._extractSupportedAssociationType(why, endpoint, assoc_type) if supportedTypes is not None: assoc_type, session_type = supportedTypes # Attempt to create an association from the assoc_type # and session_type that the server told us it # supported. try: assoc = self._requestAssociation( endpoint, assoc_type, session_type) except ServerError, why: # Do not keep trying, since it rejected the # association type that it told us to use. oidutil.log('Server %s refused its suggested association ' 'type: session_type=%s, assoc_type=%s' % (endpoint.server_url, session_type, assoc_type)) return None else: return assoc else: return assoc def _extractSupportedAssociationType(self, server_error, endpoint, assoc_type): """Handle ServerErrors resulting from association requests. @returns: If server replied with an C{unsupported-type} error, return a tuple of supported C{association_type}, C{session_type}. Otherwise logs the error and returns None. @rtype: tuple or None """ # Any error message whose code is not 'unsupported-type' # should be considered a total failure. if server_error.error_code != 'unsupported-type' or \ server_error.message.isOpenID1(): oidutil.log( 'Server error when requesting an association from %r: %s' % (endpoint.server_url, server_error.error_text)) return None # The server didn't like the association/session type # that we sent, and it sent us back a message that # might tell us how to handle it. 
oidutil.log( 'Unsupported association type %s: %s' % (assoc_type, server_error.error_text,)) # Extract the session_type and assoc_type from the # error message assoc_type = server_error.message.getArg(OPENID_NS, 'assoc_type') session_type = server_error.message.getArg(OPENID_NS, 'session_type') if assoc_type is None or session_type is None: oidutil.log('Server responded with unsupported association ' 'session but did not supply a fallback.') return None elif not self.negotiator.isAllowed(assoc_type, session_type): fmt = ('Server sent unsupported session/association type: ' 'session_type=%s, assoc_type=%s') oidutil.log(fmt % (session_type, assoc_type)) return None else: return assoc_type, session_type def _requestAssociation(self, endpoint, assoc_type, session_type): """Make and process one association request to this endpoint's OP endpoint URL. @returns: An association object or None if the association processing failed. @raises ServerError: when the remote OpenID server returns an error. """ assoc_session, args = self._createAssociateRequest( endpoint, assoc_type, session_type) try: response = self._makeKVPost(args, endpoint.server_url) except fetchers.HTTPFetchingError, why: oidutil.log('openid.associate request failed: %s' % (why[0],)) return None try: assoc = self._extractAssociation(response, assoc_session) except KeyError, why: oidutil.log('Missing required parameter in response from %s: %s' % (endpoint.server_url, why[0])) return None except ProtocolError, why: oidutil.log('Protocol error parsing response from %s: %s' % ( endpoint.server_url, why[0])) return None else: return assoc def _createAssociateRequest(self, endpoint, assoc_type, session_type): """Create an association request for the given assoc_type and session_type. @param endpoint: The endpoint whose server_url will be queried. The important bit about the endpoint is whether it's in compatiblity mode (OpenID 1.1) @param assoc_type: The association type that the request should ask for. @type assoc_type: str @param session_type: The session type that should be used in the association request. The session_type is used to create an association session object, and that session object is asked for any additional fields that it needs to add to the request. @type session_type: str @returns: a pair of the association session object and the request message that will be sent to the server. @rtype: (association session type (depends on session_type), openid.message.Message) """ session_type_class = self.session_types[session_type] assoc_session = session_type_class() args = { 'mode': 'associate', 'assoc_type': assoc_type, } if not endpoint.compatibilityMode(): args['ns'] = OPENID2_NS # Leave out the session type if we're in compatibility mode # *and* it's no-encryption. if (not endpoint.compatibilityMode() or assoc_session.session_type != 'no-encryption'): args['session_type'] = assoc_session.session_type args.update(assoc_session.getRequest()) message = Message.fromOpenIDArgs(args) return assoc_session, message def _getOpenID1SessionType(self, assoc_response): """Given an association response message, extract the OpenID 1.X session type. This function mostly takes care of the 'no-encryption' default behavior in OpenID 1. If the association type is plain-text, this function will return 'no-encryption' @returns: The association type for this message @rtype: str @raises KeyError: when the session_type field is absent. 
""" # If it's an OpenID 1 message, allow session_type to default # to None (which signifies "no-encryption") session_type = assoc_response.getArg(OPENID1_NS, 'session_type') # Handle the differences between no-encryption association # respones in OpenID 1 and 2: # no-encryption is not really a valid session type for # OpenID 1, but we'll accept it anyway, while issuing a # warning. if session_type == 'no-encryption': oidutil.log('WARNING: OpenID server sent "no-encryption"' 'for OpenID 1.X') # Missing or empty session type is the way to flag a # 'no-encryption' response. Change the session type to # 'no-encryption' so that it can be handled in the same # way as OpenID 2 'no-encryption' respones. elif session_type == '' or session_type is None: session_type = 'no-encryption' return session_type def _extractAssociation(self, assoc_response, assoc_session): """Attempt to extract an association from the response, given the association response message and the established association session. @param assoc_response: The association response message from the server @type assoc_response: openid.message.Message @param assoc_session: The association session object that was used when making the request @type assoc_session: depends on the session type of the request @raises ProtocolError: when data is malformed @raises KeyError: when a field is missing @rtype: openid.association.Association """ # Extract the common fields from the response, raising an # exception if they are not found assoc_type = assoc_response.getArg( OPENID_NS, 'assoc_type', no_default) assoc_handle = assoc_response.getArg( OPENID_NS, 'assoc_handle', no_default) # expires_in is a base-10 string. The Python parsing will # accept literals that have whitespace around them and will # accept negative values. Neither of these are really in-spec, # but we think it's OK to accept them. expires_in_str = assoc_response.getArg( OPENID_NS, 'expires_in', no_default) try: expires_in = int(expires_in_str) except ValueError, why: raise ProtocolError('Invalid expires_in field: %s' % (why[0],)) # OpenID 1 has funny association session behaviour. if assoc_response.isOpenID1(): session_type = self._getOpenID1SessionType(assoc_response) else: session_type = assoc_response.getArg( OPENID2_NS, 'session_type', no_default) # Session type mismatch if assoc_session.session_type != session_type: if (assoc_response.isOpenID1() and session_type == 'no-encryption'): # In OpenID 1, any association request can result in a # 'no-encryption' association response. Setting # assoc_session to a new no-encryption session should # make the rest of this function work properly for # that case. assoc_session = PlainTextConsumerSession() else: # Any other mismatch, regardless of protocol version # results in the failure of the association session # altogether. fmt = 'Session type mismatch. Expected %r, got %r' message = fmt % (assoc_session.session_type, session_type) raise ProtocolError(message) # Make sure assoc_type is valid for session_type if assoc_type not in assoc_session.allowed_assoc_types: fmt = 'Unsupported assoc_type for session %s returned: %s' raise ProtocolError(fmt % (assoc_session.session_type, assoc_type)) # Delegate to the association session to extract the secret # from the response, however is appropriate for that session # type. 
try: secret = assoc_session.extractSecret(assoc_response) except ValueError, why: fmt = 'Malformed response for %s session: %s' raise ProtocolError(fmt % (assoc_session.session_type, why[0])) return Association.fromExpiresIn( expires_in, assoc_handle, secret, assoc_type) class AuthRequest(object): """An object that holds the state necessary for generating an OpenID authentication request. This object holds the association with the server and the discovered information with which the request will be made. It is separate from the consumer because you may wish to add things to the request before sending it on its way to the server. It also has serialization options that let you encode the authentication request as a URL or as a form POST. """ def __init__(self, endpoint, assoc): """ Creates a new AuthRequest object. This just stores each argument in an appropriately named field. Users of this library should not create instances of this class. Instances of this class are created by the library when needed. """ self.assoc = assoc self.endpoint = endpoint self.return_to_args = {} self.message = Message() self.message.setOpenIDNamespace(endpoint.preferredNamespace()) self._anonymous = False def setAnonymous(self, is_anonymous): """Set whether this request should be made anonymously. If a request is anonymous, the identifier will not be sent in the request. This is only useful if you are making another kind of request with an extension in this request. Anonymous requests are not allowed when the request is made with OpenID 1. @raises ValueError: when attempting to set an OpenID1 request as anonymous """ if is_anonymous and self.message.isOpenID1(): raise ValueError('OpenID 1 requests MUST include the ' 'identifier in the request') else: self._anonymous = is_anonymous def addExtension(self, extension_request): """Add an extension to this checkid request. @param extension_request: An object that implements the extension interface for adding arguments to an OpenID message. """ extension_request.toMessage(self.message) def addExtensionArg(self, namespace, key, value): """Add an extension argument to this OpenID authentication request. Use caution when adding arguments, because they will be URL-escaped and appended to the redirect URL, which can easily get quite long. @param namespace: The namespace for the extension. For example, the simple registration extension uses the namespace C{sreg}. @type namespace: str @param key: The key within the extension namespace. For example, the nickname field in the simple registration extension's key is C{nickname}. @type key: str @param value: The value to provide to the server for this argument. @type value: str """ self.message.setArg(namespace, key, value) def getMessage(self, realm, return_to=None, immediate=False): """Produce a L{openid.message.Message} representing this request. @param realm: The URL (or URL pattern) that identifies your web site to the user when she is authorizing it. @type realm: str @param return_to: The URL that the OpenID provider will send the user back to after attempting to verify her identity. Not specifying a return_to URL means that the user will not be returned to the site issuing the request upon its completion. @type return_to: str @param immediate: If True, the OpenID provider is to send back a response immediately, useful for behind-the-scenes authentication attempts. Otherwise the OpenID provider may engage the user before providing a response. 
This is the default case, as the user may need to provide credentials or approve the request before a positive response can be sent. @type immediate: bool @returntype: L{openid.message.Message} """ if return_to: return_to = oidutil.appendArgs(return_to, self.return_to_args) elif immediate: raise ValueError( '"return_to" is mandatory when using "checkid_immediate"') elif self.message.isOpenID1(): raise ValueError('"return_to" is mandatory for OpenID 1 requests') elif self.return_to_args: raise ValueError('extra "return_to" arguments were specified, ' 'but no return_to was specified') if immediate: mode = 'checkid_immediate' else: mode = 'checkid_setup' message = self.message.copy() if message.isOpenID1(): realm_key = 'trust_root' else: realm_key = 'realm' message.updateArgs(OPENID_NS, { realm_key:realm, 'mode':mode, 'return_to':return_to, }) if not self._anonymous: if self.endpoint.isOPIdentifier(): # This will never happen when we're in compatibility # mode, as long as isOPIdentifier() returns False # whenever preferredNamespace() returns OPENID1_NS. claimed_id = request_identity = IDENTIFIER_SELECT else: request_identity = self.endpoint.getLocalID() claimed_id = self.endpoint.claimed_id # This is true for both OpenID 1 and 2 message.setArg(OPENID_NS, 'identity', request_identity) if message.isOpenID2(): message.setArg(OPENID2_NS, 'claimed_id', claimed_id) if self.assoc: message.setArg(OPENID_NS, 'assoc_handle', self.assoc.handle) assoc_log_msg = 'with assocication %s' % (self.assoc.handle,) else: assoc_log_msg = 'using stateless mode.' oidutil.log("Generated %s request to %s %s" % (mode, self.endpoint.server_url, assoc_log_msg)) return message def redirectURL(self, realm, return_to=None, immediate=False): """Returns a URL with an encoded OpenID request. The resulting URL is the OpenID provider's endpoint URL with parameters appended as query arguments. You should redirect the user agent to this URL. OpenID 2.0 endpoints also accept POST requests, see C{L{shouldSendRedirect}} and C{L{formMarkup}}. @param realm: The URL (or URL pattern) that identifies your web site to the user when she is authorizing it. @type realm: str @param return_to: The URL that the OpenID provider will send the user back to after attempting to verify her identity. Not specifying a return_to URL means that the user will not be returned to the site issuing the request upon its completion. @type return_to: str @param immediate: If True, the OpenID provider is to send back a response immediately, useful for behind-the-scenes authentication attempts. Otherwise the OpenID provider may engage the user before providing a response. This is the default case, as the user may need to provide credentials or approve the request before a positive response can be sent. @type immediate: bool @returns: The URL to redirect the user agent to. @returntype: str """ message = self.getMessage(realm, return_to, immediate) return message.toURL(self.endpoint.server_url) def formMarkup(self, realm, return_to=None, immediate=False, form_tag_attrs=None): """Get html for a form to submit this request to the IDP. @param form_tag_attrs: Dictionary of attributes to be added to the form tag. 'accept-charset' and 'enctype' have defaults that can be overridden. If a value is supplied for 'action' or 'method', it will be replaced. 
@type form_tag_attrs: {unicode: unicode} """ message = self.getMessage(realm, return_to, immediate) return message.toFormMarkup(self.endpoint.server_url, form_tag_attrs) def shouldSendRedirect(self): """Should this OpenID authentication request be sent as a HTTP redirect or as a POST (form submission)? @rtype: bool """ return self.endpoint.compatibilityMode() FAILURE = 'failure' SUCCESS = 'success' CANCEL = 'cancel' SETUP_NEEDED = 'setup_needed' class Response(object): status = None def setEndpoint(self, endpoint): self.endpoint = endpoint if endpoint is None: self.identity_url = None else: self.identity_url = endpoint.claimed_id def getDisplayIdentifier(self): """Return the display identifier for this response. """ if self.endpoint is not None: return self.endpoint.getDisplayIdentifier() return None class SuccessResponse(Response): """A response with a status of SUCCESS. Indicates that this request is a successful acknowledgement from the OpenID server that the supplied URL is, indeed controlled by the requesting agent. @ivar identity_url: The identity URL that has been authenticated @ivar endpoint: The endpoint that authenticated the identifier. You may access other discovered information related to this endpoint, such as the CanonicalID of an XRI, through this object. @type endpoint: L{OpenIDServiceEndpoint<openid.consumer.discover.OpenIDServiceEndpoint>} @ivar signed_fields: The arguments in the server's response that were signed and verified. @cvar status: SUCCESS """ status = SUCCESS def __init__(self, endpoint, message, signed_fields=None): # Don't use setEndpoint, because endpoint should never be None # for a successfull transaction. self.endpoint = endpoint self.identity_url = endpoint.claimed_id self.message = message if signed_fields is None: signed_fields = [] self.signed_fields = signed_fields def isOpenID1(self): """Was this authentication response an OpenID 1 authentication response? """ return self.message.isOpenID1() def isSigned(self, ns_uri, ns_key): """Return whether a particular key is signed, regardless of its namespace alias """ return self.message.getKey(ns_uri, ns_key) in self.signed_fields def getSigned(self, ns_uri, ns_key, default=None): """Return the specified signed field if available, otherwise return default """ if self.isSigned(ns_uri, ns_key): return self.message.getArg(ns_uri, ns_key, default) else: return default def getSignedNS(self, ns_uri): """Get signed arguments from the response message. Return a dict of all arguments in the specified namespace. If any of the arguments are not signed, return None. """ msg_args = self.message.getArgs(ns_uri) for key in msg_args.iterkeys(): if not self.isSigned(ns_uri, key): oidutil.log("SuccessResponse.getSignedNS: (%s, %s) not signed." % (ns_uri, key)) return None return msg_args def extensionResponse(self, namespace_uri, require_signed): """Return response arguments in the specified namespace. @param namespace_uri: The namespace URI of the arguments to be returned. @param require_signed: True if the arguments should be among those signed in the response, False if you don't care. If require_signed is True and the arguments are not signed, return None. """ if require_signed: return self.getSignedNS(namespace_uri) else: return self.message.getArgs(namespace_uri) def getReturnTo(self): """Get the openid.return_to argument from this response. This is useful for verifying that this request was initiated by this consumer. 
@returns: The return_to URL supplied to the server on the
            initial request, or C{None} if the response did not contain
            an C{openid.return_to} argument.

        @returntype: str
        """
        return self.getSigned(OPENID_NS, 'return_to')

    def __eq__(self, other):
        return (
            (self.endpoint == other.endpoint) and
            (self.identity_url == other.identity_url) and
            (self.message == other.message) and
            (self.signed_fields == other.signed_fields) and
            (self.status == other.status))

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return '<%s.%s id=%r signed=%r>' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.identity_url,
            self.signed_fields)


class FailureResponse(Response):
    """A response with a status of FAILURE. Indicates that the OpenID
    protocol has failed. This could be locally or remotely triggered.

    @ivar identity_url: The identity URL for which authentication was
        attempted, if it can be determined. Otherwise, None.

    @ivar message: A message indicating why the request failed, if one
        is supplied; otherwise, None.

    @cvar status: FAILURE
    """

    status = FAILURE

    def __init__(self, endpoint, message=None, contact=None,
                 reference=None):
        self.setEndpoint(endpoint)
        self.message = message
        self.contact = contact
        self.reference = reference

    def __repr__(self):
        return "<%s.%s id=%r message=%r>" % (
            self.__class__.__module__, self.__class__.__name__,
            self.identity_url, self.message)


class CancelResponse(Response):
    """A response with a status of CANCEL. Indicates that the user
    cancelled the OpenID authentication request.

    @ivar identity_url: The identity URL for which authentication was
        attempted, if it can be determined. Otherwise, None.

    @cvar status: CANCEL
    """

    status = CANCEL

    def __init__(self, endpoint):
        self.setEndpoint(endpoint)


class SetupNeededResponse(Response):
    """A response with a status of SETUP_NEEDED. Indicates that the
    request was in immediate mode, and the server is unable to
    authenticate the user without further interaction.

    @ivar identity_url: The identity URL for which authentication was
        attempted.

    @ivar setup_url: A URL that can be used to send the user to the
        server to set up for authentication. The user should be
        redirected to the setup_url, either in the current window or
        in a new browser window. C{None} in OpenID 2.0.

    @cvar status: SETUP_NEEDED
    """

    status = SETUP_NEEDED

    def __init__(self, endpoint, setup_url=None):
        self.setEndpoint(endpoint)
        self.setup_url = setup_url
wtanaka/google-app-engine-django-openid
src/openid/consumer/consumer.py
Python
gpl-3.0
75,015
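The request and response classes in the consumer.py file above are normally driven through python-openid's Consumer wrapper, which lives in the same module: begin() produces the auth request whose redirectURL()/formMarkup() were shown above, and complete() yields one of the SUCCESS/FAILURE/CANCEL/SETUP_NEEDED responses. A minimal sketch of that flow; the dict session, in-memory store, realm and return URLs are placeholders, not part of the file above.

# Minimal sketch of driving the consumer API above (python-openid 2.x).
# The session, store and URLs are illustrative stand-ins.
from openid.consumer import consumer
from openid.store.memstore import MemoryStore

session = {}           # normally the per-user web session
store = MemoryStore()  # production code would use a persistent store

def start_login(user_supplied_url):
    oidc = consumer.Consumer(session, store)
    auth_request = oidc.begin(user_supplied_url)   # discovery + association
    return auth_request.redirectURL(
        realm='https://example.com/',
        return_to='https://example.com/openid/return')

def finish_login(query_args, current_url):
    oidc = consumer.Consumer(session, store)
    response = oidc.complete(query_args, current_url)
    if response.status == consumer.SUCCESS:
        return response.getDisplayIdentifier()
    elif response.status == consumer.SETUP_NEEDED:
        return response.setup_url                  # immediate mode only
    return None                                    # FAILURE or CANCEL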
#!/usr/bin/env python # Only needed until we can enable a pylint test for this. We may have to write # one or add it to another existing test (like the one to warn on inappropriate # variable names). Adding to an existing test may be hard as we may have many # other things that are not compliant with that test. import os import re import sys def main(): skip = set([ 'test/sanity/code-smell/%s' % os.path.basename(__file__), # These files currently use _ as a variable. Fix them and then remove them # from this list. Note that we're not sure if we'll translate module return # values. If we decide never to do that, then we can stop checking for those. 'contrib/inventory/gce.py', 'lib/ansible/cli/console.py', 'lib/ansible/compat/selectors/_selectors2.py', 'lib/ansible/executor/playbook_executor.py', 'lib/ansible/executor/task_queue_manager.py', 'lib/ansible/module_utils/facts/network/linux.py', 'lib/ansible/module_utils/urls.py', 'lib/ansible/modules/cloud/amazon/data_pipeline.py', 'lib/ansible/modules/cloud/amazon/ec2_group_facts.py', 'lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py', 'lib/ansible/modules/cloud/amazon/ec2_vpc_vpn.py', 'lib/ansible/modules/cloud/amazon/efs.py', 'lib/ansible/modules/cloud/amazon/efs_facts.py', 'lib/ansible/modules/cloud/amazon/kinesis_stream.py', 'lib/ansible/modules/cloud/amazon/route53_zone.py', 'lib/ansible/modules/cloud/amazon/s3_sync.py', 'lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py', 'lib/ansible/modules/cloud/google/gce.py', 'lib/ansible/modules/cloud/google/gce_eip.py', 'lib/ansible/modules/cloud/google/gce_img.py', 'lib/ansible/modules/cloud/google/gce_instance_template.py', 'lib/ansible/modules/cloud/google/gce_lb.py', 'lib/ansible/modules/cloud/google/gce_mig.py', 'lib/ansible/modules/cloud/google/gce_net.py', 'lib/ansible/modules/cloud/google/gce_pd.py', 'lib/ansible/modules/cloud/google/gce_snapshot.py', 'lib/ansible/modules/cloud/google/gce_tag.py', 'lib/ansible/modules/cloud/google/gcp_backend_service.py', 'lib/ansible/modules/cloud/google/gcp_healthcheck.py', 'lib/ansible/modules/cloud/lxc/lxc_container.py', 'lib/ansible/modules/files/copy.py', 'lib/ansible/modules/files/patch.py', 'lib/ansible/modules/files/synchronize.py', 'lib/ansible/modules/monitoring/statusio_maintenance.py', 'lib/ansible/modules/monitoring/zabbix/zabbix_maintenance.py', 'lib/ansible/modules/net_tools/basics/uri.py', 'lib/ansible/modules/network/cloudengine/ce_acl.py', 'lib/ansible/modules/network/cloudengine/ce_command.py', 'lib/ansible/modules/network/cloudengine/ce_dldp_interface.py', 'lib/ansible/modules/network/cloudengine/ce_mlag_interface.py', 'lib/ansible/modules/network/cloudvision/cv_server_provision.py', 'lib/ansible/modules/network/f5/bigip_remote_syslog.py', 'lib/ansible/modules/network/illumos/dladm_etherstub.py', 'lib/ansible/modules/network/illumos/dladm_iptun.py', 'lib/ansible/modules/network/illumos/dladm_linkprop.py', 'lib/ansible/modules/network/illumos/dladm_vlan.py', 'lib/ansible/modules/network/illumos/dladm_vnic.py', 'lib/ansible/modules/network/illumos/flowadm.py', 'lib/ansible/modules/network/illumos/ipadm_addr.py', 'lib/ansible/modules/network/illumos/ipadm_addrprop.py', 'lib/ansible/modules/network/illumos/ipadm_if.py', 'lib/ansible/modules/network/illumos/ipadm_ifprop.py', 'lib/ansible/modules/network/illumos/ipadm_prop.py', 'lib/ansible/modules/network/vyos/vyos_command.py', 'lib/ansible/modules/packaging/language/pip.py', 'lib/ansible/modules/packaging/os/yum.py', 'lib/ansible/modules/source_control/git.py', 
'lib/ansible/modules/system/alternatives.py', 'lib/ansible/modules/system/beadm.py', 'lib/ansible/modules/system/cronvar.py', 'lib/ansible/modules/system/dconf.py', 'lib/ansible/modules/system/filesystem.py', 'lib/ansible/modules/system/gconftool2.py', 'lib/ansible/modules/system/interfaces_file.py', 'lib/ansible/modules/system/iptables.py', 'lib/ansible/modules/system/java_cert.py', 'lib/ansible/modules/system/lvg.py', 'lib/ansible/modules/system/lvol.py', 'lib/ansible/modules/system/parted.py', 'lib/ansible/modules/system/timezone.py', 'lib/ansible/modules/system/ufw.py', 'lib/ansible/modules/utilities/logic/wait_for.py', 'lib/ansible/modules/web_infrastructure/rundeck_acl_policy.py', 'lib/ansible/parsing/vault/__init__.py', 'lib/ansible/playbook/base.py', 'lib/ansible/playbook/helpers.py', 'lib/ansible/playbook/role/__init__.py', 'lib/ansible/playbook/taggable.py', 'lib/ansible/plugins/callback/hipchat.py', 'lib/ansible/plugins/connection/lxc.py', 'lib/ansible/plugins/filter/core.py', 'lib/ansible/plugins/lookup/sequence.py', 'lib/ansible/plugins/strategy/__init__.py', 'lib/ansible/plugins/strategy/linear.py', 'test/legacy/cleanup_gce.py', 'test/legacy/gce_credentials.py', 'test/runner/lib/cloud/cs.py', 'test/runner/lib/core_ci.py', 'test/runner/lib/delegation.py', 'test/runner/lib/docker_util.py', 'test/runner/lib/executor.py', 'test/runner/lib/http.py', 'test/runner/lib/import_analysis.py', 'test/runner/lib/manage_ci.py', 'test/runner/lib/target.py', 'test/runner/lib/util.py', 'test/sanity/import/importer.py', 'test/sanity/validate-modules/main.py', 'test/units/executor/test_play_iterator.py', 'test/units/module_utils/basic/test_run_command.py', 'test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py', 'test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py', 'test/units/modules/system/interfaces_file/test_interfaces_file.py', ]) for path in sys.argv[1:] or sys.stdin.read().splitlines(): if path in skip: continue with open(path, 'r') as path_fd: for line, text in enumerate(path_fd.readlines()): match = re.search(r'(?: |[^C]\()(_)(?:[ ,)])', text) if match: print('%s:%d:%d: use `dummy` instead of `_` for a variable name' % ( path, line + 1, match.start(1) + 1)) if __name__ == '__main__': main()
Jorge-Rodriguez/ansible
test/sanity/code-smell/no-underscore-variable.py
Python
gpl-3.0
6,781
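The sanity test above hinges on a single regular expression: it flags a bare `_` that is preceded by a space or by an opening parenthesis (excluding the `C(` documentation markup) and followed by a space, comma, or closing parenthesis, so gettext-style calls such as `_( ... )` pass. A small illustrative check of that pattern; the sample lines are made up.

import re

PATTERN = r'(?: |[^C]\()(_)(?:[ ,)])'   # same expression the sanity test uses

samples = [
    'for _ in range(10):',          # flagged: `_` used as a throwaway variable
    'value, _ = pair',              # flagged: `_` in tuple unpacking
    'msg = _("translated text")',   # not flagged: `_(` looks like a gettext call
]

for text in samples:
    match = re.search(PATTERN, text)
    print(text, '->', 'flagged' if match else 'ok')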
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Getting Things GNOME! - a personal organizer for the GNOME desktop # Copyright (c) 2008-2014 - Lionel Dricot & Bertrand Rousseau # # This program is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. # ----------------------------------------------------------------------------- from unittest import TestCase from GTG.tools.networkmanager import is_connection_up class TestNetworkManager(TestCase): def test_is_connection_up_and_doesnt_throw_exception(self): self.assertIn(is_connection_up(), [True, False])
shtrom/gtg
tests/tools/test_networkmanager.py
Python
gpl-3.0
1,196
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Nandor Sivok <nandor@gawker.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: netscaler version_added: "1.1" short_description: Manages Citrix NetScaler entities description: - Manages Citrix NetScaler server and service entities. options: nsc_host: description: - hostname or ip of your netscaler required: true default: null aliases: [] nsc_protocol: description: - protocol used to access netscaler required: false default: https aliases: [] user: description: - username required: true default: null aliases: [] password: description: - password required: true default: null aliases: [] action: description: - the action you want to perform on the entity required: false default: disable choices: ["enable", "disable"] aliases: [] name: description: - name of the entity required: true default: hostname aliases: [] type: description: - type of the entity required: false default: server choices: ["server", "service"] aliases: [] validate_certs: description: - If C(no), SSL certificates for the target url will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] requirements: [] author: "Nandor Sivok (@dominis)" ''' EXAMPLES = ''' # Disable the server - netscaler: nsc_host: nsc.example.com user: apiuser password: apipass # Enable the server - netscaler: nsc_host: nsc.example.com user: apiuser password: apipass action: enable # Disable the service local:8080 - netscaler: nsc_host: nsc.example.com user: apiuser password: apipass name: 'local:8080' type: service action: disable ''' import base64 import json import socket import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils._text import to_native from ansible.module_utils.urls import fetch_url class netscaler(object): _nitro_base_url = '/nitro/v1/' def __init__(self, module): self.module = module def http_request(self, api_endpoint, data_json={}): request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint data_json = urlencode(data_json) if not len(data_json): data_json = None auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip() headers = { 'Authorization': 'Basic %s' % auth, 'Content-Type' : 'application/x-www-form-urlencoded', } response, info = fetch_url(self.module, request_url, data=data_json, headers=headers) return json.load(response) def prepare_request(self, action): resp = self.http_request( 'config', { "object": { "params": {"action": action}, self._type: {"name": self._name} } } ) return resp def core(module): n = netscaler(module) n._nsc_host = module.params.get('nsc_host') n._nsc_user = module.params.get('user') n._nsc_pass = module.params.get('password') n._nsc_protocol = module.params.get('nsc_protocol') n._name = module.params.get('name') n._type = module.params.get('type') action = module.params.get('action') r = n.prepare_request(action) return r['errorcode'], r def main(): module = AnsibleModule( argument_spec = dict( nsc_host = dict(required=True), nsc_protocol = dict(default='https'), user = dict(required=True), password = 
dict(required=True, no_log=True), action = dict(default='enable', choices=['enable','disable']), name = dict(default=socket.gethostname()), type = dict(default='server', choices=['service', 'server']), validate_certs=dict(default='yes', type='bool'), ) ) rc = 0 try: rc, result = core(module) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) if rc != 0: module.fail_json(rc=rc, msg=result) else: result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
nrwahl2/ansible
lib/ansible/modules/network/citrix/netscaler.py
Python
gpl-3.0
4,912
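The netscaler module above talks to the NITRO REST API by hand: it joins the protocol, host and `/nitro/v1/` base path into a URL, urlencodes the action payload, and adds a Basic auth header built from the user/password pair. A standalone sketch of that request assembly using only the standard library; the host, credentials and payload are placeholders, and base64.b64encode stands in for the deprecated encodestring call used above.

import base64
from urllib.parse import urlencode   # Python 3 counterpart of the six import above

NITRO_BASE = '/nitro/v1/'

def build_request(protocol, host, endpoint, user, password, payload):
    """Roughly mirror netscaler.http_request: URL, form body and auth header."""
    url = '%s://%s%s%s' % (protocol, host, NITRO_BASE, endpoint)
    body = urlencode(payload) or None
    token = base64.b64encode(('%s:%s' % (user, password)).encode()).decode()
    headers = {
        'Authorization': 'Basic %s' % token,
        'Content-Type': 'application/x-www-form-urlencoded',
    }
    return url, body, headers

# Example with placeholder values:
url, body, headers = build_request(
    'https', 'nsc.example.com', 'config', 'apiuser', 'apipass',
    {'object': '{"params": {"action": "disable"}, "server": {"name": "web01"}}'})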
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2016, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ A simple tutorial that shows some features of the Spatial Pooler. The following program has the purpose of presenting some basic properties of the Spatial Pooler. It reproduces Figs. 5, 7 and 9 from this paper: http://arxiv.org/abs/1505.02142 To learn more about the Spatial Pooler have a look at BAMI: http://numenta.com/biological-and-machine-intelligence/ or at its class reference in the NuPIC documentation: http://numenta.org/docs/nupic/classnupic_1_1research_1_1spatial__pooler_1_1_spatial_pooler.html The purpose of the Spatial Pooler is to create a sparse representation of its inputs in such a way that similar inputs will be mapped to similar sparse representations. Thus, the Spatial Pooler should exhibit some resilience to noise in its input. """ import matplotlib import numpy as np import random matplotlib.use('Agg') import matplotlib.pyplot as plt from nupic.algorithms.spatial_pooler import SpatialPooler as SP def percentOverlap(x1, x2, size): """ Computes the percentage of overlap between vectors x1 and x2. @param x1 (array) binary vector @param x2 (array) binary vector @param size (int) length of binary vectors @return percentOverlap (float) percentage overlap between x1 and x2 """ nonZeroX1 = np.count_nonzero(x1) nonZeroX2 = np.count_nonzero(x2) minX1X2 = min(nonZeroX1, nonZeroX2) percentOverlap = 0 if minX1X2 > 0: percentOverlap = float(np.dot(x1, x2))/float(minX1X2) return percentOverlap def corruptVector(vector, noiseLevel): """ Corrupts a binary vector by inverting noiseLevel percent of its bits. @param vector (array) binary vector to be corrupted @param noiseLevel (float) amount of noise to be applied on the vector. """ size = len(vector) for i in range(size): rnd = random.random() if rnd < noiseLevel: if vector[i] == 1: vector[i] = 0 else: vector[i] = 1 def resetVector(x1, x2): """ Copies the contents of vector x1 into vector x2. 
@param x1 (array) binary vector to be copied @param x2 (array) binary vector where x1 is copied """ size = len(x1) for i in range(size): x2[i] = x1[i] random.seed(1) uintType = "uint32" inputDimensions = (1000,1) columnDimensions = (2048,1) inputSize = np.array(inputDimensions).prod() columnNumber = np.array(columnDimensions).prod() inputArray = np.zeros(inputSize, dtype=uintType) for i in range(inputSize): inputArray[i] = random.randrange(2) activeCols = np.zeros(columnNumber, dtype=uintType) sp = SP(inputDimensions, columnDimensions, potentialRadius = int(0.5*inputSize), numActiveColumnsPerInhArea = int(0.02*columnNumber), globalInhibition = True, seed = 1, synPermActiveInc = 0.01, synPermInactiveDec = 0.008 ) # Part 1: # ------- # A column connects to a subset of the input vector (specified # by both the potentialRadius and potentialPct). The overlap score # for a column is the number of connections to the input that become # active when presented with a vector. When learning is 'on' in the SP, # the active connections are reinforced, whereas those inactive are # depressed (according to parameters synPermActiveInc and synPermInactiveDec. # In order for the SP to create a sparse representation of the input, it # will select a small percentage (usually 2%) of its most active columns, # ie. columns with the largest overlap score. # In this first part, we will create a histogram showing the overlap scores # of the Spatial Pooler (SP) after feeding it with a random binary # input. As well, the histogram will show the scores of those columns # that are chosen to build the sparse representation of the input. sp.compute(inputArray, False, activeCols) overlaps = sp.getOverlaps() activeColsScores = [] for i in activeCols.nonzero(): activeColsScores.append(overlaps[i]) print "" print "---------------------------------" print "Figure 1 shows an histogram of the overlap scores" print "from all the columns in the spatial pooler, as well as the" print "overlap scores of those columns that were selected to build a" print "sparse representation of the input (shown in green)." print "The SP chooses 2% of the columns with the largest overlap score" print "to make such sparse representation." print "---------------------------------" print "" bins = np.linspace(min(overlaps), max(overlaps), 28) plt.hist(overlaps, bins, alpha=0.5, label='All cols') plt.hist(activeColsScores, bins, alpha=0.5, label='Active cols') plt.legend(loc='upper right') plt.xlabel("Overlap scores") plt.ylabel("Frequency") plt.title("Figure 1: Column overlap of a SP with random input.") plt.savefig("figure_1") plt.close() # Part 2a: # ------- # The input overlap between two binary vectors is defined as their dot product. In order # to normalize this value we divide by the minimum number of active inputs # (in either vector). This means we are considering the sparser vector as reference. # Two identical binary vectors will have an input overlap of 1, whereas two completely # different vectors (one is the logical NOT of the other) will yield an overlap of 0. # In this section we will see how the input overlap of two binary vectors decrease as we # add noise to one of them. 
inputX1 = np.zeros(inputSize, dtype=uintType) inputX2 = np.zeros(inputSize, dtype=uintType) outputX1 = np.zeros(columnNumber, dtype=uintType) outputX2 = np.zeros(columnNumber, dtype=uintType) for i in range(inputSize): inputX1[i] = random.randrange(2) x = [] y = [] for noiseLevel in np.arange(0, 1.1, 0.1): resetVector(inputX1, inputX2) corruptVector(inputX2, noiseLevel) x.append(noiseLevel) y.append(percentOverlap(inputX1, inputX2, inputSize)) print "" print "---------------------------------" print "Figure 2 shows the input overlap between 2 identical binary" print "vectors in function of the noise applied to one of them." print "0 noise level means that the vector remains the same, whereas" print "1 means that the vector is the logical negation of the original" print "vector." print "The relationship between overlap and noise level is practically" print "linear and monotonically decreasing." print "---------------------------------" print "" plt.plot(x, y) plt.xlabel("Noise level") plt.ylabel("Input overlap") plt.title("Figure 2: Input overlap between 2 identical vectors in function of noiseLevel.") plt.savefig("figure_2") plt.close() # Part 2b: # ------- # The output overlap between two binary input vectors is the overlap of the # columns that become active once they are fed to the SP. In this part we # turn learning off, and observe the output of the SP as we input two binary # input vectors with varying level of noise. # Starting from two identical vectors (that yield the same active columns) # we would expect that as we add noise to one of them their output overlap # decreases. # In this part we will show how the output overlap behaves in function of the # input overlap between two vectors. # Even with an untrained spatial pooler, we see some noise resilience. # Note that due to the non-linear properties of high dimensional SDRs, overlaps # greater than 10 bits, or 25% in this example, are considered significant. x = [] y = [] for noiseLevel in np.arange(0, 1.1, 0.1): resetVector(inputX1, inputX2) corruptVector(inputX2, noiseLevel) sp.compute(inputX1, False, outputX1) sp.compute(inputX2, False, outputX2) x.append(percentOverlap(inputX1, inputX2, inputSize)) y.append(percentOverlap(outputX1, outputX2, columnNumber)) print "" print "---------------------------------" print "Figure 3 shows the output overlap between two sparse representations" print "in function of their input overlap. Starting from two identical binary vectors" print "(which yield the same active columns) we add noise two one of them" print "feed it to the SP, and estimate the output overlap between the two" print "representations in terms of the common active columns between them." print "As expected, as the input overlap decrease, so does the output overlap." print "---------------------------------" print "" plt.plot(x, y) plt.xlabel("Input overlap") plt.ylabel("Output overlap") plt.title("Figure 3: Output overlap in function of input overlap in a SP without training") plt.savefig("figure_3") plt.close() # Part 3: # ------- # After training, a SP can become less sensitive to noise. For this purpose, we train the SP by # turning learning on, and by exposing it to a variety of random binary vectors. # We will expose the SP to a repetition of input patterns in order to make it learn and distinguish # them once learning is over. This will result in robustness to noise in the inputs. # In this section we will reproduce the plot in the last section after the SP has learned a series # of inputs. 
Here we will see how the SP exhibits increased resilience to noise after learning. # We will present 10 random vectors to the SP, and repeat this 30 times. # Later you can try changing the number of times we do this to see how it changes the last plot. # Then, you could also modify the number of examples to see how the SP behaves. # Is there a relationship between the number of examples and the number of times that # we expose them to the SP? numExamples = 10 inputVectors = np.zeros((numExamples, inputSize), dtype=uintType) outputColumns = np.zeros((numExamples, columnNumber), dtype=uintType) for i in range(numExamples): for j in range(inputSize): inputVectors[i][j] = random.randrange(2) # This is the number of times that we will present the input vectors to the SP epochs = 30 for _ in range(epochs): for i in range(numExamples): #Feed the examples to the SP sp.compute(inputVectors[i][:], True, outputColumns[i][:]) inputVectorsCorrupted = np.zeros((numExamples, inputSize), dtype=uintType) outputColumnsCorrupted = np.zeros((numExamples, columnNumber), dtype=uintType) x = [] y = [] # We will repeat the experiment in the last section for only one input vector # in the set of input vectors for noiseLevel in np.arange(0, 1.1, 0.1): resetVector(inputVectors[0][:], inputVectorsCorrupted[0][:]) corruptVector(inputVectorsCorrupted[0][:], noiseLevel) sp.compute(inputVectors[0][:], False, outputColumns[0][:]) sp.compute(inputVectorsCorrupted[0][:], False, outputColumnsCorrupted[0][:]) x.append(percentOverlap(inputVectors[0][:], inputVectorsCorrupted[0][:], inputSize)) y.append(percentOverlap(outputColumns[0][:], outputColumnsCorrupted[0][:], columnNumber)) print "" print "---------------------------------" print "How robust is the SP to noise after learning?" print "Figure 4 shows again the output overlap between two binary vectors in function" print "of their input overlap. After training, the SP exhibits more robustness to noise" print "in its input, resulting in a -almost- sigmoid curve. This implies that even if a" print "previous input is presented again with a certain amount of noise its sparse" print "representation still resembles its original." print "---------------------------------" print "" plt.plot(x, y) plt.xlabel("Input overlap") plt.ylabel("Output overlap") plt.title("Figure 4: Output overlap in function of input overlap in a SP after training") plt.savefig("figure_4") plt.close() print "" print "+++++++++++++++++++++++++++++++++++++++++++++++++++" print " All images generated by this script will be saved" print " in your current working directory." print "+++++++++++++++++++++++++++++++++++++++++++++++++++" print ""
ywcui1990/nupic
examples/sp/sp_tutorial.py
Python
agpl-3.0
12,463
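percentOverlap in the tutorial above normalizes the dot product of two binary vectors by the number of active bits in the sparser one, so identical vectors score 1.0 and disjoint ones 0.0. A tiny worked example with 10-bit vectors whose values are chosen purely for illustration:

import numpy as np

x1 = np.array([1, 0, 1, 1, 0, 0, 1, 0, 0, 0])   # active at 0, 2, 3, 6
x2 = np.array([1, 0, 0, 1, 0, 0, 1, 0, 1, 0])   # active at 0, 3, 6, 8

shared = np.dot(x1, x2)                                      # 3 bits active in both
sparser = min(np.count_nonzero(x1), np.count_nonzero(x2))    # both have 4 active bits
print(shared / float(sparser))   # 0.75, same value percentOverlap(x1, x2, 10) returns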
""" Synchronizes a mailchimp list with the students of a course. """ import itertools import logging import math import random from collections import namedtuple from itertools import chain from optparse import make_option from django.core.management.base import BaseCommand, CommandError from mailsnake import MailSnake from opaque_keys.edx.keys import CourseKey from student.models import UserProfile, unique_id_for_user BATCH_SIZE = 15000 # If you try to subscribe with too many users at once # the transaction times out on the mailchimp side. SUBSCRIBE_BATCH_SIZE = 1000 log = logging.getLogger('edx.mailchimp') FIELD_TYPES = {'EDX_ID': 'text'} class Command(BaseCommand): """ Synchronizes a mailchimp list with the students of a course. """ args = '<mailchimp_key mailchimp_list course_id>' help = 'Synchronizes a mailchimp list with the students of a course.' option_list = BaseCommand.option_list + ( make_option('--key', action='store', help='mailchimp api key'), make_option('--list', action='store', dest='list_id', help='mailchimp list id'), make_option('--course', action='store', dest='course_id', help='xmodule course_id'), make_option('--segments', action='store', dest='segments', default=0, type=int, help='number of static random segments to create'), ) def parse_options(self, options): """Parses `options` of the command.""" if not options['key']: raise CommandError('missing key') if not options['list_id']: raise CommandError('missing list id') if not options['course_id']: raise CommandError('missing course id') return (options['key'], options['list_id'], options['course_id'], options['segments']) def handle(self, *args, **options): """Synchronizes a mailchimp list with the students of a course.""" key, list_id, course_id, nsegments = self.parse_options(options) log.info('Syncronizing email list for %s', course_id) mailchimp = connect_mailchimp(key) subscribed = get_subscribed(mailchimp, list_id) unsubscribed = get_unsubscribed(mailchimp, list_id) cleaned = get_cleaned(mailchimp, list_id) non_subscribed = unsubscribed.union(cleaned) enrolled = get_enrolled_students(course_id) exclude = subscribed.union(non_subscribed) to_subscribe = get_student_data(enrolled, exclude=exclude) tag_names = set(chain.from_iterable(d.keys() for d in to_subscribe)) update_merge_tags(mailchimp, list_id, tag_names) subscribe_with_data(mailchimp, list_id, to_subscribe) enrolled_emails = set(enrolled.values_list('user__email', flat=True)) non_enrolled_emails = list(subscribed.difference(enrolled_emails)) unsubscribe(mailchimp, list_id, non_enrolled_emails) subscribed = subscribed.union(set(d['EMAIL'] for d in to_subscribe)) make_segments(mailchimp, list_id, nsegments, subscribed) def connect_mailchimp(api_key): """ Initializes connection to the mailchimp api """ mailchimp = MailSnake(api_key) result = mailchimp.ping() log.debug(result) return mailchimp def verify_list(mailchimp, list_id, course_id): """ Verifies that the given list_id corresponds to the course_id Returns boolean: whether or not course_id matches list_id """ lists = mailchimp.lists(filters={'list_id': list_id})['data'] if len(lists) != 1: log.error('incorrect list id') return False list_name = lists[0]['name'] log.debug('list name: %s', list_name) # check that we are connecting to the correct list parts = course_id.replace('_', ' ').replace('/', ' ').split() count = sum(1 for p in parts if p in list_name) if count < 3: log.info(course_id) log.info(list_name) log.error('course_id does not match list name') return False return True def 
get_student_data(students, exclude=None): """ Given a QuerySet of Django users, extracts id, username, and is_anonymous data. Excludes any users provided in the optional `exclude` set. Returns a list of dictionaries for each user, where the dictionary has keys 'EMAIL', 'FULLNAME', and 'EDX_ID'. """ # To speed the query, we won't retrieve the full User object, only # two of its values. The namedtuple simulates the User object. FakeUser = namedtuple('Fake', 'id username is_anonymous') # pylint: disable=invalid-name exclude = exclude if exclude else set() def make(svalue): """ Given a User value entry `svalue`, extracts the student's email and fullname, and provides a unique id for the user. Returns a dictionary with keys 'EMAIL', 'FULLNAME', and 'EDX_ID'. """ fake_user = FakeUser(svalue['user_id'], svalue['user__username'], lambda: True) entry = { 'EMAIL': svalue['user__email'], 'FULLNAME': svalue['name'].title(), 'EDX_ID': unique_id_for_user(fake_user) } return entry fields = 'user__email', 'name', 'user_id', 'user__username' values = students.values(*fields) # TODO: Since `students` is a QuerySet, can we chain a filter here that would be more # performant than calling a lambda for every user? exclude_func = lambda s: s['user__email'] in exclude return [make(s) for s in values if not exclude_func(s)] def get_enrolled_students(course_id): """ Given a course_id, returns a QuerySet of all the active students in the course. """ objects = UserProfile.objects course_key = CourseKey.from_string(course_id) students = objects.filter(user__courseenrollment__course_id=course_key, user__courseenrollment__is_active=True) return students def get_subscribed(mailchimp, list_id): """Returns a set of email addresses subscribed to `list_id`""" return get_members(mailchimp, list_id, 'subscribed') def get_unsubscribed(mailchimp, list_id): """Returns a set of email addresses that have unsubscribed from `list_id`""" return get_members(mailchimp, list_id, 'unsubscribed') def get_cleaned(mailchimp, list_id): """ Returns a set of email addresses that have been cleaned from `list_id` These email addresses may be invalid or have caused bounces, so you don't want to re-add them back to the list. """ return get_members(mailchimp, list_id, 'cleaned') def get_members(mailchimp, list_id, status): """ Given a mailchimp list id and a user status to filter on, returns all members of the mailchimp list with that status. Returns a set of email addresses. """ mc_get_members = mailchimp.listMembers members = set() for page in itertools.count(): response = mc_get_members(id=list_id, status=status, start=page, limit=BATCH_SIZE) data = response.get('data', []) if not data: break members.update(d['email'] for d in data) return members def unsubscribe(mailchimp, list_id, emails): """ Batch unsubscribe the given email addresses from the list represented by `list_id` """ batch_unsubscribe = mailchimp.listBatchUnsubscribe result = batch_unsubscribe(id=list_id, emails=emails, send_goodbye=False, delete_member=False) log.debug(result) def update_merge_tags(mailchimp, list_id, tag_names): """ This function is rather inscrutable. Given tag_names, which in this code seems to be a list of ['FULLNAME', 'EMAIL', 'EDX_ID'], we grab tags from the mailchimp list, then we verify tag_names has 'FULLNAME' and 'EMAIL' present, we get more data from mailchimp, then sync the variables up to mailchimp using `listMergeVarAdd`. The purpose of this function is unclear. 
""" mc_vars = mailchimp.listMergeVars(id=list_id) mc_names = set(v['name'] for v in mc_vars) mc_merge = mailchimp.listMergeVarAdd tags = [v['tag'] for v in mc_vars] for name in tag_names: tag = name_to_tag(name) # verify FULLNAME is present # TODO: Why is this under the for loop? It does nothing with the loop # variable and seems like things would work if this was executed before or # after the loop. if 'FULLNAME' not in tags: result = mc_merge(id=list_id, tag='FULLNAME', name='Full Name', options={'field_type': 'text', 'public': False}) tags.append('FULLNAME') log.debug(result) # add extra tags if not present if name not in mc_names and tag not in ['EMAIL', 'FULLNAME']: ftype = FIELD_TYPES.get(name, 'number') result = mc_merge(id=list_id, tag=tag, name=name, options={'field_type': ftype, 'public': False}) tags.append(tag) log.debug(result) def subscribe_with_data(mailchimp, list_id, user_data): """ Given user_data in the form of a list of dictionaries for each user, where the dictionary has keys 'EMAIL', 'FULLNAME', and 'EDX_ID', batch subscribe the users to the given `list_id` via a Mailchimp api method. Returns None """ format_entry = lambda e: {name_to_tag(k): v for k, v in e.iteritems()} formated_data = list(format_entry(e) for e in user_data) # send the updates in batches of a fixed size for batch in chunk(formated_data, SUBSCRIBE_BATCH_SIZE): result = mailchimp.listBatchSubscribe(id=list_id, batch=batch, double_optin=False, update_existing=True) log.debug( "Added: %s Error on: %s", result['add_count'], result['error_count'] ) def make_segments(mailchimp, list_id, count, emails): """ Segments the list of email addresses `emails` into `count` segments, if count is nonzero. For unknown historical reasons, lost to the winds of time, this is done with a random order to the email addresses. First, existing 'random_' mailchimp segments are deleted. Then, the list of emails (the whole, large list) is shuffled. Finally, the shuffled emails are chunked into `count` segments and re-uploaded to mailchimp as 'random_'-prefixed segments. """ if count > 0: # reset segments segments = mailchimp.listStaticSegments(id=list_id) for seg in segments: if seg['name'].startswith('random'): mailchimp.listStaticSegmentDel(id=list_id, seg_id=seg['id']) # shuffle and split emails emails = list(emails) random.shuffle(emails) # Why do we do this? chunk_size = int(math.ceil(float(len(emails)) / count)) chunks = list(chunk(emails, chunk_size)) # create segments and add emails for seg in xrange(count): name = 'random_{0:002}'.format(seg) seg_id = mailchimp.listStaticSegmentAdd(id=list_id, name=name) for batch in chunk(chunks[seg], BATCH_SIZE): mailchimp.listStaticSegmentMembersAdd( id=list_id, seg_id=seg_id, batch=batch ) def name_to_tag(name): """ Returns sanitized str `name`: no more than 10 characters, with spaces replaced with `_` """ if len(name) > 10: name = name[:10] return name.replace(' ', '_').strip() def chunk(elist, size): """ Generator. Yields a list of size `size` of the given list `elist`, or a shorter list if at the end of the input. """ for i in xrange(0, len(elist), size): yield elist[i:i + size]
pepeportela/edx-platform
lms/djangoapps/mailing/management/commands/mailchimp_sync_course.py
Python
agpl-3.0
12,350
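The mailchimp sync command above never sends the whole list in one call: subscribe_with_data and make_segments both slice their input with the chunk generator because, as the module notes, very large transactions time out on the MailChimp side. A short self-contained example of that batching helper (placeholder addresses, and range instead of the Python 2 xrange used above):

def chunk(elist, size):
    """Same generator as in the script above, written for Python 3."""
    for i in range(0, len(elist), size):
        yield elist[i:i + size]

emails = ['user%d@example.com' % i for i in range(7)]   # placeholder addresses
for batch in chunk(emails, 3):
    print(batch)
# prints three batches of sizes 3, 3 and 1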
# -*- coding: utf-8 -*- # Copyright(C) 2010-2011 Romain Bignon # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. import re __all__ = ['AntiSpam'] class AntiSpam(object): def check_thread(self, thread): resume = thread['title'] # Check if there is an email address in the offer. if re.match('^[\w\d\.\-_]+@[\w\d\.]+ vous offre la pos', resume): return False if thread['who']['pseudo'] == 'Ekaterina': return False return True def check_profile(self, profile): # The name of profile is in form #123456789 if profile['pseudo'] == '': return False if profile['announce'].startswith('salut! je te donne mon msn'): return False if profile['shopping_list'].startswith('cam to cam'): return False if profile['shopping_list'].startswith('je suis une femme tres tres belle et je recherche un homme qui aime le sexe'): return False if profile['shopping_list'].endswith('mmmmmmmmmmmmmmmm'): return False return True # ipaddr is not available anymore. for ipaddr in (profile['last_ip'], profile['first_ip']): if ipaddr.startswith('41.202.'): return False if ipaddr.startswith('41.250.'): return False if ipaddr.startswith('41.251.'): return False if ipaddr.startswith('41.141.'): return False if ipaddr.startswith('194.177.'): return False if ipaddr.startswith('41.85.'): return False if ipaddr.startswith('41.86.'): return False if ipaddr.startswith('196.47.'): return False if re.match('105\.13\d.*', ipaddr): return False if ipaddr in ('62.157.186.18', '198.36.222.8', '212.234.67.61', '203.193.158.210', '41.189.34.180', '41.66.12.36', '196.47.137.21', '213.136.125.122', '41.191.87.188'): return False return True def check_contact(self, contact): if contact.id == 1: return True if not self.check_profile(contact._aum_profile): return False return True # ipaddr is not available anymore. 
first_ip = contact.profile['info']['IPaddr'].value.split(' ')[0] last_ip = contact.profile['info']['IPaddr'].value.rstrip(')') for ipaddr in (first_ip, last_ip): if ipaddr.endswith('.afnet.net'): return False if ipaddr.endswith('.iam.net.ma'): return False if ipaddr.endswith('.amsterdam.ananoos.net'): return False if ipaddr.endswith('.tedata.net'): return False if ipaddr.endswith('kupo.fr'): return False if ipaddr.endswith('.static.virginmedia.com'): return False if ipaddr.endswith('frozenway.com'): return False if ipaddr.endswith('.rev.bgtn.net'): return False if ipaddr.endswith('real-vpn.com'): return False if ipaddr.endswith('.nl.ipodah.net'): return False if ipaddr.endswith('.wanamaroc.com'): return False if ipaddr.endswith('.ukservers.com'): return False if ipaddr.endswith('.startdedicated.com'): return False if ipaddr.endswith('.clients.your-server.de'): return False if ipaddr.endswith('.cba.embratel.net.br'): return False if ipaddr.endswith('.idstelcom.com'): return False if ipaddr.endswith('proxy.chg-support.com'): return False if ipaddr.endswith('.sprintsvc.net'): return False if ipaddr.endswith('.relakks.com'): return False return True def check_mail(self, mail): # Spambot with a long first-message. if mail['message'] is None: return True if mail['message'].find('Je veux que vous m\'ayez ecrit directement sur le mon e-mail') >= 0: return False if mail['message'].find('ilusa12010@live.fr') >= 0: return False return True
blckshrk/Weboob
modules/aum/antispam.py
Python
agpl-3.0
5,066
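The AntiSpam class above is a stack of plain heuristics over scraped dicts; check_thread, for example, rejects a thread when the title looks like an "e-mail address offers you ..." message or the sender uses a blacklisted pseudo. A condensed, self-contained version of just those two rules, with made-up thread fixtures (note the helper returns True for spam, whereas check_thread above returns False):

import re

EMAIL_OFFER = re.compile(r'^[\w\d.\-_]+@[\w\d.]+ vous offre la pos')

def looks_like_spam_thread(thread):
    """Condensed version of AntiSpam.check_thread above (True means spam)."""
    if EMAIL_OFFER.match(thread['title']):
        return True
    return thread['who']['pseudo'] == 'Ekaterina'

threads = [
    {'title': 'Bonjour tout le monde', 'who': {'pseudo': 'alice75'}},
    {'title': 'spam@example.com vous offre la possibilite de gagner',
     'who': {'pseudo': 'Ekaterina'}},
]
print([t['title'] for t in threads if not looks_like_spam_thread(t)])
# only the first thread survives the filter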
""" Unit tests for instructor dashboard Based on (and depends on) unit tests for courseware. Notes for running by hand: ./manage.py lms --settings test test lms/djangoapps/instructor """ from django.test.utils import override_settings # Need access to internal func to put users in the right group from django.contrib.auth.models import User from django.core.urlresolvers import reverse from courseware.tests.helpers import LoginEnrollmentTestCase from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE from courseware.roles import CourseStaffRole from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.django import modulestore, clear_existing_modulestores @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class TestInstructorDashboardGradeDownloadCSV(ModuleStoreTestCase, LoginEnrollmentTestCase): ''' Check for download of csv ''' def setUp(self): clear_existing_modulestores() self.toy = modulestore().get_course("edX/toy/2012_Fall") # Create two accounts self.student = 'view@test.com' self.instructor = 'view2@test.com' self.password = 'foo' self.create_account('u1', self.student, self.password) self.create_account('u2', self.instructor, self.password) self.activate_user(self.student) self.activate_user(self.instructor) def make_instructor(course): """ Create an instructor for the course. """ CourseStaffRole(course.location).add_users(User.objects.get(email=self.instructor)) make_instructor(self.toy) self.logout() self.login(self.instructor, self.password) self.enroll(self.toy) def test_download_grades_csv(self): course = self.toy url = reverse('instructor_dashboard', kwargs={'course_id': course.id}) msg = "url = {0}\n".format(url) response = self.client.post(url, {'action': 'Download CSV of all student grades for this course'}) msg += "instructor dashboard download csv grades: response = '{0}'\n".format(response) self.assertEqual(response['Content-Type'], 'text/csv', msg) cdisp = response['Content-Disposition'] msg += "Content-Disposition = '%s'\n" % cdisp self.assertEqual(cdisp, 'attachment; filename=grades_{0}.csv'.format(course.id), msg) body = response.content.replace('\r', '') msg += "body = '{0}'\n".format(body) # All the not-actually-in-the-course hw and labs come from the # default grading policy string in graders.py expected_body = '''"ID","Username","Full Name","edX email","External email","HW 01","HW 02","HW 03","HW 04","HW 05","HW 06","HW 07","HW 08","HW 09","HW 10","HW 11","HW 12","HW Avg","Lab 01","Lab 02","Lab 03","Lab 04","Lab 05","Lab 06","Lab 07","Lab 08","Lab 09","Lab 10","Lab 11","Lab 12","Lab Avg","Midterm","Final" "2","u2","username","view2@test.com","","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" ''' self.assertEqual(body, expected_body, msg)
mjg2203/edx-platform-seas
lms/djangoapps/instructor/tests/test_legacy_download_csv.py
Python
agpl-3.0
3,149
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2015 Smile (<http://www.smile.fr>). All Rights Reserved # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields, api, _ from openerp.exceptions import Warning STATUS = [('valid', 'Valid'), ('expired', 'Expired'), ('archived', 'Archived')] class IrAttachementType(models.Model): _name = 'ir.attachment.type' name = fields.Char(required=True, translate=True) _sql_constraints = [ ('unique_name', 'UNIQUE (name)', _('Document type name must be unique')), ] @api.multi def unlink(self): if self._context.get('force_unlink_doc_type'): return super(IrAttachementType, self).unlink() raise Warning(_('Attention : You cannot unlink document type!')) class IrAttachement(models.Model): _inherit = 'ir.attachment' document_type_id = fields.Many2one('ir.attachment.type', string="Document Type") document_date = fields.Date(default=lambda self: fields.Date.today()) expiry_date = fields.Date() archived = fields.Boolean() status = fields.Selection(STATUS, readonly=True, default='valid') @api.multi def _compute_document_status(self): for doc in self: status = 'valid' today = fields.Date.today() if doc.expiry_date: if doc.expiry_date >= today and not doc.archived: status = 'valid' elif doc.expiry_date < today and not doc.archived: status = 'expired' if doc.archived: status = 'archived' if doc.expiry_date > today: doc.expiry_date = today if doc.status != status: doc.status = status @api.model def create(self, values): res = super(IrAttachement, self).create(values) res._compute_document_status() return res @api.multi def write(self, values): res = super(IrAttachement, self).write(values) self._compute_document_status() return res
ovnicraft/odoo_addons
smile_document/models/document.py
Python
agpl-3.0
2,926
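The smile_document model above derives `status` from two fields: `archived` wins over everything else, otherwise the attachment is `expired` once `expiry_date` is in the past and `valid` otherwise. The same rule extracted as a plain function for illustration; the Odoo ORM plumbing and the side effect that clamps `expiry_date` when a document is archived are left out.

from datetime import date

def document_status(expiry_date, archived, today=None):
    """Status rule applied by IrAttachement._compute_document_status above."""
    today = today or date.today()
    if archived:
        return 'archived'
    if expiry_date and expiry_date < today:
        return 'expired'
    return 'valid'

assert document_status(date(2030, 1, 1), False, today=date(2024, 1, 1)) == 'valid'
assert document_status(date(2020, 1, 1), False, today=date(2024, 1, 1)) == 'expired'
assert document_status(date(2030, 1, 1), True,  today=date(2024, 1, 1)) == 'archived'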
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import argparse

from tests.contrib.utils.gcp_authenticator import GcpAuthenticator, GCP_SPANNER_KEY
from tests.contrib.utils.logging_command_executor import LoggingCommandExecutor

GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
CBT_INSTANCE = os.environ.get('CBT_INSTANCE_ID', 'testinstance')


class GCPBigtableTestHelper(LoggingCommandExecutor):
    def delete_instance(self):
        self.execute_cmd([
            'gcloud', 'bigtable', '--project', GCP_PROJECT_ID, '--quiet', '--verbosity=none',
            'instances', 'delete', CBT_INSTANCE
        ])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Create or delete Cloud Bigtable instances for system tests.')
    parser.add_argument('--action', dest='action', required=True,
                        choices=('delete-instance', 'before-tests', 'after-tests'))
    action = parser.parse_args().action

    helper = GCPBigtableTestHelper()
    gcp_authenticator = GcpAuthenticator(GCP_SPANNER_KEY)
    helper.log.info('Starting action: {}'.format(action))

    gcp_authenticator.gcp_store_authentication()
    try:
        gcp_authenticator.gcp_authenticate()
        if action == 'before-tests':
            pass
        elif action == 'after-tests':
            pass
        elif action == 'delete-instance':
            helper.delete_instance()
        else:
            raise Exception("Unknown action: {}".format(action))
    finally:
        gcp_authenticator.gcp_restore_authentication()
        helper.log.info('Finishing action: {}'.format(action))
fenglu-g/incubator-airflow
tests/contrib/operators/test_gcp_bigtable_operator_system_helper.py
Python
apache-2.0
2,458
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import os from airflow.models import BaseOperator # ------------------------------------------------------------------------ # # #TODO #FIXME Airflow 2.0 # # Old import machinary below. # # This is deprecated but should be kept until Airflow 2.0 # for compatibility. # # ------------------------------------------------------------------------ # Imports operators dynamically while keeping the package API clean, # abstracting the underlying modules _operators = { 'bash_operator': ['BashOperator'], 'check_operator': [ 'CheckOperator', 'ValueCheckOperator', 'IntervalCheckOperator', ], 'python_operator': [ 'PythonOperator', 'BranchPythonOperator', 'ShortCircuitOperator', ], 'hive_operator': ['HiveOperator'], 'pig_operator': ['PigOperator'], 'presto_check_operator': [ 'PrestoCheckOperator', 'PrestoValueCheckOperator', 'PrestoIntervalCheckOperator', ], 'sensors': [ 'BaseSensorOperator', 'ExternalTaskSensor', 'HdfsSensor', 'HivePartitionSensor', 'HttpSensor', 'MetastorePartitionSensor', 'NamedHivePartitionSensor', 'S3KeySensor', 'S3PrefixSensor', 'SqlSensor', 'TimeDeltaSensor', 'TimeSensor', 'WebHdfsSensor', ], 'dagrun_operator': ['TriggerDagRunOperator'], 'dummy_operator': ['DummyOperator'], 'email_operator': ['EmailOperator'], 'hive_to_samba_operator': ['Hive2SambaOperator'], 'latest_only_operator': ['LatestOnlyOperator'], 'mysql_operator': ['MySqlOperator'], 'sqlite_operator': ['SqliteOperator'], 'mysql_to_hive': ['MySqlToHiveTransfer'], 'postgres_operator': ['PostgresOperator'], 'subdag_operator': ['SubDagOperator'], 'hive_stats_operator': ['HiveStatsCollectionOperator'], 's3_to_hive_operator': ['S3ToHiveTransfer'], 'hive_to_mysql': ['HiveToMySqlTransfer'], 'presto_to_mysql': ['PrestoToMySqlTransfer'], 's3_file_transform_operator': ['S3FileTransformOperator'], 'http_operator': ['SimpleHttpOperator'], 'hive_to_druid': ['HiveToDruidTransfer'], 'jdbc_operator': ['JdbcOperator'], 'mssql_operator': ['MsSqlOperator'], 'mssql_to_hive': ['MsSqlToHiveTransfer'], 'slack_operator': ['SlackAPIOperator', 'SlackAPIPostOperator'], 'generic_transfer': ['GenericTransfer'], 'oracle_operator': ['OracleOperator'] } if not os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False): from airflow.utils.helpers import AirflowImporter airflow_importer = AirflowImporter(sys.modules[__name__], _operators) def _integrate_plugins(): """Integrate plugins to the context""" from airflow.plugins_manager import operators_modules for operators_module in operators_modules: sys.modules[operators_module.__name__] = operators_module globals()[operators_module._name] = operators_module ########################################################## # TODO FIXME Remove in Airflow 2.0 if not os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False): from 
zope.deprecation import deprecated as _deprecated for _operator in operators_module._objects: operator_name = _operator.__name__ globals()[operator_name] = _operator _deprecated( operator_name, "Importing plugin operator '{i}' directly from " "'airflow.operators' has been deprecated. Please " "import from 'airflow.operators.[plugin_module]' " "instead. Support for direct imports will be dropped " "entirely in Airflow 2.0.".format(i=operator_name))
danielvdende/incubator-airflow
airflow/operators/__init__.py
Python
apache-2.0
4,581
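The `_operators` table in the Airflow package above drives the pre-2.0 lazy import shim: AirflowImporter uses it so that `from airflow.operators import BashOperator` keeps resolving through the submodule that actually defines the class. A toy illustration of the table inversion involved; this is not the real AirflowImporter, which additionally hooks the module into sys.modules and imports lazily.

_operators = {
    'bash_operator': ['BashOperator'],
    'python_operator': ['PythonOperator', 'BranchPythonOperator'],
}

# Invert the table: which submodule does a given public name live in?
_name_to_module = {
    name: module
    for module, names in _operators.items()
    for name in names
}

print(_name_to_module['BranchPythonOperator'])   # 'python_operator'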