from toontown.estate import DistributedPlantBase
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import PythonUtil
from toontown.toonbase import ToontownBattleGlobals
from toontown.toontowngui import TTDialog
from toontown.toontowngui.TeaserPanel import TeaserPanel
from toontown.toonbase import TTLocalizer
import GardenGlobals
import HouseGlobals
from direct.task import Task
from pandac.PandaModules import *
from otp.otpbase import OTPGlobals
from toontown.estate import DistributedLawnDecor
DIRT_AS_WATER_INDICATOR = True
class DistributedGagTree(DistributedPlantBase.DistributedPlantBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGagTree')
def __init__(self, cr):
DistributedPlantBase.DistributedPlantBase.__init__(self, cr)
base.tree = self
self.collSphereRadius = 4.2
self.confirmDialog = None
self.resultDialog = None
self.dirtMound = None
self.sandMound = None
self.needToPlant = 0
self.needToLoad = 0
self.backupFruits = []
self.signHasBeenStuck2Ground = False
self._teaserPanel = None
self.setName('DistributedGagTree')
return
def delete(self):
DistributedPlantBase.DistributedPlantBase.delete(self)
if self._teaserPanel:
self._teaserPanel.destroy()
self._teaserPanel = None
del self.prop
del self.prop2
del self.dirtMound
del self.sandMound
self.signModel.removeNode()
self.signModel = None
return
def setTypeIndex(self, typeIndex):
DistributedPlantBase.DistributedPlantBase.setTypeIndex(self, typeIndex)
track, level = GardenGlobals.getTreeTrackAndLevel(typeIndex)
self.gagTrack = track
self.gagLevel = level
invModel = loader.loadModel('phase_3.5/models/gui/inventory_icons')
propName = ToontownBattleGlobals.AvPropsNew[track][level]
self.prop = invModel.find('**/' + propName)
self.prop.setScale(7)
invModel.removeNode()
        invModel2 = loader.loadModel('phase_3.5/models/gui/inventory_icons')
        propName = ToontownBattleGlobals.AvPropsNew[track][level]
        self.prop2 = invModel2.find('**/' + propName)
        self.prop2.setScale(7)
        invModel2.removeNode()
self.filename = self.attributes['filename']
self.maxFruit = self.attributes['maxFruit']
if hasattr(self, 'needToLoad'):
if self.needToLoad:
self.loadModel()
def loadModel(self):
if not hasattr(self, 'filename'):
self.needToLoad = 1
return
if not self.rotateNode:
self.rotateNode = self.plantPath.attachNewNode('rotate')
all = loader.loadModel(self.filename)
self.modelName = self.getModelName()
if self.isWilted():
self.modelName += '_wilt'
self.model = all.find('**/' + self.modelName)
all.detachNode()
shadow = self.model.find('**/shadow1')
if shadow:
shadow.hide()
self.model.reparentTo(self.rotateNode)
if self.isFruiting() and not self.isWilted():
self.fruits = []
for i in range(1, self.maxFruit + 1):
pos = self.model.find('**/locator' + str(i))
if pos and not pos.isEmpty():
fruit = self.prop.copyTo(self.model)
fruit.setPos(pos, 0, 0, 0)
fruit.setScale(13)
self.fruits.append(fruit)
self.createBackupFruits()
if DIRT_AS_WATER_INDICATOR:
self.dirtMound = loader.loadModel('phase_5.5/models/estate/dirt_mound')
self.dirtMound.reparentTo(self.model)
self.sandMound = loader.loadModel('phase_5.5/models/estate/sand_mound')
self.sandMound.reparentTo(self.model)
self.adjustGrowth()
self.signModel = loader.loadModel('phase_5.5/models/estate/garden_sign.bam')
self.signModel.setPos(3.5, 0, 0.025)
self.signModel.reparentTo(self.rotateNode)
owner = self.getOwnerIndex()
color = HouseGlobals.houseColors[owner]
for geomName in ('sign', 'sign1'):
sign = self.signModel.find('**/' + geomName)
if sign:
sign.setColor(*color)
self.prop.setPos(0.1, -0.17, 1.63)
self.prop.reparentTo(self.signModel)
self.prop2.setPos(0.15, 0.17, 1.63)
self.prop2.setH(self.prop.getH() + 180)
self.prop2.reparentTo(self.signModel)
self.needToLoad = 0
if self.needToPlant:
self.stickParts()
def setupShadow(self):
DistributedPlantBase.DistributedPlantBase.setupShadow(self)
self.adjustGrowth()
def makeMovieNode(self):
self.movieNode = self.rotateNode.attachNewNode('moviePos')
self.movieNode.setPos(0, -5, 0)
self.createBackupFruits()
def handlePicking(self):
messenger.send('wakeup')
if self.isFruiting() and self.canBeHarvested():
if self.velvetRoped():
self._teaserPanel = TeaserPanel(pageName='pickGags')
localAvatar._gagTreeVelvetRoped = None
else:
self.startInteraction()
self.doHarvesting()
return
fullName = self.name
text = TTLocalizer.ConfirmRemoveTree % {'tree': fullName}
if self.hasDependentTrees():
text += TTLocalizer.ConfirmWontBeAbleToHarvest
self.confirmDialog = TTDialog.TTDialog(style=TTDialog.YesNo, text=text, command=self.confirmCallback)
self.confirmDialog.show()
self.startInteraction()
return
def confirmCallback(self, value):
self.confirmDialog.destroy()
self.confirmDialog = None
if value > 0:
self.doPicking()
else:
self.finishInteraction()
return
def doPicking(self):
if not self.canBePicked():
return
self.sendUpdate('removeItem', [])
def createBackupFruits(self):
if not hasattr(self, 'fruits'):
return
if not self.fruits:
return
if not hasattr(self, 'movieNode'):
return
if not self.movieNode:
return
if self.movieNode.isEmpty():
return
if not self.signHasBeenStuck2Ground:
return
if not self.backupFruits:
for fruit in self.fruits:
newFruit = fruit.copyTo(render)
newFruit.setPos(fruit.getPos(render))
newFruit.setH(self.movieNode.getH(render))
newFruit.hide()
self.backupFruits.append(newFruit)
def clearBackupFruits(self):
self.backupFruits = []
def doHarvesting(self):
if not self.canBePicked():
return
if hasattr(self, 'backupFruits'):
for fruit in self.backupFruits:
fruit.show()
self.sendUpdate('requestHarvest', [])
def getTrack(self):
return self.gagTrack
def getGagLevel(self):
return self.gagLevel
def setWaterLevel(self, waterLevel):
self.waterLevel = waterLevel
self.adjustWaterIndicator()
    def setGrowthLevel(self, growthLevel):
        self.growthLevel = growthLevel
        if self.model:
            newModelName = self.getModelName()
            if newModelName != self.modelName:
                # The growth stage now maps to a different model; rebuild it.
                self.model.removeNode()
                self.loadModel()
                self.adjustWaterIndicator()
                self.stick2Ground()
            else:
                self.adjustGrowth()
def adjustGrowth(self):
newScale = self.growthLevel + 1
if newScale > 1:
newScale = 1
shadowScale = 2.5
collScale = 1.5
if self.isSeedling():
shadowScale = 1
collScale = 1
if self.shadowJoint:
self.shadowJoint.setScale(shadowScale)
if DIRT_AS_WATER_INDICATOR:
dirtMoundScale = shadowScale * 1.5
dirtMoundDepth = 2.0
if self.isEstablished():
dirtMoundScale = shadowScale * 1.2
self.dirtMound.setScale(dirtMoundScale, dirtMoundScale, dirtMoundDepth)
self.sandMound.setScale(dirtMoundScale, dirtMoundScale, dirtMoundDepth)
self.adjustWaterIndicator()
def setWilted(self, wilted):
self.wilted = wilted
def isWilted(self):
return self.wilted
def setMovie(self, mode, avId):
if mode == GardenGlobals.MOVIE_HARVEST:
self.doHarvestTrack(avId)
elif mode == GardenGlobals.MOVIE_WATER:
self.doWaterTrack(avId)
elif mode == GardenGlobals.MOVIE_FINISHPLANTING:
self.doFinishPlantingTrack(avId)
elif mode == GardenGlobals.MOVIE_REMOVE:
self.doDigupTrack(avId)
def doFinishPlantingTrack(self, avId):
toon = base.cr.doId2do.get(avId)
if not toon:
return
self.finishMovies()
self.movie = Sequence()
if self.model:
self.model.setTransparency(1)
self.model.setAlphaScale(0)
self.movie.append(LerpFunc(self.model.setAlphaScale, fromData=0, toData=1, duration=3))
if self.signModel:
self.signModel.hide()
self.movie.append(Func(self.signModel.show))
self.movie.append(LerpScaleInterval(self.signModel, 1, 1, 0))
self.movie.append(Func(toon.loop, 'neutral'))
if avId == localAvatar.doId:
self.movie.append(Func(self.finishInteraction))
self.movie.append(Func(self.movieDone))
self.movie.append(Func(self.doResultDialog))
self.movie.start()
def doHarvestTrack(self, avId):
toon = base.cr.doId2do.get(avId)
if not toon:
return
self.finishMovies()
moveTrack = self.generateToonMoveTrack(toon)
harvestTrack = self.generateHarvestTrack(toon)
self.movie = Sequence(self.startCamIval(avId), moveTrack, harvestTrack, self.stopCamIval(avId))
if avId == localAvatar.doId:
self.movie.append(Func(self.finishInteraction))
self.movie.append(Func(self.movieDone))
self.movie.start()
def setupShadow(self):
if DIRT_AS_WATER_INDICATOR:
pass
else:
DistributedPlantBase.DistributedPlantBase.setupShadow(self)
def generateHarvestTrack(self, toon):
pos = toon.getPos(render)
pos.setZ(pos.getZ() + 2)
fruitTrack = Parallel()
for fruit in self.backupFruits:
fruitTrack.append(Sequence(Func(fruit.show), LerpPosInterval(fruit, 1.5, pos, startPos=Point3(fruit.getX(), fruit.getY(), fruit.getZ() + self.model.getZ())), Func(fruit.removeNode)))
self.fruits = None
harvestTrack = Sequence(fruitTrack, Func(self.clearBackupFruits))
return harvestTrack
def adjustWaterIndicator(self):
DistributedPlantBase.DistributedPlantBase.adjustWaterIndicator(self)
if self.dirtMound:
curWaterLevel = self.waterLevel
if curWaterLevel > self.maxWaterLevel:
curWaterLevel = self.maxWaterLevel
if curWaterLevel > 0:
darkestColorScale = 0.4
lightestColorScale = 1.0
scaleRange = lightestColorScale - darkestColorScale
scaleIncrement = scaleRange / self.maxWaterLevel
darker = lightestColorScale - scaleIncrement * curWaterLevel
self.dirtMound.setColorScale(darker, darker, darker, 1.0)
self.sandMound.hide()
self.dirtMound.show()
else:
self.sandMound.show()
self.dirtMound.hide()
def stickParts(self):
if not hasattr(self, 'signModel'):
self.needToPlant = 1
return Task.done
if self.signModel.isEmpty():
return Task.done
testPath = NodePath('testPath')
testPath.reparentTo(render)
cRay = CollisionRay(0.0, 0.0, 40000.0, 0.0, 0.0, -1.0)
cRayNode = CollisionNode(self.uniqueName('estate-FloorRay'))
cRayNode.addSolid(cRay)
cRayNode.setFromCollideMask(OTPGlobals.FloorBitmask)
cRayNode.setIntoCollideMask(BitMask32.allOff())
cRayNodePath = testPath.attachNewNode(cRayNode)
queue = CollisionHandlerQueue()
picker = CollisionTraverser()
picker.addCollider(cRayNodePath, queue)
testPath.setPos(self.signModel.getX(render), self.signModel.getY(render), 0)
picker.traverse(render)
if queue.getNumEntries() > 0:
queue.sortEntries()
for index in range(queue.getNumEntries()):
entry = queue.getEntry(index)
if DistributedLawnDecor.recurseParent(entry.getIntoNode(), 'terrain_DNARoot'):
self.signModel.wrtReparentTo(render)
self.signModel.setZ(entry.getSurfacePoint(render)[2] + self.stickUp + 0.1)
self.signModel.wrtReparentTo(self.rotateNode)
self.signHasBeenStuck2Ground = True
self.createBackupFruits()
return Task.done
return Task.done
def canBeHarvested(self):
if not base.cr.isPaid():
if self.velvetRoped():
if hasattr(localAvatar, '_gagTreeVelvetRoped'):
return False
myTrack, myLevel = GardenGlobals.getTreeTrackAndLevel(self.typeIndex)
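        # A tree can only be harvested when every lower-level tree in the same gag
        # track is planted and fully grown; the loops below gather the local toon's
        # trees for this track and check each prerequisite level.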
levelsInTrack = []
levelTreeDict = {}
allGagTrees = base.cr.doFindAll('DistributedGagTree')
for gagTree in allGagTrees:
if gagTree.getOwnerId() == localAvatar.doId:
curTrack, curLevel = GardenGlobals.getTreeTrackAndLevel(gagTree.typeIndex)
if curTrack == myTrack:
levelsInTrack.append(curLevel)
levelTreeDict[curLevel] = gagTree
for levelToTest in range(myLevel):
if levelToTest not in levelsInTrack:
return False
curTree = levelTreeDict[levelToTest]
if not curTree.isGTEFullGrown():
return False
return True
def hasDependentTrees(self):
myTrack, myLevel = GardenGlobals.getTreeTrackAndLevel(self.typeIndex)
allGagTrees = base.cr.doFindAll('DistributedGagTree')
for gagTree in allGagTrees:
if gagTree.getOwnerId() == localAvatar.doId:
curTrack, curLevel = GardenGlobals.getTreeTrackAndLevel(gagTree.typeIndex)
if curTrack == myTrack:
if myLevel < curLevel:
return True
return False
def doResultDialog(self):
self.startInteraction()
curTrack, curLevel = GardenGlobals.getTreeTrackAndLevel(self.typeIndex)
species = GardenGlobals.getTreeTypeIndex(curTrack, curLevel)
treeName = GardenGlobals.PlantAttributes[species]['name']
stringToShow = TTLocalizer.getResultPlantedSomethingSentence(treeName)
self.resultDialog = TTDialog.TTDialog(style=TTDialog.Acknowledge, text=stringToShow, command=self.resultsCallback)
def resultsCallback(self, value):
if self.resultDialog:
self.resultDialog.destroy()
self.resultDialog = None
self.finishInteraction()
return
def velvetRoped(self):
return not base.cr.isPaid() and ToontownBattleGlobals.gagIsPaidOnly(self.gagTrack, self.gagLevel)
def allowedToPick(self):
retval = True
if self.velvetRoped():
retval = False
return retval
def unlockPick(self):
retval = True
toon = base.localAvatar
inventory = toon.inventory
load = inventory.totalProps
maxCarry = toon.getMaxCarry()
if load >= maxCarry and not self.gagLevel > ToontownBattleGlobals.LAST_REGULAR_GAG_LEVEL:
retval = False
if inventory.numItem(self.gagTrack, self.gagLevel) >= inventory.getMax(self.gagTrack, self.gagLevel):
retval = False
return retval
#!/usr/bin/env python
#
# spiffsgen is a tool used to generate a spiffs image from a directory
#
# Copyright 2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import io
import math
import struct
import argparse
import ctypes
SPIFFS_PH_FLAG_USED_FINAL_INDEX = 0xF8
SPIFFS_PH_FLAG_USED_FINAL = 0xFC
SPIFFS_PH_FLAG_LEN = 1
SPIFFS_PH_IX_SIZE_LEN = 4
SPIFFS_PH_IX_OBJ_TYPE_LEN = 1
SPIFFS_TYPE_FILE = 1
# Based on typedefs under spiffs_config.h
SPIFFS_OBJ_ID_LEN = 2 # spiffs_obj_id
SPIFFS_SPAN_IX_LEN = 2 # spiffs_span_ix
SPIFFS_PAGE_IX_LEN = 2 # spiffs_page_ix
SPIFFS_BLOCK_IX_LEN = 2 # spiffs_block_ix
class SpiffsBuildConfig():
def __init__(self, page_size, page_ix_len, block_size,
block_ix_len, meta_len, obj_name_len, obj_id_len,
span_ix_len, packed, aligned, endianness, use_magic, use_magic_len):
if block_size % page_size != 0:
raise RuntimeError("block size should be a multiple of page size")
self.page_size = page_size
self.block_size = block_size
self.obj_id_len = obj_id_len
self.span_ix_len = span_ix_len
self.packed = packed
self.aligned = aligned
self.obj_name_len = obj_name_len
self.meta_len = meta_len
self.page_ix_len = page_ix_len
self.block_ix_len = block_ix_len
self.endianness = endianness
self.use_magic = use_magic
self.use_magic_len = use_magic_len
self.PAGES_PER_BLOCK = self.block_size // self.page_size
self.OBJ_LU_PAGES_PER_BLOCK = int(math.ceil(self.block_size / self.page_size * self.obj_id_len / self.page_size))
self.OBJ_USABLE_PAGES_PER_BLOCK = self.PAGES_PER_BLOCK - self.OBJ_LU_PAGES_PER_BLOCK
self.OBJ_LU_PAGES_OBJ_IDS_LIM = self.page_size // self.obj_id_len
self.OBJ_DATA_PAGE_HEADER_LEN = self.obj_id_len + self.span_ix_len + SPIFFS_PH_FLAG_LEN
pad = 4 - (4 if self.OBJ_DATA_PAGE_HEADER_LEN % 4 == 0 else self.OBJ_DATA_PAGE_HEADER_LEN % 4)
self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED = self.OBJ_DATA_PAGE_HEADER_LEN + pad
self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED_PAD = pad
self.OBJ_DATA_PAGE_CONTENT_LEN = self.page_size - self.OBJ_DATA_PAGE_HEADER_LEN
self.OBJ_INDEX_PAGES_HEADER_LEN = (self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED + SPIFFS_PH_IX_SIZE_LEN +
SPIFFS_PH_IX_OBJ_TYPE_LEN + self.obj_name_len + self.meta_len)
self.OBJ_INDEX_PAGES_OBJ_IDS_HEAD_LIM = (self.page_size - self.OBJ_INDEX_PAGES_HEADER_LEN) // self.block_ix_len
        self.OBJ_INDEX_PAGES_OBJ_IDS_LIM = (self.page_size - self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED) // self.block_ix_len
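# Illustrative values for the derived constants above, assuming the default geometry
# that main() passes in below (256-byte pages, 4096-byte blocks, 2-byte object ids):
#   PAGES_PER_BLOCK            = 4096 // 256        = 16
#   OBJ_LU_PAGES_PER_BLOCK     = ceil(16 * 2 / 256) = 1
#   OBJ_USABLE_PAGES_PER_BLOCK = 16 - 1             = 15
#   OBJ_DATA_PAGE_HEADER_LEN   = 2 + 2 + 1          = 5 (padded to 8 when aligned)
#   OBJ_DATA_PAGE_CONTENT_LEN  = 256 - 5            = 251
# i.e. each 4 KiB block carries one lookup page and 15 index/data pages.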
class SpiffsFullError(RuntimeError):
def __init__(self, message=None):
super(SpiffsFullError, self).__init__(message)
class SpiffsPage():
_endianness_dict = {
"little": "<",
"big": ">"
}
_len_dict = {
1: "B",
2: "H",
4: "I",
8: "Q"
}
_type_dict = {
1: ctypes.c_ubyte,
2: ctypes.c_ushort,
4: ctypes.c_uint,
8: ctypes.c_ulonglong
}
def __init__(self, bix, build_config):
self.build_config = build_config
self.bix = bix
class SpiffsObjLuPage(SpiffsPage):
def __init__(self, bix, build_config):
SpiffsPage.__init__(self, bix, build_config)
self.obj_ids_limit = self.build_config.OBJ_LU_PAGES_OBJ_IDS_LIM
self.obj_ids = list()
def _calc_magic(self, blocks_lim):
        # Calculate the magic value mirroring the computation done by the macro SPIFFS_MAGIC defined in
# spiffs_nucleus.h
magic = 0x20140529 ^ self.build_config.page_size
if self.build_config.use_magic_len:
magic = magic ^ (blocks_lim - self.bix)
magic = SpiffsPage._type_dict[self.build_config.obj_id_len](magic)
return magic.value
def register_page(self, page):
if not self.obj_ids_limit > 0:
raise SpiffsFullError()
obj_id = (page.obj_id, page.__class__)
self.obj_ids.append(obj_id)
self.obj_ids_limit -= 1
    def to_binary(self):
        img = b""
for (obj_id, page_type) in self.obj_ids:
if page_type == SpiffsObjIndexPage:
obj_id ^= (1 << ((self.build_config.obj_id_len * 8) - 1))
img += struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[self.build_config.obj_id_len], obj_id)
assert(len(img) <= self.build_config.page_size)
img += b"\xFF" * (self.build_config.page_size - len(img))
return img
def magicfy(self, blocks_lim):
# Only use magic value if no valid obj id has been written to the spot, which is the
# spot taken up by the last obj id on last lookup page. The parent is responsible
# for determining which is the last lookup page and calling this function.
remaining = self.obj_ids_limit
empty_obj_id_dict = {
1: 0xFF,
2: 0xFFFF,
4: 0xFFFFFFFF,
8: 0xFFFFFFFFFFFFFFFF
}
if (remaining >= 2):
for i in range(remaining):
if i == remaining - 2:
self.obj_ids.append((self._calc_magic(blocks_lim), SpiffsObjDataPage))
break
else:
self.obj_ids.append((empty_obj_id_dict[self.build_config.obj_id_len], SpiffsObjDataPage))
self.obj_ids_limit -= 1
class SpiffsObjIndexPage(SpiffsPage):
def __init__(self, obj_id, span_ix, size, name, build_config):
SpiffsPage.__init__(self, 0, build_config)
self.obj_id = obj_id
self.span_ix = span_ix
self.name = name
self.size = size
if self.span_ix == 0:
self.pages_lim = self.build_config.OBJ_INDEX_PAGES_OBJ_IDS_HEAD_LIM
else:
self.pages_lim = self.build_config.OBJ_INDEX_PAGES_OBJ_IDS_LIM
self.pages = list()
def register_page(self, page):
if not self.pages_lim > 0:
raise SpiffsFullError
self.pages.append(page.offset)
self.pages_lim -= 1
def to_binary(self):
obj_id = self.obj_id ^ (1 << ((self.build_config.obj_id_len * 8) - 1))
img = struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[self.build_config.obj_id_len] +
SpiffsPage._len_dict[self.build_config.span_ix_len] +
SpiffsPage._len_dict[SPIFFS_PH_FLAG_LEN],
obj_id,
self.span_ix,
SPIFFS_PH_FLAG_USED_FINAL_INDEX)
# Add padding before the object index page specific information
img += b"\xFF" * self.build_config.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED_PAD
        # If this is the first object index page for the object, add filename, type
        # and size information
if self.span_ix == 0:
img += struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[SPIFFS_PH_IX_SIZE_LEN] +
SpiffsPage._len_dict[SPIFFS_PH_FLAG_LEN],
self.size,
SPIFFS_TYPE_FILE)
img += self.name.encode() + (b"\x00" * ((self.build_config.obj_name_len - len(self.name)) + self.build_config.meta_len))
        # Finally, add the page indexes of the data pages
for page in self.pages:
page = page >> int(math.log(self.build_config.page_size, 2))
img += struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[self.build_config.page_ix_len], page)
assert(len(img) <= self.build_config.page_size)
img += b"\xFF" * (self.build_config.page_size - len(img))
return img
class SpiffsObjDataPage(SpiffsPage):
def __init__(self, offset, obj_id, span_ix, contents, build_config):
SpiffsPage.__init__(self, 0, build_config)
self.obj_id = obj_id
self.span_ix = span_ix
self.contents = contents
self.offset = offset
def to_binary(self):
img = struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[self.build_config.obj_id_len] +
SpiffsPage._len_dict[self.build_config.span_ix_len] +
SpiffsPage._len_dict[SPIFFS_PH_FLAG_LEN],
self.obj_id,
self.span_ix,
SPIFFS_PH_FLAG_USED_FINAL)
img += self.contents
assert(len(img) <= self.build_config.page_size)
img += b"\xFF" * (self.build_config.page_size - len(img))
return img
class SpiffsBlock():
def _reset(self):
self.cur_obj_index_span_ix = 0
self.cur_obj_data_span_ix = 0
self.cur_obj_id = 0
self.cur_obj_idx_page = None
def __init__(self, bix, blocks_lim, build_config):
self.build_config = build_config
self.offset = bix * self.build_config.block_size
self.remaining_pages = self.build_config.OBJ_USABLE_PAGES_PER_BLOCK
self.pages = list()
self.bix = bix
lu_pages = list()
for i in range(self.build_config.OBJ_LU_PAGES_PER_BLOCK):
page = SpiffsObjLuPage(self.bix, self.build_config)
lu_pages.append(page)
self.pages.extend(lu_pages)
self.lu_page_iter = iter(lu_pages)
self.lu_page = next(self.lu_page_iter)
self._reset()
def _register_page(self, page):
if isinstance(page, SpiffsObjDataPage):
self.cur_obj_idx_page.register_page(page) # can raise SpiffsFullError
try:
self.lu_page.register_page(page)
except SpiffsFullError:
self.lu_page = next(self.lu_page_iter)
try:
self.lu_page.register_page(page)
except AttributeError: # no next lookup page
# Since the amount of lookup pages is pre-computed at every block instance,
# this should never occur
raise RuntimeError("invalid attempt to add page to a block when there is no more space in lookup")
self.pages.append(page)
def begin_obj(self, obj_id, size, name, obj_index_span_ix=0, obj_data_span_ix=0):
if not self.remaining_pages > 0:
raise SpiffsFullError()
self._reset()
self.cur_obj_id = obj_id
self.cur_obj_index_span_ix = obj_index_span_ix
self.cur_obj_data_span_ix = obj_data_span_ix
page = SpiffsObjIndexPage(obj_id, self.cur_obj_index_span_ix, size, name, self.build_config)
self._register_page(page)
self.cur_obj_idx_page = page
self.remaining_pages -= 1
self.cur_obj_index_span_ix += 1
def update_obj(self, contents):
if not self.remaining_pages > 0:
raise SpiffsFullError()
page = SpiffsObjDataPage(self.offset + (len(self.pages) * self.build_config.page_size),
self.cur_obj_id, self.cur_obj_data_span_ix, contents, self.build_config)
self._register_page(page)
self.cur_obj_data_span_ix += 1
self.remaining_pages -= 1
def end_obj(self):
self._reset()
def is_full(self):
return self.remaining_pages <= 0
def to_binary(self, blocks_lim):
img = b""
if self.build_config.use_magic:
for (idx, page) in enumerate(self.pages):
if idx == self.build_config.OBJ_LU_PAGES_PER_BLOCK - 1:
page.magicfy(blocks_lim)
img += page.to_binary()
else:
for page in self.pages:
img += page.to_binary()
assert(len(img) <= self.build_config.block_size)
img += b"\xFF" * (self.build_config.block_size - len(img))
return img
class SpiffsFS():
def __init__(self, img_size, build_config):
if img_size % build_config.block_size != 0:
raise RuntimeError("image size should be a multiple of block size")
self.img_size = img_size
self.build_config = build_config
self.blocks = list()
self.blocks_lim = self.img_size // self.build_config.block_size
self.remaining_blocks = self.blocks_lim
self.cur_obj_id = 1 # starting object id
def _create_block(self):
if self.is_full():
raise SpiffsFullError("the image size has been exceeded")
block = SpiffsBlock(len(self.blocks), self.blocks_lim, self.build_config)
self.blocks.append(block)
self.remaining_blocks -= 1
return block
def is_full(self):
return self.remaining_blocks <= 0
def create_file(self, img_path, file_path):
contents = None
if len(img_path) > self.build_config.obj_name_len:
raise RuntimeError("object name '%s' too long" % img_path)
name = img_path
with open(file_path, "rb") as obj:
contents = obj.read()
stream = io.BytesIO(contents)
try:
block = self.blocks[-1]
block.begin_obj(self.cur_obj_id, len(contents), name)
except (IndexError, SpiffsFullError):
block = self._create_block()
block.begin_obj(self.cur_obj_id, len(contents), name)
contents_chunk = stream.read(self.build_config.OBJ_DATA_PAGE_CONTENT_LEN)
while contents_chunk:
try:
block = self.blocks[-1]
try:
# This can fail because either (1) all the pages in block have been
# used or (2) object index has been exhausted.
block.update_obj(contents_chunk)
except SpiffsFullError:
# If its (1), use the outer exception handler
if block.is_full():
raise SpiffsFullError
# If its (2), write another object index page
block.begin_obj(self.cur_obj_id, len(contents), name,
obj_index_span_ix=block.cur_obj_index_span_ix,
obj_data_span_ix=block.cur_obj_data_span_ix)
continue
except (IndexError, SpiffsFullError):
# All pages in the block have been exhausted. Create a new block, copying
# the previous state of the block to a new one for the continuation of the
# current object
prev_block = block
block = self._create_block()
block.cur_obj_id = prev_block.cur_obj_id
block.cur_obj_idx_page = prev_block.cur_obj_idx_page
block.cur_obj_data_span_ix = prev_block.cur_obj_data_span_ix
block.cur_obj_index_span_ix = prev_block.cur_obj_index_span_ix
continue
contents_chunk = stream.read(self.build_config.OBJ_DATA_PAGE_CONTENT_LEN)
block.end_obj()
self.cur_obj_id += 1
def to_binary(self):
img = b""
for block in self.blocks:
img += block.to_binary(self.blocks_lim)
bix = len(self.blocks)
if self.build_config.use_magic:
# Create empty blocks with magic numbers
while self.remaining_blocks > 0:
block = SpiffsBlock(bix, self.blocks_lim, self.build_config)
img += block.to_binary(self.blocks_lim)
self.remaining_blocks -= 1
bix += 1
else:
            # Just fill the remaining space with 0xFF bytes
            img += b"\xFF" * (self.img_size - len(img))
return img
def main():
parser = argparse.ArgumentParser(description="SPIFFS Image Generator",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("image_size",
help="Size of the created image")
parser.add_argument("base_dir",
help="Path to directory from which the image will be created")
parser.add_argument("output_file",
help="Created image output file path")
parser.add_argument("--page-size",
help="Logical page size. Set to value same as CONFIG_SPIFFS_PAGE_SIZE.",
type=int,
default=256)
parser.add_argument("--block-size",
help="Logical block size. Set to the same value as the flash chip's sector size (g_rom_flashchip.sector_size).",
type=int,
default=4096)
parser.add_argument("--obj-name-len",
help="File full path maximum length. Set to value same as CONFIG_SPIFFS_OBJ_NAME_LEN.",
type=int,
default=32)
parser.add_argument("--meta-len",
help="File metadata length. Set to value same as CONFIG_SPIFFS_META_LENGTH.",
type=int,
default=4)
parser.add_argument("--use-magic",
help="Use magic number to create an identifiable SPIFFS image. Specify if CONFIG_SPIFFS_USE_MAGIC.",
action="store_true",
default=True)
parser.add_argument("--use-magic-len",
help="Use position in memory to create different magic numbers for each block. Specify if CONFIG_SPIFFS_USE_MAGIC_LENGTH.",
action="store_true",
default=True)
parser.add_argument("--big-endian",
help="Specify if the target architecture is big-endian. If not specified, little-endian is assumed.",
action="store_true",
default=False)
args = parser.parse_args()
if not os.path.exists(args.base_dir):
raise RuntimeError("given base directory %s does not exist" % args.base_dir)
with open(args.output_file, "wb") as image_file:
image_size = int(args.image_size, 0)
spiffs_build_default = SpiffsBuildConfig(args.page_size, SPIFFS_PAGE_IX_LEN,
args.block_size, SPIFFS_BLOCK_IX_LEN, args.meta_len,
args.obj_name_len, SPIFFS_OBJ_ID_LEN, SPIFFS_SPAN_IX_LEN,
True, True, "big" if args.big_endian else "little",
args.use_magic, args.use_magic_len)
spiffs = SpiffsFS(image_size, spiffs_build_default)
for root, dirs, files in os.walk(args.base_dir):
for f in files:
full_path = os.path.join(root, f)
spiffs.create_file("/" + os.path.relpath(full_path, args.base_dir).replace("\\", "/"), full_path)
image = spiffs.to_binary()
image_file.write(image)
if __name__ == "__main__":
main()
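# Example invocation (illustrative; the image size may also be given in hex because
# it is parsed with int(value, 0)):
#   ./spiffsgen.py 0x100000 ./base_dir spiffs.bin --page-size 256 --block-size 4096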
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
nonwhitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
"""A way of looking up TreeBuilder subclasses by their name or by desired
features.
"""
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features.
        :param treebuilder_class: A subclass of TreeBuilder. Its .features
           attribute should list its features.
"""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
"""Look up a TreeBuilder subclass with the desired features.
:param features: A list of features to look for. If none are
provided, the most recently registered TreeBuilder subclass
will be used.
:return: A TreeBuilder subclass, or None if there's no
registered subclass with all the requested features.
"""
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
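# Illustrative lookups against the registry (assuming builders advertising these
# features have been registered, as happens at the bottom of this module):
#   builder_registry.lookup('html', 'fast')  # a builder with both features, or None
#   builder_registry.lookup()                # the most recently registered builder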
class TreeBuilder(object):
"""Turn a textual document into a Beautiful Soup object tree."""
NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
features = []
is_xml = False
picklable = False
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
DEFAULT_CDATA_LIST_ATTRIBUTES = {}
DEFAULT_PRESERVE_WHITESPACE_TAGS = set()
USE_DEFAULT = object()
# Most parsers don't keep track of line numbers.
TRACKS_LINE_NUMBERS = False
def __init__(self, multi_valued_attributes=USE_DEFAULT,
preserve_whitespace_tags=USE_DEFAULT,
store_line_numbers=USE_DEFAULT):
"""Constructor.
:param multi_valued_attributes: If this is set to None, the
TreeBuilder will not turn any values for attributes like
         'class' into lists. Setting this to a dictionary will
customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES
for an example.
Internally, these are called "CDATA list attributes", but that
probably doesn't make sense to an end-user, so the argument name
is `multi_valued_attributes`.
:param preserve_whitespace_tags: A list of tags to treat
the way <pre> tags are treated in HTML. Tags in this list
are immune from pretty-printing; their contents will always be
output as-is.
:param store_line_numbers: If the parser keeps track of the
line numbers and positions of the original markup, that
information will, by default, be stored in each corresponding
`Tag` object. You can turn this off by passing
store_line_numbers=False. If the parser you're using doesn't
keep track of this information, then setting store_line_numbers=True
will do nothing.
"""
self.soup = None
if multi_valued_attributes is self.USE_DEFAULT:
multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
self.cdata_list_attributes = multi_valued_attributes
if preserve_whitespace_tags is self.USE_DEFAULT:
preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
self.preserve_whitespace_tags = preserve_whitespace_tags
if store_line_numbers == self.USE_DEFAULT:
store_line_numbers = self.TRACKS_LINE_NUMBERS
self.store_line_numbers = store_line_numbers
def initialize_soup(self, soup):
"""The BeautifulSoup object has been initialized and is now
being associated with the TreeBuilder.
:param soup: A BeautifulSoup object.
"""
self.soup = soup
def reset(self):
"""Do any work necessary to reset the underlying parser
for a new document.
By default, this does nothing.
"""
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p/>" or "<p>".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no children.
"<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will
be left alone.
:param tag_name: The name of a markup tag.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
"""Run some incoming markup through some parsing process,
populating the `BeautifulSoup` object in self.soup.
This method is not implemented in TreeBuilder; it must be
implemented in subclasses.
:return: None.
"""
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""Run any preliminary steps necessary to make incoming markup
acceptable to the parser.
:param markup: Some markup -- probably a bytestring.
:param user_specified_encoding: The user asked to try this encoding.
:param document_declared_encoding: The markup itself claims to be
in this encoding.
:param exclude_encodings: The user asked _not_ to try any of
these encodings.
:yield: A series of 4-tuples:
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for converting the
document to Unicode and parsing it. Each strategy will be tried
in turn.
By default, the only strategy is to parse the markup
as-is. See `LXMLTreeBuilderForXML` and
`HTMLParserTreeBuilder` for implementations that take into
account the quirks of particular parsers.
"""
yield markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
:param fragment: A string -- fragment of HTML.
:return: A string -- a full HTML document.
"""
return fragment
def set_up_substitutions(self, tag):
"""Set up any substitutions that will need to be performed on
a `Tag` when it's output as a string.
By default, this does nothing. See `HTMLTreeBuilder` for a
case where this is used.
:param tag: A `Tag`
:return: Whether or not a substitution was performed.
"""
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""When an attribute value is associated with a tag that can
have multiple values for that attribute, convert the string
value to a list of strings.
Basically, replaces class="foo bar" with class=["foo", "bar"]
NOTE: This method modifies its input in place.
:param tag_name: The name of a tag.
:param attrs: A dictionary containing the tag's attributes.
Any appropriate attribute values will be modified in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in list(attrs.keys()):
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, str):
values = nonwhitespace_re.findall(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events.
This is not currently used for anything, but it demonstrates
how a simple TreeBuilder would work.
"""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
empty_element_tags = set([
# These are from HTML5.
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
# These are from earlier versions of HTML and are removed in HTML5.
'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer'
])
# The HTML standard defines these as block-level elements. Beautiful
# Soup does not treat these elements differently from other elements,
# but it may do so eventually, and this information is available if
# you need to use it.
block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
DEFAULT_CDATA_LIST_ATTRIBUTES = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
def set_up_substitutions(self, tag):
"""Replace the declared encoding in a <meta> tag with a placeholder,
to be substituted when the tag is output to a string.
An HTML document may come in to Beautiful Soup as one
encoding, but exit in a different encoding, and the <meta> tag
needs to be changed to reflect this.
:param tag: A `Tag`
:return: Whether or not a substitution was performed.
"""
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
"""An Exception to be raised when the underlying parser simply
refuses to parse the given markup.
"""
def __init__(self, message_or_exception):
"""Explain why the parser rejected the given markup, either
with a textual explanation or another exception.
"""
if isinstance(message_or_exception, Exception):
e = message_or_exception
message_or_exception = "%s: %s" % (e.__class__.__name__, str(e))
super(ParserRejectedMarkup, self).__init__(message_or_exception)
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
#!/usr/bin/env python
#MIT License
#Copyright (c) 2017 Massimiliano Patacchiola
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as np
from mountain_car import MountainCar
import matplotlib.pyplot as plt
def update_state_action(state_action_matrix, visit_counter_matrix, observation,
new_observation, action, new_action, reward, alpha,
gamma, tot_bins):
    '''Return the updated state-action matrix
    @param state_action_matrix the matrix before the update
    @param visit_counter_matrix the matrix of state-action visit counts
    @param observation the state observed at t
    @param new_observation the state observed at t+1
    @param action the action at t
    @param new_action the action at t+1
    @param reward the reward observed after the action
    @param alpha the step size (learning rate)
    @param gamma the discount factor
    @param tot_bins the number of bins used to discretize each state variable
    @return the updated state-action matrix
    '''
#Getting the values of Q at t and at t+1
col = observation[1] + (observation[0]*tot_bins)
q = state_action_matrix[action, col]
col_t1 = new_observation[1] + (new_observation[0]*tot_bins)
    q_t1 = state_action_matrix[new_action, col_t1]
#Calculate alpha based on how many time it
#has been visited
alpha_counted = 1.0 / (1.0 + visit_counter_matrix[action, col])
#Applying the update rule
#Here you can change "alpha" with "alpha_counted" if you want
#to take into account how many times that particular state-action
#pair has been visited until now.
    state_action_matrix[action, col] = state_action_matrix[action, col] + \
alpha * (reward + gamma * q_t1 - q)
return state_action_matrix
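# The assignment above implements the SARSA update rule,
#     Q(s, a) <- Q(s, a) + alpha * [r + gamma * Q(s', a') - Q(s, a)],
# with (s, a) the state-action pair at time t and (s', a') the pair at time t+1;
# substituting "alpha_counted" for "alpha" gives the visit-count-based step size
# mentioned in the comment above.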
def update_visit_counter(visit_counter_matrix, observation, action, tot_bins):
'''Update the visit counter
Counting how many times a state-action pair has been
visited. This information can be used during the update.
@param visit_counter_matrix a matrix initialised with zeros
@param observation the state observed
@param action the action taken
'''
col = observation[1] + (observation[0]*tot_bins)
    visit_counter_matrix[action, col] += 1.0
return visit_counter_matrix
def update_policy(policy_matrix, state_action_matrix, observation, tot_bins):
    '''Return the updated policy matrix
    @param policy_matrix the matrix before the update
    @param state_action_matrix the state-action matrix
    @param observation the state observed at t
    @return the updated policy matrix
    '''
col = observation[1] + (observation[0]*tot_bins)
#Getting the index of the action with the highest utility
best_action = np.argmax(state_action_matrix[:, col])
#Updating the policy
policy_matrix[observation[0], observation[1]] = best_action
return policy_matrix
def return_epsilon_greedy_action(policy_matrix, observation, epsilon=0.1):
    '''Return an action chosen with the epsilon-greedy strategy
    @param policy_matrix the policy used to pick the greedy action
    @param observation the state observed at t
    @param epsilon the value used for computing the action probabilities
    @return the chosen action (an integer)
    '''
tot_actions = int(np.nanmax(policy_matrix) + 1)
action = int(policy_matrix[observation[0], observation[1]])
non_greedy_prob = epsilon / tot_actions
greedy_prob = 1 - epsilon + non_greedy_prob
weight_array = np.full((tot_actions), non_greedy_prob)
weight_array[action] = greedy_prob
return int(np.random.choice(tot_actions, 1, p=weight_array))
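# Illustrative weights for epsilon=0.1 and the 3 actions used in main(): the greedy
# action is drawn with probability 1 - 0.1 + 0.1/3 ~= 0.933 and each other action
# with 0.1/3 ~= 0.033, so the weights sum to 1.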
def print_policy(policy_matrix):
    '''Print the policy using one symbol per action:
    < action 0   O action 1   > action 2
    '''
counter = 0
shape = policy_matrix.shape
policy_string = ""
for row in range(shape[0]):
for col in range(shape[1]):
if(policy_matrix[row,col] == 0): policy_string += " < "
elif(policy_matrix[row,col] == 1): policy_string += " O "
elif(policy_matrix[row,col] == 2): policy_string += " > "
counter += 1
policy_string += '\n'
print(policy_string)
def return_decayed_value(starting_value, minimum_value, global_step, decay_step):
"""Returns the decayed value.
decayed_value = starting_value * decay_rate ^ (global_step / decay_steps)
@param starting_value the value before decaying
@param global_step the global step to use for decay (positive integer)
@param decay_step the step at which the value is decayed
"""
decayed_value = starting_value * np.power(0.9, (global_step/decay_step))
if decayed_value < minimum_value:
return minimum_value
else:
return decayed_value
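# Illustrative decay with the values used in main() (start 0.9, minimum 0.1,
# decay_step 3000): epsilon is 0.9 at episode 0, 0.81 at episode 3000, roughly 0.31
# at episode 30000, and stays clamped at 0.1 from about episode 62500 onwards.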
def plot_curve(data_list, filepath="./my_plot.png", x_label="X", y_label="Y",
x_range=(0, 1), y_range=(0,1), color="-r", kernel_size=50,
alpha=0.4, grid=True):
"""Plot a graph using matplotlib
"""
if(len(data_list) <=1): return
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=x_range, ylim=y_range)
ax.grid(grid)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
# The original data is showed in background
ax.plot(data_list, color, alpha=alpha)
# Smooth the graph using a convolution
kernel = np.ones(int(kernel_size))/float(kernel_size)
tot_data = len(data_list)
lower_boundary = int(kernel_size/2.0)
upper_boundary = int(tot_data-(kernel_size/2.0))
data_convolved_array = np.convolve(data_list, kernel, 'same')[lower_boundary:upper_boundary]
#print("arange: " + str(np.arange(tot_data)[lower_boundary:upper_boundary]))
#print("Convolved: " + str(np.arange(tot_data).shape))
ax.plot(np.arange(tot_data)[lower_boundary:upper_boundary],
data_convolved_array, color, alpha=1.0) # Convolved plot
fig.savefig(filepath)
fig.clear()
plt.close(fig)
# print(plt.get_fignums()) # print the number of figures opened in background
def main():
env = MountainCar(mass=0.2, friction=0.3, delta_t=0.1)
# Define the state arrays for velocity and position
tot_action = 3 # Three possible actions
tot_bins = 12 # the value used to discretize the space
velocity_state_array = np.linspace(-1.5, +1.5, num=tot_bins-1,
endpoint=False)
position_state_array = np.linspace(-1.2, +0.5, num=tot_bins-1,
endpoint=False)
# Random policy as a square matrix of size (tot_bins x tot_bins)
# Three possible actions represented by three integers
policy_matrix = np.random.randint(low=0, high=tot_action, size=(tot_bins,tot_bins))
print("Policy Matrix:")
print(policy_matrix)
    # The state-action matrix and the visit counter:
    # rows are the actions, columns the flattened (velocity bin, position bin) states.
state_action_matrix = np.zeros((tot_action, tot_bins*tot_bins))
visit_counter_matrix = np.zeros((tot_action, tot_bins*tot_bins))
# Variables
gamma = 0.999
alpha = 0.001
tot_episode = 100000
epsilon_start = 0.9 # those are the values for epsilon decay
epsilon_stop = 0.1
epsilon_decay_step = 3000
print_episode = 500 # print every...
movie_episode = 10000 # movie saved every...
reward_list = list()
step_list = list()
for episode in range(tot_episode):
epsilon = return_decayed_value(epsilon_start, epsilon_stop, episode,
decay_step=epsilon_decay_step)
# Reset and return the first observation
observation = env.reset(exploring_starts=False)
        # The observation is digitized, meaning that an integer corresponding
        # to the bin where the raw float belongs is obtained and used as a replacement.
observation = (np.digitize(observation[1], velocity_state_array),
np.digitize(observation[0], position_state_array))
is_starting = True
cumulated_reward = 0
for step in range(100):
#Take the action from the action matrix
#action = policy_matrix[observation[0], observation[1]]
#Take the action using epsilon-greedy
action = return_epsilon_greedy_action(policy_matrix, observation,
epsilon=epsilon)
if(is_starting):
action = np.random.randint(0, tot_action)
is_starting = False
#Move one step in the environment and get obs and reward
new_observation, reward, done = env.step(action)
new_observation = (np.digitize(new_observation[1],
velocity_state_array),
np.digitize(new_observation[0],
position_state_array))
new_action = policy_matrix[new_observation[0], new_observation[1]]
#Updating the state-action matrix
state_action_matrix = update_state_action(state_action_matrix,
visit_counter_matrix,
observation,
new_observation,
action,
new_action,
reward,
alpha,
gamma,
tot_bins)
#Updating the policy
policy_matrix = update_policy(policy_matrix,
state_action_matrix,
observation,
tot_bins)
#Increment the visit counter
visit_counter_matrix = update_visit_counter(visit_counter_matrix,
observation,
action,
tot_bins)
observation = new_observation
cumulated_reward += reward
if done: break
# Store the data for statistics
reward_list.append(cumulated_reward)
step_list.append(step)
# Printing utilities
if(episode % print_episode == 0):
print("")
print("Episode: " + str(episode+1))
print("Epsilon: " + str(epsilon))
print("Episode steps: " + str(step+1))
print("Cumulated Reward: " + str(cumulated_reward))
print("Policy matrix: ")
print_policy(policy_matrix)
if(episode % movie_episode == 0):
print("Saving the reward plot in: ./reward.png")
plot_curve(reward_list, filepath="./reward.png",
x_label="Episode", y_label="Reward",
x_range=(0, len(reward_list)), y_range=(-1.1,1.1),
color="red", kernel_size=500,
alpha=0.4, grid=True)
print("Saving the step plot in: ./step.png")
plot_curve(step_list, filepath="./step.png",
x_label="Episode", y_label="Steps",
x_range=(0, len(step_list)), y_range=(-0.1,100),
color="blue", kernel_size=500,
alpha=0.4, grid=True)
print("Saving the gif in: ./mountain_car.gif")
env.render(file_path='./mountain_car.gif', mode='gif')
print("Complete!")
# Save reward and steps in npz file for later use
# np.savez("./statistics.npz", reward=np.asarray(reward_list), step=np.asarray(step_list))
# Time to check the utility matrix obtained
print("Policy matrix after " + str(tot_episode) + " episodes:")
print_policy(policy_matrix)
if __name__ == "__main__":
main()
from trac.core import *
from pkg_resources import resource_filename
from trac.config import Option, IntOption, ListOption, BoolOption
from trac.web.api import IRequestHandler, Href
from trac.util.translation import _
from trac.web.chrome import add_stylesheet, add_script, INavigationContributor, ITemplateProvider
import datetime
from trac.web.chrome import Chrome
from trac.util.datefmt import utc, to_timestamp
from genshi.template import TemplateLoader
from genshi.filters.transform import Transformer
from trac.web.api import ITemplateStreamFilter
from trac.perm import IPermissionRequestor
class TracSprints(Component):
implements(IRequestHandler, ITemplateProvider, IPermissionRequestor)
#key = Option('github', 'apitoken', '', doc=""" """)
permission = ListOption('tracsprints', 'permission', '')
def __init__(self):
self.db = self.env.get_db_cnx()
self.defaultOrder = ['closed', 'accepted', 'assigned', 'new', 'reopened']
self.colors = ['green', 'yellow', 'orange', 'red', 'purple']
self.scaleFactor = 3
self.currentMilestone = False
self.perm = self.config.get('tracsprints', 'permission', '').upper()
if not self.perm:
self.perm = 'ROADMAP_VIEW'
self.env.log.debug("Using Permission: %s" % self.perm)
def get_permission_actions(self):
yield self.perm
# IRequestHandler methods
def match_request(self, req):
serve = False
self.currentMilestone = False
self.env.log.debug("Match Request")
uri = req.path_info.lstrip('/').split('/')
if uri[0] == 'burndown':
serve = True
self.env.log.debug("Handle Request: %s" % serve)
self.baseURL = req.href('burndown', '/')
self.baseQueryURL = req.href('query', '/')
        if self.perm not in req.perm:
self.env.log.debug("NO Permission to view")
return False
try:
if uri[1]:
self.env.log.debug("Milestone: %s" % uri[1])
cursor = self.db.cursor()
sql = "select name from milestone where (name = '%s')" % uri[1]
cursor.execute(sql)
for name in cursor:
self.currentMilestone = uri[1]
except IndexError:
pass
#if req.args.get('milestone'):
# cursor = self.db.cursor()
# sql = "select name from milestone where (name = '%s')" % req.args.get('milestone')
# cursor.execute(sql)
# for name in cursor:
# self.currentMilestone = req.args.get('milestone')
return serve
def get_milestones(self):
cursor = self.db.cursor()
sql = "select name from milestone where (completed = 0)"
cursor.execute(sql)
self.env.log.debug("sql: %s" % sql)
m = []
        for (name,) in cursor:
            self.env.log.debug("name: %s" % name)
            n = {
                'name': name,
                'count': 0
            }
            m.append(n)
for item in m:
sql = "select count(*) as nCount from ticket where (milestone = '%s')" % item['name']
cursor.execute(sql)
            for (count,) in cursor:
                item['count'] = count
return m
def get_users(self):
devs = self.env.get_known_users()
odevs = []
for username,name,email in devs:
data = {
'username': username,
'name': name,
'email': email
}
odevs.append(data)
#self.env.log.debug("devs: %s" % odevs)
return odevs
def get_dev_totals(self):
users = self.get_users()
data = []
for u in users:
tot = self.get_ordered_totals(u['username'])
count = 0
for i in tot:
count = count + i['count']
if count > 0:
u['totals'] = tot
data.append(u)
return data
def get_totals(self, user=None):
cursor = self.db.cursor()
        if user is not None:
sql = 'select status, count(*) as ncount from ticket where (milestone = "%s") and (owner = "%s") group by status' % (self.currentMilestone, user)
else:
sql = 'select status, count(*) as ncount from ticket where (milestone = "%s") group by status' % self.currentMilestone
cursor.execute(sql)
data = {}
c = 0
for status, count in cursor:
data[status] = {
'status': status,
'count': count,
'percent': 0,
'width': 0
}
c = c + 1
return data
def get_ordered_totals(self, user=None):
data = self.get_totals(user)
ordered = []
for k in self.defaultOrder:
try:
ordered.append(data[k])
except KeyError:
n = {}
n[k] = {
'status': k,
'count': 0,
'percent': 0,
'width': 0
}
ordered.append(n[k])
c = 0
total = 0
for d in ordered:
total += d['count']
d['color'] = self.colors[c]
c = c + 1
total = float(total)
        if user is not None:
width = 0
for i in ordered:
try:
i['percent'] = (round((float(i['count']) / total), 3) * 100)
i['width'] = round(i['percent'])
except ZeroDivisionError:
i['percent'] = 1
i['width'] = round(i['percent'])
if i['percent'] == 0:
i['percent'] = 1
i['width'] = 1
width = width + i['width']
if width > 100:
diff = width - 100
i['width'] = i['width'] - diff
return ordered
def process_request(self, req):
data = {}
data['baseURL'] = self.baseURL
data['baseQueryURL'] = self.baseQueryURL
        if self.currentMilestone is False:
data['milestones'] = self.get_milestones()
else:
data['title'] = self.currentMilestone
data['devs'] = self.get_users()
data['dev_totals'] = self.get_dev_totals()
data['totals'] = self.get_ordered_totals()
self.env.log.debug("defaultOrder: %s" % data['totals'])
total = 0
for i in data['totals']:
total += i['count']
total = float(total)
if total > 0:
data['hasWork'] = True
else:
data['noWork'] = True
width = 0
for i in data['totals']:
try:
i['percent'] = (round((float(i['count']) / total), 3) * 100)
i['width'] = round(i['percent'])
except ZeroDivisionError:
i['percent'] = 1
i['width'] = round(i['percent'])
width = width + i['width']
if width > 100:
diff = width - 100
i['width'] = i['width'] - diff
data['barWidth'] = self.scaleFactor * 100
add_script(req, "tracsprints/tracsprints.js")
add_stylesheet(req, "tracsprints/tracsprints.css")
return 'sprints.html', data, None
def get_htdocs_dirs(self):
"""Return the absolute path of a directory containing additional
static resources (such as images, style sheets, etc).
"""
return [('tracsprints', resource_filename(__name__, 'htdocs'))]
def get_templates_dirs(self):
"""Return the absolute path of the directory containing the provided
genshi templates.
"""
rtn = [resource_filename(__name__, 'templates')]
return rtn
|
|
import ast
import re
from collections import deque
from typing import Any, Generator, Iterable, List, Optional, Tuple, TypeVar
from .keywords import ABBREVIATED_KEYWORDS, ROOT_KEYWORD_DESCRIPTORS
from .parser import Parser
__version__ = '0.4.0'
SQL_RE = re.compile(
r'(select\s.*from\s|'
r'delete\s+from\s|'
r'insert\s+into\s.*values\s|'
r'update\s.*set\s)',
re.IGNORECASE | re.DOTALL,
)
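# Illustrative examples (assumption, not part of the original source): SQL_RE is
# meant to flag string constants that look like complete statements, e.g.
#   "SELECT id FROM users WHERE active"
#   "DELETE FROM logs WHERE created_at < :cutoff"
# while leaving ordinary prose strings alone.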
class Linter:
name = 'sql'
version = __version__
excepted_names = []
def __init__(self, tree: Any, lines: List[str]) -> None:
self.tree = tree
self.lines = lines
@classmethod
def add_options(cls, parser):
parser.add_option(
'--sql-excepted-names',
default='',
action='store',
type='string',
help='Names not to consider keywords',
parse_from_config=True,
comma_separated_list=True,
)
@classmethod
def parse_options(cls, options):
cls.excepted_names = [name.upper() for name in options.sql_excepted_names]
def run(self) -> Generator[Tuple[int, int, str, type], Any, None]:
for node in _ast_walk(self.tree):
if isinstance(node, ast.Str) and SQL_RE.search(node.s) is not None:
initial_offset = _get_initial_offset(node, self.lines)
parser = Parser(node.s, initial_offset)
yield from self._check_query_words(node, parser)
yield from self._check_query_whitespace(node, parser)
yield from self._check_query_alignment(node, parser)
def _check_query_words(
self, query: ast.Str, parser: Parser,
) -> Generator[Tuple[int, int, str, type], Any, None]:
query_end_lineno = _get_query_end_lineno(query)
for token in parser:
word = token.value
if token.is_keyword or token.is_function_name:
if not word.isupper() and word.upper() not in self.excepted_names:
yield(
query_end_lineno, query.col_offset,
"Q440 keyword {} is not uppercase".format(word),
type(self),
)
if word.upper() in ABBREVIATED_KEYWORDS:
yield(
query_end_lineno, query.col_offset,
"Q442 avoid abbreviated keywords, {}".format(word),
type(self),
)
elif token.is_name and (not word.islower() or word.endswith('_')):
yield(
query_end_lineno, query.col_offset,
"Q441 name {} is not valid, must be snake_case, and cannot "
"end with `_`".format(word),
type(self),
)
def _check_query_whitespace(
self, query: ast.Str, parser: Parser,
) -> Generator[Tuple[int, int, str, type], Any, None]:
query_end_lineno = _get_query_end_lineno(query)
for before, token, after in _pre_post_iter(parser):
pre_whitespace = (before is not None and before.is_whitespace)
post_whitespace = (after is not None and after.is_whitespace)
post_newline = (after is None or after.is_newline)
if token.is_punctuation:
if token.value == ',' and not post_whitespace:
yield(
query_end_lineno, query.col_offset,
'Q443 incorrect whitespace around comma',
type(self),
)
elif token.value == ';' and not post_newline:
yield(
query_end_lineno, query.col_offset,
'Q446 missing newline after semicolon',
type(self),
)
elif (
token.is_comparison
and (not pre_whitespace or not post_whitespace)
):
yield(
query_end_lineno, query.col_offset,
'Q444 incorrect whitespace around equals',
type(self),
)
def _check_query_alignment(
self, query: ast.Str, parser: Parser,
) -> Generator[Tuple[int, int, str, type], Any, None]:
if len(query.s.splitlines()) == 1: # Single line queries are exempt
return
query_end_lineno = _get_query_end_lineno(query)
roots = []
for token in parser:
if token.value == ';':
roots = []
elif len(roots) < token.depth + 1:
if token.is_root_keyword:
roots.append(token)
if len(roots) > 1:
previous_root = roots[token.depth - 1]
if token.col < previous_root.col + len(previous_root.value) + 1:
yield (
query_end_lineno, query.col_offset,
'Q448 subquery should be aligned to the right of the river',
type(self),
)
elif token.is_root_keyword:
previous_root = roots[token.depth]
if previous_root.row == token.row:
message = "Q445 missing linespace between root_keywords {} and {}".format(
previous_root.value, token.value,
)
yield (query_end_lineno, query.col_offset, message, type(self))
elif previous_root.col + len(previous_root.value) != token.col + len(token.value):
message = "Q447 root_keywords {} and {} are not right aligned".format(
previous_root.value, token.value,
)
yield (query_end_lineno, query.col_offset, message, type(self))
elif not token.is_whitespace and token.value not in ROOT_KEYWORD_DESCRIPTORS:
previous_root = roots[token.depth]
if token.col < previous_root.col + len(previous_root.value) + 1:
message = "Q449 token {} should be aligned to the right of the river".format(
token.value,
)
yield (query_end_lineno, query.col_offset, message, type(self))
T = TypeVar('T')
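# Example (minimal sketch): for three tokens A, B, C, _pre_post_iter yields
#   (None, A, B), (A, B, C), (B, C, None)
# so each token is visited together with its immediate neighbours.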
def _pre_post_iter(
iterable: Iterable[T],
) -> Generator[Tuple[Optional[T], T, Optional[T]], Any, None]:
iterator = iter(iterable)
before = None
    try:
        current = next(iterator)
    except StopIteration:
        return
for after in iterator:
yield (before, current, after)
before = current
current = after
yield (before, current, None)
def _get_initial_offset(query: ast.Str, physical_lines: List[str]) -> int:
logical_lines = query.s.splitlines()
query_end_lineno = _get_query_end_lineno(query)
first_physical_line = physical_lines[query_end_lineno - len(logical_lines)]
return first_physical_line.find(logical_lines[0])
def _get_query_end_lineno(query: ast.Str) -> int:
"""Get the lineno for the last line of the given query.
In Python versions below 3.8, this could be obtained by `ast.expr.lineno`.
However Python 3.8 changed this to be the first line, and for the last line
you would instead have to use `ast.expr.end_lineno`. The real kicker here is
that this field is NOT required to be set by the compiler, so we have no
guarantee that it can be used. In practice, it is set for multi-line strings
which is suitable for our purposes - so we just need to handle the case for a
single-line string for which we can use the first lineno.
"""
try:
end_lineno = query.end_lineno
except AttributeError:
# Should only happen for non multi-line strings or Python versions below 3.8.
end_lineno = query.lineno
return end_lineno
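# Note (descriptive, assuming CPython's ast module): _ast_walk below flattens each
# ast.JoinedStr into a single ast.Str, so an f-string such as
# f"select {col} from {table}" is linted as "select formatted_value from formatted_value".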
def _ast_walk(node: ast.AST) -> Generator[ast.AST, None, None]:
if not hasattr(ast, 'JoinedStr'): # No f-strings
yield from ast.walk(node)
else: # f-strings supported
todo = deque([node])
while todo:
node = todo.popleft()
if isinstance(node, ast.JoinedStr):
merged_node = ast.Str(s='', lineno=node.lineno, col_offset=node.col_offset)
for child in ast.iter_child_nodes(node):
if isinstance(child, ast.Str):
merged_node.s += child.s
elif isinstance(child, ast.FormattedValue):
merged_node.s += 'formatted_value'
todo.append(merged_node)
else:
todo.extend(ast.iter_child_nodes(node))
yield node
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2016 NetApp, Inc. All Rights Reserved.
#
# DO NOT EDIT THIS CODE BY HAND! It has been generated with jsvcgen.
#
import click
from element.cli import utils as cli_utils
from element.cli import parser
from element.cli.cli import pass_context
from element import utils
import jsonpickle
import simplejson
from solidfire.models import *
from solidfire.custom.models import *
from uuid import UUID
from element import exceptions
from solidfire import common
from element.cli.cli import SolidFireOption, SolidFireCommand
class ProtectionSchemeVisibility(data_model.DataObject):
"""ProtectionSchemeVisibility
The public visibility of the protection scheme.
"""
enum_values = ("customer", "testOnly", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class RemoteClusterSnapshotStatus(data_model.DataObject):
"""RemoteClusterSnapshotStatus
Status of the remote snapshot on the target cluster as seen on the source cluster
"""
enum_values = ("Present", "Not Present", "Syncing", "Deleted", "Unknown", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class ProtectionSchemeCategory(data_model.DataObject):
"""ProtectionSchemeCategory
The category of the protection scheme.
"""
enum_values = ("helix", "erasureCoded", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class ProtectionScheme(data_model.DataObject):
"""ProtectionScheme
The method of protecting data on the cluster
"""
enum_values = ("singleHelix", "doubleHelix", "tripleHelix", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class AuthConfigType(data_model.DataObject):
"""AuthConfigType
This type indicates the configuration data which will be accessed or modified by the element auth container.
"""
enum_values = ("mNode", "element", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class DriveEncryptionCapabilityType(data_model.DataObject):
"""DriveEncryptionCapabilityType
This specifies a drive's encryption capability.
"""
enum_values = ("none", "sed", "fips", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class FipsDrivesStatusType(data_model.DataObject):
"""FipsDrivesStatusType
This specifies a node's FIPS 140-2 compliance status.
"""
enum_values = ("None", "Partial", "Ready", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class AuthMethod(data_model.DataObject):
"""AuthMethod
This type qualifies a ClusterAdmin with its authentication method.
"""
enum_values = ("Cluster", "Ldap", "Idp", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class MaintenanceMode(data_model.DataObject):
"""MaintenanceMode
    Which mode a node is in when it is having maintenance performed.
"""
enum_values = ("Disabled", "FailedToRecover", "Unexpected", "RecoveringFromMaintenance", "PreparingForMaintenance", "ReadyForMaintenance", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class ProposedNodeErrorCode(data_model.DataObject):
"""ProposedNodeErrorCode
This specifies error code for a proposed node addition.
"""
enum_values = ("nodesNoCapacity", "nodesTooLarge", "nodesConnectFailed", "nodesQueryFailed", "nodesClusterMember", "nonFipsNodeCapable", "nonFipsDrivesCapable", "nodeTypeUnsupported", "nodeTypesHeterogeneous", "nodeTypeInvalid", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class VolumeAccess(data_model.DataObject):
"""VolumeAccess
Describes host access for a volume.
"""
enum_values = ("locked", "readOnly", "readWrite", "replicationTarget", "snapMirrorTarget", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class ProtectionDomainType(data_model.DataObject):
"""ProtectionDomainType
A Protection Domain is a set of one or more components whose simultaneous failure is protected
from causing data unavailability or loss. This specifies one of the types of Protection Domains
recognized by this cluster.
"""
enum_values = ("node", "chassis", "custom", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
@click.group()
@pass_context
def cli(ctx):
"""testkeyserverkmip listkeyproviderskmip listkeyserverskmip createkeyserverkmip deletekeyserverkmip getkeyproviderkmip addkeyservertoproviderkmip removekeyserverfromproviderkmip getkeyserverkmip getclientcertificatesignrequest createpublicprivatekeypair deletekeyproviderkmip modifykeyserverkmip testkeyproviderkmip getfipsreport createkeyproviderkmip """
@cli.command('testkeyserverkmip', short_help="""Test whether the specified KMIP (Key Management Interoperability Protocol) Key Server is functioning normally. """, cls=SolidFireCommand)
@click.option('--keyserverid',
type=int,
required=True,
prompt=True,
help="""The ID of the KMIP Key Server to test. """)
@pass_context
def testkeyserverkmip(ctx,
# Mandatory main parameter
keyserverid):
"""Test whether the specified KMIP (Key Management Interoperability Protocol) Key Server is functioning normally."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyserverid = """ + str(keyserverid)+""";"""+"")
try:
_TestKeyServerKmipResult = ctx.element.test_key_server_kmip(key_server_id=keyserverid)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_TestKeyServerKmipResult), indent=4))
return
else:
cli_utils.print_result(_TestKeyServerKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('listkeyproviderskmip', short_help="""Returns the list of KMIP (Key Management Interoperability Protocol) Key Providers which have been created via CreateKeyProviderKmip. The list can optionally be filtered by specifying additional parameters. """, cls=SolidFireCommand)
@click.option('--keyproviderisactive',
type=bool,
required=False,
help="""If omitted, returned KMIP Key Provider objects will not be filtered based on whether they're active. If true, returns only KMIP Key Provider objects which are active (providing keys which are currently in use). If false, returns only KMIP Key Provider objects which are inactive (not providing any keys and able to be deleted). """)
@click.option('--kmipkeyproviderhasserverassigned',
type=bool,
required=False,
help="""If omitted, returned KMIP Key Provider objects will not be filtered based on whether they have a KMIP Key Server assigned. If true, returns only KMIP Key Provider objects which have a KMIP Key Server assigned. If false, returns only KMIP Key Provider objects which do not have a KMIP Key Server assigned. """)
@pass_context
def listkeyproviderskmip(ctx,
# Optional main parameter
keyproviderisactive = None,
# Optional main parameter
kmipkeyproviderhasserverassigned = None):
"""Returns the list of KMIP (Key Management Interoperability Protocol) Key Providers which have been created via CreateKeyProviderKmip. The list can optionally be filtered by specifying additional parameters."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyproviderisactive = """+str(keyproviderisactive)+";" + """kmipkeyproviderhasserverassigned = """+str(kmipkeyproviderhasserverassigned)+""";"""+"")
try:
_ListKeyProvidersKmipResult = ctx.element.list_key_providers_kmip(key_provider_is_active=keyproviderisactive, kmip_key_provider_has_server_assigned=kmipkeyproviderhasserverassigned)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_ListKeyProvidersKmipResult), indent=4))
return
else:
cli_utils.print_result(_ListKeyProvidersKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('listkeyserverskmip', short_help="""Returns the list of KMIP (Key Management Interoperability Protocol) Key Servers which have been created via CreateKeyServerKmip. The list can optionally be filtered by specifying additional parameters. """, cls=SolidFireCommand)
@click.option('--keyproviderid',
type=int,
required=False,
help="""If omitted, returned KMIP Key Server objects will not be filtered based on whether they're assigned to the specified KMIP Key Provider. If specified, returned KMIP Key Server objects will be filtered to those assigned to the specified KMIP Key Provider. """)
@click.option('--kmipassignedproviderisactive',
type=bool,
required=False,
help="""If omitted, returned KMIP Key Server objects will not be filtered based on whether they're active. If true, returns only KMIP Key Server objects which are active (providing keys which are currently in use). If false, returns only KMIP Key Server objects which are inactive (not providing any keys and able to be deleted). """)
@click.option('--kmiphasproviderassigned',
type=bool,
required=False,
help="""If omitted, returned KMIP Key Server objects will not be filtered based on whether they have a KMIP Key Provider assigned. If true, returns only KMIP Key Server objects which have a KMIP Key Provider assigned. If false, returns only KMIP Key Server objects which do not have a KMIP Key Provider assigned. """)
@pass_context
def listkeyserverskmip(ctx,
# Optional main parameter
keyproviderid = None,
# Optional main parameter
kmipassignedproviderisactive = None,
# Optional main parameter
kmiphasproviderassigned = None):
"""Returns the list of KMIP (Key Management Interoperability Protocol) Key Servers which have been created via CreateKeyServerKmip. The list can optionally be filtered by specifying additional parameters."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyproviderid = """+str(keyproviderid)+";" + """kmipassignedproviderisactive = """+str(kmipassignedproviderisactive)+";" + """kmiphasproviderassigned = """+str(kmiphasproviderassigned)+""";"""+"")
try:
_ListKeyServersKmipResult = ctx.element.list_key_servers_kmip(key_provider_id=keyproviderid, kmip_assigned_provider_is_active=kmipassignedproviderisactive, kmip_has_provider_assigned=kmiphasproviderassigned)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_ListKeyServersKmipResult), indent=4))
return
else:
cli_utils.print_result(_ListKeyServersKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('createkeyserverkmip', short_help="""Creates a KMIP (Key Management Interoperability Protocol) Key Server with the specified attributes. The server will not be contacted as part of this operation so it need not exist or be configured prior. For clustered Key Server configurations, the hostnames or IP Addresses, of all server nodes, must be provided in the kmipKeyServerHostnames parameter. """, cls=SolidFireCommand)
@click.option('--kmipcacertificate',
type=str,
required=True,
prompt=True,
help="""The public key certificate of the external key server's root CA. This will be used to verify the certificate presented by external key server in the TLS communication. For key server clusters where individual servers use different CAs, provide a concatenated string containing the root certificates of all the CAs. """)
@click.option('--kmipclientcertificate',
type=str,
required=True,
prompt=True,
help="""A PEM format Base64 encoded PKCS#10 X.509 certificate used by the Solidfire KMIP client. """)
@click.option('--kmipkeyserverhostnames',
type=str,
required=True,
prompt=True,
help="""Array of the hostnames or IP addresses associated with this KMIP Key Server. Multiple hostnames or IP addresses must only be provided if the key servers are in a clustered configuration. """)
@click.option('--kmipkeyservername',
type=str,
required=True,
prompt=True,
help="""The name of the KMIP Key Server. This name is only used for display purposes and does not need to be unique. """)
@click.option('--kmipkeyserverport',
type=int,
required=False,
help="""The port number associated with this KMIP Key Server (typically 5696). """)
@pass_context
def createkeyserverkmip(ctx,
# Mandatory main parameter
kmipcacertificate,
# Mandatory main parameter
kmipclientcertificate,
# Mandatory main parameter
kmipkeyserverhostnames,
# Mandatory main parameter
kmipkeyservername,
# Optional main parameter
kmipkeyserverport = None):
"""Creates a KMIP (Key Management Interoperability Protocol) Key Server with the specified attributes. The server will not be contacted as part of this operation so it need not exist or be configured prior."""
"""For clustered Key Server configurations, the hostnames or IP Addresses, of all server nodes, must be provided in the kmipKeyServerHostnames parameter."""
cli_utils.establish_connection(ctx)
kmipkeyserverhostnames = parser.parse_array(kmipkeyserverhostnames)
ctx.logger.info(""": """"""kmipcacertificate = """ + str(kmipcacertificate)+";"+"""kmipclientcertificate = """ + str(kmipclientcertificate)+";"+"""kmipkeyserverhostnames = """ + str(kmipkeyserverhostnames)+";"+"""kmipkeyservername = """ + str(kmipkeyservername)+";" + """kmipkeyserverport = """+str(kmipkeyserverport)+""";"""+"")
try:
_CreateKeyServerKmipResult = ctx.element.create_key_server_kmip(kmip_ca_certificate=kmipcacertificate, kmip_client_certificate=kmipclientcertificate, kmip_key_server_hostnames=kmipkeyserverhostnames, kmip_key_server_name=kmipkeyservername, kmip_key_server_port=kmipkeyserverport)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_CreateKeyServerKmipResult), indent=4))
return
else:
cli_utils.print_result(_CreateKeyServerKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('deletekeyserverkmip', short_help="""Delete the specified KMIP (Key Management Interoperability Protocol) Key Server. A KMIP Key Server can be deleted unless it's the last one assigned to its provider, and that provider is active (providing keys which are currently in use). """, cls=SolidFireCommand)
@click.option('--keyserverid',
type=int,
required=True,
prompt=True,
help="""The ID of the KMIP Key Server to delete. """)
@pass_context
def deletekeyserverkmip(ctx,
# Mandatory main parameter
keyserverid):
"""Delete the specified KMIP (Key Management Interoperability Protocol) Key Server. A KMIP Key Server can be deleted unless it's the last one assigned to its provider, and that provider is active (providing keys which are currently in use)."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyserverid = """ + str(keyserverid)+""";"""+"")
try:
_DeleteKeyServerKmipResult = ctx.element.delete_key_server_kmip(key_server_id=keyserverid)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_DeleteKeyServerKmipResult), indent=4))
return
else:
cli_utils.print_result(_DeleteKeyServerKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('getkeyproviderkmip', short_help="""Returns the specified KMIP (Key Management Interoperability Protocol) Key Provider object. """, cls=SolidFireCommand)
@click.option('--keyproviderid',
type=int,
required=True,
prompt=True,
help="""The ID of the KMIP Key Provider object to return. """)
@pass_context
def getkeyproviderkmip(ctx,
# Mandatory main parameter
keyproviderid):
"""Returns the specified KMIP (Key Management Interoperability Protocol) Key Provider object."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyproviderid = """ + str(keyproviderid)+""";"""+"")
try:
_GetKeyProviderKmipResult = ctx.element.get_key_provider_kmip(key_provider_id=keyproviderid)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_GetKeyProviderKmipResult), indent=4))
return
else:
cli_utils.print_result(_GetKeyProviderKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('addkeyservertoproviderkmip', short_help="""Adds (assigns) the specified KMIP (Key Management Interoperability Protocol) Key Server to the specified Key Provider. This will result in contacting the server to verify it's functional, as well as to synchronize keys in the event that there are multiple key servers assigned to the provider. This synchronization may result in conflicts which could cause this to fail. If the specified KMIP Key Server is already assigned to the specified Key Provider, this is a no-op and no error will be returned. The assignment can be removed (unassigned) using RemoveKeyServerFromProviderKmip. """, cls=SolidFireCommand)
@click.option('--keyproviderid',
type=int,
required=True,
prompt=True,
help="""The ID of the Key Provider to assign the KMIP Key Server to. """)
@click.option('--keyserverid',
type=int,
required=True,
prompt=True,
help="""The ID of the KMIP Key Server to assign. """)
@pass_context
def addkeyservertoproviderkmip(ctx,
# Mandatory main parameter
keyproviderid,
# Mandatory main parameter
keyserverid):
"""Adds (assigns) the specified KMIP (Key Management Interoperability Protocol) Key Server to the specified Key Provider. This will result in contacting the server to verify it's functional, as well as to synchronize keys in the event that there are multiple key servers assigned to the provider. This synchronization may result in conflicts which could cause this to fail. If the specified KMIP Key Server is already assigned to the specified Key Provider, this is a no-op and no error will be returned. The assignment can be removed (unassigned) using RemoveKeyServerFromProviderKmip."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyproviderid = """ + str(keyproviderid)+";"+"""keyserverid = """ + str(keyserverid)+""";"""+"")
try:
_AddKeyServerToProviderKmipResult = ctx.element.add_key_server_to_provider_kmip(key_provider_id=keyproviderid, key_server_id=keyserverid)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_AddKeyServerToProviderKmipResult), indent=4))
return
else:
cli_utils.print_result(_AddKeyServerToProviderKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('removekeyserverfromproviderkmip', short_help="""Remove (unassign) the specified KMIP (Key Management Interoperability Protocol) Key Server from the provider it was assigned to via AddKeyServerToProviderKmip (if any). A KMIP Key Server can be unassigned from its provider unless it's the last one and that provider is active (providing keys which are currently in use). If the specified KMIP Key Server is not assigned to a provider, this is a no-op and no error will be returned. """, cls=SolidFireCommand)
@click.option('--keyserverid',
type=int,
required=True,
prompt=True,
help="""The ID of the KMIP Key Server to unassign. """)
@pass_context
def removekeyserverfromproviderkmip(ctx,
# Mandatory main parameter
keyserverid):
"""Remove (unassign) the specified KMIP (Key Management Interoperability Protocol) Key Server from the provider it was assigned to via AddKeyServerToProviderKmip (if any). A KMIP Key Server can be unassigned from its provider unless it's the last one and that provider is active (providing keys which are currently in use). If the specified KMIP Key Server is not assigned to a provider, this is a no-op and no error will be returned."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyserverid = """ + str(keyserverid)+""";"""+"")
try:
_RemoveKeyServerFromProviderKmipResult = ctx.element.remove_key_server_from_provider_kmip(key_server_id=keyserverid)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_RemoveKeyServerFromProviderKmipResult), indent=4))
return
else:
cli_utils.print_result(_RemoveKeyServerFromProviderKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('getkeyserverkmip', short_help="""Returns the specified KMIP (Key Management Interoperability Protocol) Key Server object. """, cls=SolidFireCommand)
@click.option('--keyserverid',
type=int,
required=True,
prompt=True,
help="""The ID of the KMIP Key Server object to return. """)
@pass_context
def getkeyserverkmip(ctx,
# Mandatory main parameter
keyserverid):
"""Returns the specified KMIP (Key Management Interoperability Protocol) Key Server object."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyserverid = """ + str(keyserverid)+""";"""+"")
try:
_GetKeyServerKmipResult = ctx.element.get_key_server_kmip(key_server_id=keyserverid)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_GetKeyServerKmipResult), indent=4))
return
else:
cli_utils.print_result(_GetKeyServerKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('getclientcertificatesignrequest', short_help="""Generates a Certificate Sign Request which can be signed by a Certificate Authority to generate a client certificate for the cluster. This is part of establishing a trust relationship for interacting with external services. """, cls=SolidFireCommand)
@pass_context
def getclientcertificatesignrequest(ctx):
"""Generates a Certificate Sign Request which can be signed by a Certificate Authority to generate a client certificate for the cluster. This is part of establishing a trust relationship for interacting with external services."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """+""";"""+"")
try:
_GetClientCertificateSignRequestResult = ctx.element.get_client_certificate_sign_request()
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_GetClientCertificateSignRequestResult), indent=4))
return
else:
cli_utils.print_result(_GetClientCertificateSignRequestResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('createpublicprivatekeypair', short_help="""Creates SSL public and private keys. These keys can be used to generate Certificate Sign Requests. There can be only one key pair in use for the cluster. To replace the existing keys, make sure that they are not being used by any providers before invoking this API. """, cls=SolidFireCommand)
@click.option('--commonname',
type=str,
required=False,
help="""This is the X.509 distinguished name Common Name field (CN). """)
@click.option('--organization',
type=str,
required=False,
help="""This is the X.509 distinguished name Organization Name field (O). """)
@click.option('--organizationalunit',
type=str,
required=False,
help="""This is the X.509 distinguished name Organizational Unit Name field (OU). """)
@click.option('--locality',
type=str,
required=False,
help="""This is the X.509 distinguished name Locality Name field (L). """)
@click.option('--state',
type=str,
required=False,
help="""This is the X.509 distinguished name State or Province Name field (ST or SP or S). """)
@click.option('--country',
type=str,
required=False,
help="""This is the X.509 distinguished name Country field (C). """)
@click.option('--emailaddress',
type=str,
required=False,
help="""This is the X.509 distinguished name Email Address field (MAIL). """)
@pass_context
def createpublicprivatekeypair(ctx,
# Optional main parameter
commonname = None,
# Optional main parameter
organization = None,
# Optional main parameter
organizationalunit = None,
# Optional main parameter
locality = None,
# Optional main parameter
state = None,
# Optional main parameter
country = None,
# Optional main parameter
emailaddress = None):
"""Creates SSL public and private keys. These keys can be used to generate Certificate Sign Requests."""
"""There can be only one key pair in use for the cluster. To replace the existing keys, make sure that they are not being used by any providers before invoking this API."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""commonname = """+str(commonname)+";" + """organization = """+str(organization)+";" + """organizationalunit = """+str(organizationalunit)+";" + """locality = """+str(locality)+";" + """state = """+str(state)+";" + """country = """+str(country)+";" + """emailaddress = """+str(emailaddress)+""";"""+"")
try:
_CreatePublicPrivateKeyPairResult = ctx.element.create_public_private_key_pair(common_name=commonname, organization=organization, organizational_unit=organizationalunit, locality=locality, state=state, country=country, email_address=emailaddress)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_CreatePublicPrivateKeyPairResult), indent=4))
return
else:
cli_utils.print_result(_CreatePublicPrivateKeyPairResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('deletekeyproviderkmip', short_help="""Delete the specified inactive Key Provider. """, cls=SolidFireCommand)
@click.option('--keyproviderid',
type=int,
required=True,
prompt=True,
help="""The ID of the Key Provider to delete. """)
@pass_context
def deletekeyproviderkmip(ctx,
# Mandatory main parameter
keyproviderid):
"""Delete the specified inactive Key Provider."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyproviderid = """ + str(keyproviderid)+""";"""+"")
try:
_DeleteKeyProviderKmipResult = ctx.element.delete_key_provider_kmip(key_provider_id=keyproviderid)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_DeleteKeyProviderKmipResult), indent=4))
return
else:
cli_utils.print_result(_DeleteKeyProviderKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('modifykeyserverkmip', short_help="""Modifies a KMIP (Key Management Interoperability Protocol) Key Server to the specified attributes. The only required parameter is the keyServerID. A request which contains only the keyServerID will be a no-op and no error will be returned. Any other parameters which are specified will replace the existing values on the KMIP Key Server with the specified keyServerID. Because this server might be part of an active provider this will result in contacting the server to verify it's functional. Multiple hostnames or IP addresses must only be provided to the kmipKeyServerHostnames parameter if the key servers are in a clustered configuration. """, cls=SolidFireCommand)
@click.option('--kmipcacertificate',
type=str,
required=False,
help="""The public key certificate of the external key server's root CA. This will be used to verify the certificate presented by external key server in the TLS communication. For key server clusters where individual servers use different CAs, provide a concatenated string containing the root certificates of all the CAs. """)
@click.option('--kmipclientcertificate',
type=str,
required=False,
help="""A PEM format Base64 encoded PKCS#10 X.509 certificate used by the Solidfire KMIP client. """)
@click.option('--kmipkeyserverhostnames',
type=str,
required=False,
help="""Array of the hostnames or IP addresses associated with this KMIP Key Server. Multiple hostnames or IP addresses must only be provided if the key servers are in a clustered configuration. """)
@click.option('--keyserverid',
type=int,
required=True,
prompt=True,
help="""The ID of the KMIP Key Server to modify. """)
@click.option('--kmipkeyservername',
type=str,
required=False,
help="""The name of the KMIP Key Server. This name is only used for display purposes and does not need to be unique. """)
@click.option('--kmipkeyserverport',
type=int,
required=False,
help="""The port number associated with this KMIP Key Server (typically 5696). """)
@pass_context
def modifykeyserverkmip(ctx,
# Mandatory main parameter
keyserverid,
# Optional main parameter
kmipcacertificate = None,
# Optional main parameter
kmipclientcertificate = None,
# Optional main parameter
kmipkeyserverhostnames = None,
# Optional main parameter
kmipkeyservername = None,
# Optional main parameter
kmipkeyserverport = None):
"""Modifies a KMIP (Key Management Interoperability Protocol) Key Server to the specified attributes. The only required parameter is the keyServerID. A request which contains only the keyServerID will be a no-op and no error will be returned. Any other parameters which are specified will replace the existing values on the KMIP Key Server with the specified keyServerID. Because this server might be part of an active provider this will result in contacting the server to verify it's functional. Multiple hostnames or IP addresses must only be provided to the kmipKeyServerHostnames parameter if the key servers are in a clustered configuration."""
cli_utils.establish_connection(ctx)
kmipkeyserverhostnames = parser.parse_array(kmipkeyserverhostnames)
ctx.logger.info(""": """"""keyserverid = """ + str(keyserverid)+";" + """kmipcacertificate = """+str(kmipcacertificate)+";" + """kmipclientcertificate = """+str(kmipclientcertificate)+";" + """kmipkeyserverhostnames = """+str(kmipkeyserverhostnames)+";" + """kmipkeyservername = """+str(kmipkeyservername)+";" + """kmipkeyserverport = """+str(kmipkeyserverport)+""";"""+"")
try:
_ModifyKeyServerKmipResult = ctx.element.modify_key_server_kmip(key_server_id=keyserverid, kmip_ca_certificate=kmipcacertificate, kmip_client_certificate=kmipclientcertificate, kmip_key_server_hostnames=kmipkeyserverhostnames, kmip_key_server_name=kmipkeyservername, kmip_key_server_port=kmipkeyserverport)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_ModifyKeyServerKmipResult), indent=4))
return
else:
cli_utils.print_result(_ModifyKeyServerKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('testkeyproviderkmip', short_help="""Test whether the specified Key Provider is functioning normally. """, cls=SolidFireCommand)
@click.option('--keyproviderid',
type=int,
required=True,
prompt=True,
help="""The ID of the Key Provider to test. """)
@pass_context
def testkeyproviderkmip(ctx,
# Mandatory main parameter
keyproviderid):
"""Test whether the specified Key Provider is functioning normally."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyproviderid = """ + str(keyproviderid)+""";"""+"")
try:
_TestKeyProviderKmipResult = ctx.element.test_key_provider_kmip(key_provider_id=keyproviderid)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_TestKeyProviderKmipResult), indent=4))
return
else:
cli_utils.print_result(_TestKeyProviderKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('getfipsreport', short_help="""GetFipsReport enables you to retrieve FIPS compliance status on a per node basis. """, cls=SolidFireCommand)
@pass_context
def getfipsreport(ctx):
"""GetFipsReport enables you to retrieve FIPS compliance status on a per node basis."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """+""";"""+"")
try:
_GetFipsReportResult = ctx.element.get_fips_report()
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_GetFipsReportResult), indent=4))
return
else:
cli_utils.print_result(_GetFipsReportResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('createkeyproviderkmip', short_help="""Creates a KMIP (Key Management Interoperability Protocol) Key Provider with the specified name. A Key Provider defines a mechanism and location to retrieve authentication keys. A KMIP Key Provider represents a collection of one or more KMIP Key Servers. A newly created KMIP Key Provider will not have any KMIP Key Servers assigned to it. To create a KMIP Key Server see CreateKeyServerKmip and to assign it to a provider created via this method see AddKeyServerToProviderKmip. """, cls=SolidFireCommand)
@click.option('--keyprovidername',
type=str,
required=True,
prompt=True,
help="""The name to associate with the created KMIP Key Provider. This name is only used for display purposes and does not need to be unique. """)
@pass_context
def createkeyproviderkmip(ctx,
# Mandatory main parameter
keyprovidername):
"""Creates a KMIP (Key Management Interoperability Protocol) Key Provider with the specified name. A Key Provider defines a mechanism and location to retrieve authentication keys. A KMIP Key Provider represents a collection of one or more KMIP Key Servers. A newly created KMIP Key Provider will not have any KMIP Key Servers assigned to it. To create a KMIP Key Server see CreateKeyServerKmip and to assign it to a provider created via this method see AddKeyServerToProviderKmip."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""keyprovidername = """ + str(keyprovidername)+""";"""+"")
try:
_CreateKeyProviderKmipResult = ctx.element.create_key_provider_kmip(key_provider_name=keyprovidername)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_CreateKeyProviderKmipResult), indent=4))
return
else:
cli_utils.print_result(_CreateKeyProviderKmipResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
|
|
from __future__ import unicode_literals, absolute_import, print_function
import click
import sys
import frappe
from frappe.utils import cint
from frappe.commands import pass_context, get_site
from frappe.exceptions import SiteNotSpecifiedError
def _is_scheduler_enabled():
enable_scheduler = False
try:
frappe.connect()
        enable_scheduler = bool(cint(frappe.db.get_single_value("System Settings", "enable_scheduler")))
    except Exception:
        pass
finally:
frappe.db.close()
return enable_scheduler
@click.command('trigger-scheduler-event')
@click.argument('event')
@pass_context
def trigger_scheduler_event(context, event):
"Trigger a scheduler event"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.trigger(site, event, now=True)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('enable-scheduler')
@pass_context
def enable_scheduler(context):
"Enable scheduler"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.enable_scheduler()
frappe.db.commit()
print("Enabled for", site)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('disable-scheduler')
@pass_context
def disable_scheduler(context):
"Disable scheduler"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.disable_scheduler()
frappe.db.commit()
print("Disabled for", site)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
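# Usage sketch (assuming the usual bench wrapper around these commands):
#   bench --site mysite scheduler pause
#   bench --site mysite scheduler enable
# "pause"/"resume" only toggle the pause_scheduler flag in site config, while
# "disable"/"enable" connect to the site and persist the change.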
@click.command('scheduler')
@click.option('--site', help='site name')
@click.argument('state', type=click.Choice(['pause', 'resume', 'disable', 'enable']))
@pass_context
def scheduler(context, state, site=None):
from frappe.installer import update_site_config
import frappe.utils.scheduler
if not site:
site = get_site(context)
try:
frappe.init(site=site)
if state == 'pause':
update_site_config('pause_scheduler', 1)
elif state == 'resume':
update_site_config('pause_scheduler', 0)
elif state == 'disable':
frappe.connect()
frappe.utils.scheduler.disable_scheduler()
frappe.db.commit()
elif state == 'enable':
frappe.connect()
frappe.utils.scheduler.enable_scheduler()
frappe.db.commit()
print('Scheduler {0}d for site {1}'.format(state, site))
finally:
frappe.destroy()
@click.command('set-maintenance-mode')
@click.option('--site', help='site name')
@click.argument('state', type=click.Choice(['on', 'off']))
@pass_context
def set_maintenance_mode(context, state, site=None):
from frappe.installer import update_site_config
if not site:
site = get_site(context)
try:
frappe.init(site=site)
update_site_config('maintenance_mode', 1 if (state == 'on') else 0)
finally:
frappe.destroy()
@click.command('doctor')  # pass_context always resolves a site and breaks when no site is in use
@click.option('--site', help='site name')
@pass_context
def doctor(context, site=None):
"Get diagnostic info about background workers"
from frappe.utils.doctor import doctor as _doctor
if not site:
site = get_site(context, raise_err=False)
return _doctor(site=site)
@click.command('show-pending-jobs')
@click.option('--site', help='site name')
@pass_context
def show_pending_jobs(context, site=None):
"Get diagnostic info about background jobs"
from frappe.utils.doctor import pending_jobs as _pending_jobs
if not site:
site = get_site(context)
with frappe.init_site(site):
pending_jobs = _pending_jobs(site=site)
return pending_jobs
@click.command('purge-jobs')
@click.option('--site', help='site name')
@click.option('--queue', default=None, help='one of "low", "default", "high"')
@click.option('--event', default=None, help='one of "all", "weekly", "monthly", "hourly", "daily", "weekly_long", "daily_long"')
def purge_jobs(site=None, queue=None, event=None):
"Purge any pending periodic tasks, if event option is not given, it will purge everything for the site"
from frappe.utils.doctor import purge_pending_jobs
frappe.init(site or '')
count = purge_pending_jobs(event=event, site=site, queue=queue)
print("Purged {} jobs".format(count))
@click.command('schedule')
def start_scheduler():
from frappe.utils.scheduler import start_scheduler
start_scheduler()
@click.command('worker')
@click.option('--queue', type=str)
@click.option('--quiet', is_flag = True, default = False, help = 'Hide Log Outputs')
def start_worker(queue, quiet = False):
from frappe.utils.background_jobs import start_worker
start_worker(queue, quiet = quiet)
@click.command('ready-for-migration')
@click.option('--site', help='site name')
@pass_context
def ready_for_migration(context, site=None):
from frappe.utils.doctor import get_pending_jobs
if not site:
site = get_site(context)
try:
frappe.init(site=site)
pending_jobs = get_pending_jobs(site=site)
if pending_jobs:
print('NOT READY for migration: site {0} has pending background jobs'.format(site))
sys.exit(1)
else:
print('READY for migration: site {0} does not have any background jobs'.format(site))
return 0
finally:
frappe.destroy()
commands = [
disable_scheduler,
doctor,
enable_scheduler,
purge_jobs,
ready_for_migration,
scheduler,
set_maintenance_mode,
show_pending_jobs,
start_scheduler,
start_worker,
trigger_scheduler_event,
]
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Metadata request handler."""
import hashlib
import hmac
import os
from oslo_config import cfg
import six
import webob.dec
import webob.exc
from nova.api.metadata import base
from nova import conductor
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova import utils
from nova import wsgi
CACHE_EXPIRATION = 15 # in seconds
CONF = cfg.CONF
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
metadata_proxy_opts = [
cfg.BoolOpt(
'service_metadata_proxy',
default=False,
help='Set flag to indicate Neutron will proxy metadata requests and '
'resolve instance ids.'),
cfg.StrOpt(
'metadata_proxy_shared_secret',
default='', secret=True,
help='Shared secret to validate proxies Neutron metadata requests'),
]
CONF.register_opts(metadata_proxy_opts, 'neutron')
LOG = logging.getLogger(__name__)
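# Overview (descriptive note, not from the original source): when
# service_metadata_proxy is enabled, Neutron forwards metadata requests and the
# handler trusts X-Instance-ID only if X-Instance-ID-Signature equals
# HMAC-SHA256(metadata_proxy_shared_secret, instance_id); otherwise the instance
# is resolved from the remote address (or X-Forwarded-For when configured).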
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
self._cache = memorycache.get_client()
self.conductor_api = conductor.API()
def get_metadata_by_remote_address(self, address):
if not address:
raise exception.FixedIpNotFoundForAddress(address=address)
cache_key = 'metadata-%s' % address
data = self._cache.get(cache_key)
if data:
return data
try:
data = base.get_metadata_by_address(self.conductor_api, address)
except exception.NotFound:
return None
self._cache.set(cache_key, data, CACHE_EXPIRATION)
return data
def get_metadata_by_instance_id(self, instance_id, address):
cache_key = 'metadata-%s' % instance_id
data = self._cache.get(cache_key)
if data:
return data
try:
data = base.get_metadata_by_instance_id(self.conductor_api,
instance_id, address)
except exception.NotFound:
return None
self._cache.set(cache_key, data, CACHE_EXPIRATION)
return data
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if os.path.normpath(req.path_info) == "/":
resp = base.ec2_md_print(base.VERSIONS + ["latest"])
req.response.body = resp
req.response.content_type = base.MIME_TYPE_TEXT_PLAIN
return req.response
if CONF.neutron.service_metadata_proxy:
meta_data = self._handle_instance_id_request(req)
else:
if req.headers.get('X-Instance-ID'):
LOG.warning(
_LW("X-Instance-ID present in request headers. The "
"'service_metadata_proxy' option must be "
"enabled to process this header."))
meta_data = self._handle_remote_ip_request(req)
if meta_data is None:
raise webob.exc.HTTPNotFound()
try:
data = meta_data.lookup(req.path_info)
except base.InvalidMetadataPath:
raise webob.exc.HTTPNotFound()
if callable(data):
return data(req, meta_data)
resp = base.ec2_md_print(data)
if isinstance(resp, six.text_type):
req.response.text = resp
else:
req.response.body = resp
req.response.content_type = meta_data.get_mimetype()
return req.response
def _handle_remote_ip_request(self, req):
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
try:
meta_data = self.get_metadata_by_remote_address(remote_address)
except Exception:
LOG.exception(_LE('Failed to get metadata for ip: %s'),
remote_address)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error(_LE('Failed to get metadata for ip: %s'),
remote_address)
return meta_data
def _handle_instance_id_request(self, req):
instance_id = req.headers.get('X-Instance-ID')
tenant_id = req.headers.get('X-Tenant-ID')
signature = req.headers.get('X-Instance-ID-Signature')
remote_address = req.headers.get('X-Forwarded-For')
# Ensure that only one header was passed
if instance_id is None:
msg = _('X-Instance-ID header is missing from request.')
elif signature is None:
msg = _('X-Instance-ID-Signature header is missing from request.')
elif tenant_id is None:
msg = _('X-Tenant-ID header is missing from request.')
elif not isinstance(instance_id, six.string_types):
msg = _('Multiple X-Instance-ID headers found within request.')
elif not isinstance(tenant_id, six.string_types):
msg = _('Multiple X-Tenant-ID headers found within request.')
else:
msg = None
if msg:
raise webob.exc.HTTPBadRequest(explanation=msg)
expected_signature = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
instance_id,
hashlib.sha256).hexdigest()
if not utils.constant_time_compare(expected_signature, signature):
if instance_id:
LOG.warning(_LW('X-Instance-ID-Signature: %(signature)s does '
'not match the expected value: '
'%(expected_signature)s for id: '
'%(instance_id)s. Request From: '
'%(remote_address)s'),
{'signature': signature,
'expected_signature': expected_signature,
'instance_id': instance_id,
'remote_address': remote_address})
msg = _('Invalid proxy request signature.')
raise webob.exc.HTTPForbidden(explanation=msg)
try:
meta_data = self.get_metadata_by_instance_id(instance_id,
remote_address)
except Exception:
LOG.exception(_LE('Failed to get metadata for instance id: %s'),
instance_id)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error(_LE('Failed to get metadata for instance id: %s'),
instance_id)
elif meta_data.instance.project_id != tenant_id:
LOG.warning(_LW("Tenant_id %(tenant_id)s does not match tenant_id "
"of instance %(instance_id)s."),
{'tenant_id': tenant_id, 'instance_id': instance_id})
# causes a 404 to be raised
meta_data = None
return meta_data
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, unbalanced-tuple-unpacking, len-as-condition, no-member
import re
import pytest
from ..pattern import StringPattern, RePattern, FunctionalPattern, REGEX_ENABLED
from ..match import Match
class TestStringPattern(object):
"""
Tests for StringPattern matching
"""
input_string = "An Abyssinian fly playing a Celtic violin was annoyed by trashy flags on " \
"which were the Hebrew letter qoph."
def test_single(self):
pattern = StringPattern("Celtic")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (28, 34)
assert matches[0].value == "Celtic"
def test_repr(self):
pattern = StringPattern("Celtic")
assert repr(pattern) == '<StringPattern:(\'Celtic\',)>'
def test_ignore_case(self):
pattern = StringPattern("celtic", ignore_case=False)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
pattern = StringPattern("celtic", ignore_case=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert matches[0].value == "Celtic"
def test_private_names(self):
pattern = StringPattern("celtic", name="test", private_names=["test"], ignore_case=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert matches[0].private
def test_ignore_names(self):
pattern = StringPattern("celtic", name="test", ignore_names=["test"], ignore_case=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
def test_no_match(self):
pattern = StringPattern("Python")
matches = list(pattern.matches(self.input_string))
assert not matches
def test_multiple_patterns(self):
pattern = StringPattern("playing", "annoyed", "Hebrew")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 3
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (18, 25)
assert matches[0].value == "playing"
assert isinstance(matches[1], Match)
assert matches[1].pattern == pattern
assert matches[1].span == (46, 53)
assert matches[1].value == "annoyed"
assert isinstance(matches[2], Match)
assert matches[2].pattern == pattern
assert matches[2].span == (88, 94)
assert matches[2].value == "Hebrew"
def test_start_end_kwargs(self):
pattern = StringPattern("Abyssinian", start=20, end=40)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
def test_matches_kwargs(self):
pattern = StringPattern("Abyssinian", name="test", value="AB")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert matches[0].name == "test"
assert matches[0].value == "AB"
def test_post_processor(self):
def post_processor(matches, pattern):
assert len(matches) == 1
assert isinstance(pattern, StringPattern)
return []
pattern = StringPattern("Abyssinian", name="test", value="AB", post_processor=post_processor)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
class TestRePattern(object):
"""
Tests for RePattern matching
"""
input_string = "An Abyssinian fly playing a Celtic violin was annoyed by trashy flags on " \
"which were the Hebrew letter qoph."
def test_single_compiled(self):
pattern = RePattern(re.compile("Celt.?c"))
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (28, 34)
assert matches[0].value == "Celtic"
def test_single_string(self):
pattern = RePattern("Celt.?c")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (28, 34)
assert matches[0].value == "Celtic"
def test_single_kwargs(self):
pattern = RePattern({"pattern": "celt.?c", "flags": re.IGNORECASE})
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (28, 34)
assert matches[0].value == "Celtic"
def test_single_vargs(self):
pattern = RePattern(("celt.?c", re.IGNORECASE))
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (28, 34)
assert matches[0].value == "Celtic"
def test_no_match(self):
pattern = RePattern("abc.?def")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
def test_shortcuts(self):
pattern = RePattern("Celtic-violin", abbreviations=[("-", r"[\W_]+")])
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
pattern = RePattern({"pattern": "celtic-violin", "flags": re.IGNORECASE}, abbreviations=[("-", r"[\W_]+")])
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
def test_multiple_patterns(self):
pattern = RePattern("pla.?ing", "ann.?yed", "Heb.?ew")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 3
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (18, 25)
assert matches[0].value == "playing"
assert isinstance(matches[1], Match)
assert matches[1].pattern == pattern
assert matches[1].span == (46, 53)
assert matches[1].value == "annoyed"
assert isinstance(matches[2], Match)
assert matches[2].pattern == pattern
assert matches[2].span == (88, 94)
assert matches[2].value == "Hebrew"
def test_unnamed_groups(self):
pattern = RePattern(r"(Celt.?c)\s+(\w+)")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
parent = matches[0]
assert isinstance(parent, Match)
assert parent.pattern == pattern
assert parent.span == (28, 41)
assert parent.name is None
assert parent.value == "Celtic violin"
assert len(parent.children) == 2
group1, group2 = parent.children
assert isinstance(group1, Match)
assert group1.pattern == pattern
assert group1.span == (28, 34)
assert group1.name is None
assert group1.value == "Celtic"
assert group1.parent == parent
assert isinstance(group2, Match)
assert group2.pattern == pattern
assert group2.span == (35, 41)
assert group2.name is None
assert group2.value == "violin"
assert group2.parent == parent
def test_named_groups(self):
pattern = RePattern(r"(?P<param1>Celt.?c)\s+(?P<param2>\w+)")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
parent = matches[0]
assert isinstance(parent, Match)
assert parent.pattern == pattern
assert parent.span == (28, 41)
assert parent.name is None
assert parent.value == "Celtic violin"
assert len(parent.children) == 2
group1, group2 = parent.children
assert isinstance(group1, Match)
assert group1.pattern == pattern
assert group1.span == (28, 34)
assert group1.name == "param1"
assert group1.value == "Celtic"
assert group1.parent == parent
assert isinstance(group2, Match)
assert group2.pattern == pattern
assert group2.span == (35, 41)
assert group2.name == "param2"
assert group2.value == "violin"
assert group2.parent == parent
def test_children(self):
pattern = RePattern(r"(?P<param1>Celt.?c)\s+(?P<param2>\w+)", children=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 2
group1, group2 = matches
assert isinstance(group1, Match)
assert group1.pattern == pattern
assert group1.span == (28, 34)
assert group1.name == "param1"
assert group1.value == "Celtic"
assert isinstance(group2, Match)
assert group2.pattern == pattern
assert group2.span == (35, 41)
assert group2.name == "param2"
assert group2.value == "violin"
def test_children_parent_private(self):
pattern = RePattern(r"(?P<param1>Celt.?c)\s+(?P<param2>\w+)", children=True, private_parent=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 3
parent, group1, group2 = matches
assert isinstance(group1, Match)
assert parent.private
assert parent.pattern == pattern
assert parent.span == (28, 41)
assert parent.name is None
assert parent.value == "Celtic violin"
assert isinstance(group1, Match)
assert not group1.private
assert group1.pattern == pattern
assert group1.span == (28, 34)
assert group1.name == "param1"
assert group1.value == "Celtic"
assert isinstance(group2, Match)
assert not group2.private
assert group2.pattern == pattern
assert group2.span == (35, 41)
assert group2.name == "param2"
assert group2.value == "violin"
def test_parent_children_private(self):
pattern = RePattern(r"(?P<param1>Celt.?c)\s+(?P<param2>\w+)", private_children=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 3
parent, group1, group2 = matches
assert isinstance(group1, Match)
assert not parent.private
assert parent.pattern == pattern
assert parent.span == (28, 41)
assert parent.name is None
assert parent.value == "Celtic violin"
assert isinstance(group1, Match)
assert group1.private
assert group1.pattern == pattern
assert group1.span == (28, 34)
assert group1.name == "param1"
assert group1.value == "Celtic"
assert isinstance(group2, Match)
assert group2.private
assert group2.pattern == pattern
assert group2.span == (35, 41)
assert group2.name == "param2"
assert group2.value == "violin"
def test_every(self):
pattern = RePattern(r"(?P<param1>Celt.?c)\s+(?P<param2>\w+)", every=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 3
parent, group1, group2 = matches
assert isinstance(group1, Match)
assert not parent.private
assert parent.pattern == pattern
assert parent.span == (28, 41)
assert parent.name is None
assert parent.value == "Celtic violin"
assert isinstance(group1, Match)
assert not group1.private
assert group1.pattern == pattern
assert group1.span == (28, 34)
assert group1.name == "param1"
assert group1.value == "Celtic"
assert isinstance(group2, Match)
assert not group2.private
assert group2.pattern == pattern
assert group2.span == (35, 41)
assert group2.name == "param2"
assert group2.value == "violin"
def test_private_names(self):
pattern = RePattern(r"(?P<param1>Celt.?c)\s+(?P<param2>\w+)", private_names=["param2"], children=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 2
assert matches[0].name == "param1"
assert not matches[0].private
assert matches[1].name == "param2"
assert matches[1].private
def test_ignore_names(self):
pattern = RePattern(r"(?P<param1>Celt.?c)\s+(?P<param2>\w+)", ignore_names=["param2"], children=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert matches[0].name == "param1"
def test_matches_kwargs(self):
pattern = RePattern("He.rew", name="test", value="HE")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert matches[0].name == "test"
assert matches[0].value == "HE"
pattern = RePattern("H(e.)(rew)", name="test", value="HE")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert matches[0].name == "test"
assert matches[0].value == "HE"
children = matches[0].children
assert len(children) == 2
assert children[0].name == "test"
assert children[0].value == "HE"
assert children[1].name == "test"
assert children[1].value == "HE"
pattern = RePattern("H(?P<first>e.)(?P<second>rew)", name="test", value="HE")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert matches[0].name == "test"
assert matches[0].value == "HE"
children = matches[0].children
assert len(children) == 2
assert children[0].name == "first"
assert children[0].value == "HE"
assert children[1].name == "second"
assert children[1].value == "HE"
class TestFunctionalPattern(object):
"""
Tests for FunctionalPattern matching
"""
input_string = "An Abyssinian fly playing a Celtic violin was annoyed by trashy flags on " \
"which were the Hebrew letter qoph."
def test_single_vargs(self):
def func(input_string):
i = input_string.find("fly")
if i > -1:
return i, i + len("fly"), "fly", "functional"
pattern = FunctionalPattern(func)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (14, 17)
assert matches[0].name == "functional"
assert matches[0].value == "fly"
def test_single_kwargs(self):
def func(input_string):
i = input_string.find("fly")
if i > -1:
return {"start": i, "end": i + len("fly"), "name": "functional"}
pattern = FunctionalPattern(func)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (14, 17)
assert matches[0].name == "functional"
assert matches[0].value == "fly"
def test_multiple_objects(self):
def func(input_string):
i = input_string.find("fly")
matches = []
if i > -1:
matches.append((i, i + len("fly"), {'name': "functional"}))
i = input_string.find("annoyed")
if i > -1:
matches.append((i, i + len("annoyed")))
i = input_string.find("Hebrew")
if i > -1:
matches.append({"start": i, "end": i + len("Hebrew")})
return matches
pattern = FunctionalPattern(func)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 3
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (14, 17)
assert matches[0].name == "functional"
assert matches[0].value == "fly"
assert isinstance(matches[1], Match)
assert matches[1].pattern == pattern
assert matches[1].span == (46, 53)
assert matches[1].value == "annoyed"
assert isinstance(matches[2], Match)
assert matches[2].pattern == pattern
assert matches[2].span == (88, 94)
assert matches[2].value == "Hebrew"
def test_multiple_generator(self):
def func(input_string):
i = input_string.find("fly")
if i > -1:
yield (i, i + len("fly"), {'name': "functional"})
i = input_string.find("annoyed")
if i > -1:
yield (i, i + len("annoyed"))
i = input_string.find("Hebrew")
if i > -1:
yield (i, {"end": i + len("Hebrew")})
pattern = FunctionalPattern(func)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 3
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (14, 17)
assert matches[0].name == "functional"
assert matches[0].value == "fly"
assert isinstance(matches[1], Match)
assert matches[1].pattern == pattern
assert matches[1].span == (46, 53)
assert matches[1].value == "annoyed"
assert isinstance(matches[2], Match)
assert matches[2].pattern == pattern
assert matches[2].span == (88, 94)
assert matches[2].value == "Hebrew"
def test_no_match(self):
pattern = FunctionalPattern(lambda x: None)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
def test_multiple_patterns(self):
def playing(input_string):
i = input_string.find("playing")
if i > -1:
return i, i + len("playing")
def annoyed(input_string):
i = input_string.find("annoyed")
if i > -1:
return i, i + len("annoyed")
def hebrew(input_string):
i = input_string.find("Hebrew")
if i > -1:
return i, i + len("Hebrew")
pattern = FunctionalPattern(playing, annoyed, hebrew)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 3
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (18, 25)
assert matches[0].value == "playing"
assert isinstance(matches[1], Match)
assert matches[1].pattern == pattern
assert matches[1].span == (46, 53)
assert matches[1].value == "annoyed"
assert isinstance(matches[2], Match)
assert matches[2].pattern == pattern
assert matches[2].span == (88, 94)
assert matches[2].value == "Hebrew"
def test_matches_kwargs(self):
def playing(input_string):
i = input_string.find("playing")
if i > -1:
return i, i + len("playing")
pattern = FunctionalPattern(playing, name="test", value="PLAY")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert matches[0].name == "test"
assert matches[0].value == "PLAY"
class TestValue(object):
"""
Tests for value option
"""
input_string = "This string contains 1849 a number"
def test_str_value(self):
pattern = StringPattern("1849", name="dummy", value="test")
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (21, 25)
assert matches[0].value == "test"
def test_dict_child_value(self):
pattern = RePattern(r"(?P<strParam>cont.?ins)\s+(?P<intParam>\d+)",
formatter={'intParam': lambda x: int(x) * 2,
'strParam': lambda x: "really " + x},
format_all=True,
value={'intParam': 'INT_PARAM_VALUE'})
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
parent = matches[0]
assert len(parent.children) == 2
group1, group2 = parent.children
assert isinstance(group1, Match)
assert group1.pattern == pattern
assert group1.span == (12, 20)
assert group1.value == "really contains"
assert isinstance(group2, Match)
assert group2.pattern == pattern
assert group2.span == (21, 25)
assert group2.value == 'INT_PARAM_VALUE'
def test_dict_default_value(self):
pattern = RePattern(r"(?P<strParam>cont.?ins)\s+(?P<intParam>\d+)",
formatter={'intParam': lambda x: int(x) * 2,
'strParam': lambda x: "really " + x},
format_all=True,
value={'__children__': 'CHILD', 'strParam': 'STR_VALUE', '__parent__': 'PARENT'})
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
parent = matches[0]
assert parent.value == "PARENT"
assert len(parent.children) == 2
group1, group2 = parent.children
assert isinstance(group1, Match)
assert group1.pattern == pattern
assert group1.span == (12, 20)
assert group1.value == "STR_VALUE"
assert isinstance(group2, Match)
assert group2.pattern == pattern
assert group2.span == (21, 25)
assert group2.value == "CHILD"
class TestFormatter(object):
"""
Tests for formatter option
"""
input_string = "This string contains 1849 a number"
def test_single_string(self):
pattern = StringPattern("1849", name="dummy", formatter=lambda x: int(x) / 2)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (21, 25)
assert matches[0].value == 1849 / 2
def test_single_re_no_group(self):
pattern = RePattern(r"\d+", formatter=lambda x: int(x) * 2)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (21, 25)
assert matches[0].value == 1849 * 2
def test_single_re_named_groups(self):
pattern = RePattern(r"(?P<strParam>cont.?ins)\s+(?P<intParam>\d+)",
formatter={'intParam': lambda x: int(x) * 2,
'strParam': lambda x: "really " + x}, format_all=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
parent = matches[0]
assert len(parent.children) == 2
group1, group2 = parent.children
assert isinstance(group1, Match)
assert group1.pattern == pattern
assert group1.span == (12, 20)
assert group1.value == "really contains"
assert isinstance(group2, Match)
assert group2.pattern == pattern
assert group2.span == (21, 25)
assert group2.value == 1849 * 2
def test_repeated_captures_option(self):
pattern = RePattern(r"\[(\d+)\](?:-(\d+))*")
matches = list(pattern.matches("[02]-03-04-05-06"))
assert len(matches) == 1
match = matches[0]
if REGEX_ENABLED:
assert len(match.children) == 5
assert [child.value for child in match.children] == ["02", "03", "04", "05", "06"]
else:
assert len(match.children) == 2
assert [child.value for child in match.children] == ["02", "06"]
with pytest.raises(NotImplementedError):
RePattern(r"\[(\d+)\](?:-(\d+))*", repeated_captures=True)
pattern = RePattern(r"\[(\d+)\](?:-(\d+))*", repeated_captures=False)
matches = list(pattern.matches("[02]-03-04-05-06"))
assert len(matches) == 1
match = matches[0]
assert len(match.children) == 2
assert [child.value for child in match.children] == ["02", "06"]
def test_single_functional(self):
def digit(input_string):
i = input_string.find("1849")
if i > -1:
return i, i + len("1849")
pattern = FunctionalPattern(digit, formatter=lambda x: int(x) * 3)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
assert isinstance(matches[0], Match)
assert matches[0].pattern == pattern
assert matches[0].span == (21, 25)
assert matches[0].value == 1849 * 3
class TestValidator(object):
"""
Tests for validator option
"""
input_string = "This string contains 1849 a number"
@staticmethod
def true_validator(match):
return int(match.value) < 1850
@staticmethod
def false_validator(match):
return int(match.value) >= 1850
def test_single_string(self):
pattern = StringPattern("1849", name="dummy", validator=self.false_validator)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
pattern = StringPattern("1849", validator=self.true_validator)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
def test_single_re_no_group(self):
pattern = RePattern(r"\d+", validator=self.false_validator)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
pattern = RePattern(r"\d+", validator=self.true_validator)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
def test_single_re_named_groups(self):
pattern = RePattern(r"(?P<strParam>cont.?ins)\s+(?P<intParam>\d+)",
validator={'intParam': self.false_validator}, validate_all=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
pattern = RePattern(r"(?P<strParam>cont.?ins)\s+(?P<intParam>\d+)",
validator={'intParam': self.true_validator}, validate_all=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
def test_validate_all(self):
pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, validator=lambda match: match.value < 100,
children=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, validator=lambda match: match.value > 100,
children=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
def invalid_func(match):
if match.name == 'intParam':
return True
return match.value.startswith('abc')
pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, validator=invalid_func, validate_all=True,
children=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
def func(match):
if match.name == 'intParam':
return True
return match.value.startswith('contains')
pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, validator=func, validate_all=True,
children=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
def test_format_all(self):
pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int,
children=True)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
for match in matches:
assert match.value is not None
with pytest.raises(ValueError):
pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, format_all=True)
matches = list(pattern.matches(self.input_string))
for match in matches:
assert match.value is not None
def test_single_functional(self):
def digit(input_string):
i = input_string.find("1849")
if i > -1:
return i, i + len("1849")
pattern = FunctionalPattern(digit, validator=self.false_validator)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 0
pattern = FunctionalPattern(digit, validator=self.true_validator)
matches = list(pattern.matches(self.input_string))
assert len(matches) == 1
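# Minimal standalone sketch (not a test case): how the pattern API exercised above
# is typically used outside of pytest, relying only on behaviour the tests show
# (matches() yields Match objects exposing span, value and name).
def _example_find_words(text):
    pattern = RePattern("Celt.?c", name="word")
    return [(match.span, match.value, match.name) for match in pattern.matches(text)]
# _example_find_words("a Celtic violin") would return [((2, 8), 'Celtic', 'word')].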
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RDF Helper Module.
Utility Code to create RDF Class and other RDF generic relationships
"""
from __future__ import print_function
import datetime
from absl import logging
from rdfformat.generator import constants
import rdflib
from rdflib.extras import infixowl
def CreateClassInGraph(graph,
class_name,
class_description,
parent_clazz,
entity_namespace=constants.DIGITAL_BUILDINGS_NS):
"""Utility function to create an RDF OWL Class and adds to the provided graph.
Creates an RDF OWL Class with the input parameters
OWL: Ontology Web Language
RDF: Resource Description Framework
Args:
graph: the global graph where the ontology is being built in RDF.
class_name: name of the RDF Class.
class_description: the RDF Class description.
parent_clazz: the parent RDF Class.
entity_namespace: the entity_namespace to be used, DIGITAL_BUILDINGS_NS is
by default
Returns:
graph: a graph with the newly built class
clazz_object: the class object created
"""
clazz = rdflib.URIRef(entity_namespace[class_name])
# Add the OWL data to the graph
clazz_tuple = (clazz, rdflib.RDF.type, rdflib.OWL.Class)
graph.add(clazz_tuple)
graph.add((clazz, rdflib.RDFS.subClassOf, parent_clazz))
graph.add((clazz, rdflib.RDFS.label, rdflib.Literal(class_name)))
if class_description is not None:
graph.add((clazz, rdflib.RDFS.comment, rdflib.Literal(class_description)))
graph.commit()
return graph, clazz_tuple
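# Illustrative usage sketch (not part of the original module): building a single
# class in a fresh graph. The class name and description are hypothetical.
def _ExampleCreateClass():
  graph = rdflib.Graph()
  return CreateClassInGraph(
      graph=graph,
      class_name='Vav',
      class_description='Variable air volume terminal unit',
      parent_clazz=rdflib.OWL.Thing)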
def CreateInstanceInGraph(graph,
instance_name,
instance_description,
parent_clazz,
entity_namespace=constants.DIGITAL_BUILDINGS_NS):
"""Utility function to create an RDF OWL Instance
and adds to the provided graph.
Creates an RDF OWL Instance with the input parameters
OWL: Ontology Web Language
RDF: Resource Description Framework
Args:
graph: the global graph where the ontology is being built in RDF.
instance_name: name of the RDF Instance.
instance_description: the RDF Instance description.
parent_clazz: the parent RDF Class.
entity_namespace: the entity_namespace of the RDF Class.
Returns:
a graph with the newly built class
instance object created
"""
instance = rdflib.URIRef(entity_namespace[instance_name])
# Add the OWL data to the graph
instance_tuple = (instance, rdflib.RDF.type, rdflib.OWL.NamedIndividual)
graph.add(instance_tuple)
graph.add((instance, rdflib.RDF.type, parent_clazz[0]))
graph.add((instance, rdflib.RDFS.label, rdflib.Literal(instance_name)))
if instance_description is not None:
graph.add(
(instance, rdflib.RDFS.comment, rdflib.Literal(instance_description)))
graph.commit()
return graph, instance_tuple
def CreateDataPropertyInGraph(graph,
data_property_name,
data_property_description,
entity_namespace=constants.DIGITAL_BUILDINGS_NS):
"""Utility function to create an OWL Data Property relation
and adds to the provided graph.
Creates an RDF OWL Data Property with the input parameters
OWL: Ontology Web Language
RDF: Resource Description Framework
Args:
graph: the global graph where the ontology is being built in RDF.
data_property_name: name of the OWL Data Property
data_property_description: the RDF property description.
entity_namespace: the entity_namespace of the RDF DataProperty.
Returns:
a graph with the newly built class
the data property object created
"""
data_property = rdflib.URIRef(entity_namespace[data_property_name])
# Add the OWL data to the graph
data_property_tuple = (data_property, rdflib.RDF.type,
rdflib.OWL.DatatypeProperty)
graph.add(data_property_tuple)
graph.add(
(data_property, rdflib.RDFS.label, rdflib.Literal(data_property_name)))
if data_property_description is not None:
graph.add((data_property, rdflib.RDFS.comment,
rdflib.Literal(data_property_description)))
graph.commit()
return graph, data_property_tuple
def CreateObjectPropertyInGraph(
graph,
object_property_name,
object_property_description,
entity_namespace=constants.DIGITAL_BUILDINGS_NS):
"""Utility function to create an OWL Object Property relation
and adds to the provided graph.
Creates an RDF OWL Object Property with the input parameters
OWL: Ontology Web Language
RDF: Resource Description Framework
Args:
graph: the global graph where the ontology is being built in RDF.
object_property_name: name of the OWL Data Property
object_property_description: the RDF Instance description.
entity_namespace : the entity_namespace of the RDF Instance
Returns:
a graph with the newly built class
the object property tuple created
"""
object_property = rdflib.URIRef(entity_namespace[object_property_name])
# Add the OWL data to the graph
  object_property_tuple = (object_property, rdflib.RDF.type,
                           rdflib.OWL.ObjectProperty)
graph.add(object_property_tuple)
graph.add((object_property, rdflib.RDFS.label,
rdflib.Literal(object_property_name)))
if object_property_description is not None:
graph.add((object_property, rdflib.RDFS.comment,
rdflib.Literal(object_property_description)))
graph.commit()
return graph, object_property_tuple
def CreatesStandardFieldNameCompositionInGraph(list_composition,
standard_field_name,
is_composed_of_property, graph):
"""Utility function takes a standard_field_name from the ontology
and returns its composition constraint.
Args:
list_composition: a list of composition of standard field name defined by
Carson. Example: ["Run", "Command"]
standard_field_name: an ontology standard field name from Carson. Example:
run_command
is_composed_of_property: the property used to compose the standard field
names, such as "is_composed_of"
    graph: the global graph. Example: for the input run_command, the returned
      graph contains run_command is_composed_of only (Run and Command),
      run_command subClassOf Command, and Command subClassOf Point_type.
Returns:
graph: updated graph with the composition.
"""
class_owl = infixowl.Class(
identifier=constants.FIELDS_NS[standard_field_name.capitalize()],
graph=graph)
graph = CreateCompositionInGraph(
list_standard_field_names=list_composition,
composition_operator="&",
composition_property=is_composed_of_property,
restriction=infixowl.only,
class_owl=class_owl,
graph=graph,
entity_namespace=constants.SUBFIELDS_NS)
return graph
def CreateCompositionInGraph(list_standard_field_names,
composition_operator,
composition_property,
restriction,
class_owl,
graph,
entity_namespace=constants.DIGITAL_BUILDINGS_NS,
sub_class_of=None):
"""Utility function that creates composition from a given list
based on a composition operator and a restriction.
  The created composition conforms to the following pattern:
class_owl = [composition_property | restriction |
list_standard_field_names[i] | composition_operator |
list_standard_field_names [j]
with i != j
Args:
list_standard_field_names: a list of standard field name defined by Carson.
Example [compressor_run_status_4, supply_water_temperature_sensor,
supply_water_temperature_setpoint]
composition_operator: an "&" or "|" operator
composition_property: the property which will relate the class and the list
of standard field names
restriction: the restriction imposed on the composition, "only" or "some"
class_owl: the class where the composition is attached to.
graph: the global graph
entity_namespace: the name space for the composition elements
sub_class_of: the subClass of the composition elements
Returns:
graph: updated graph with the composition.
"""
index = 0
if list_standard_field_names:
# Prepare the first element
first_element = infixowl.Class(
identifier=entity_namespace[
list_standard_field_names[index].capitalize()],
graph=graph,
subClassOf=sub_class_of)
index += 1
# Prepare the second element since the "&" operator is needed to determine
# the nature of the composition
if index < len(list_standard_field_names):
if composition_operator == "&":
concat = first_element & infixowl.Class(
identifier=entity_namespace[
list_standard_field_names[index].capitalize()],
graph=graph,
subClassOf=sub_class_of)
elif composition_operator == "|":
concat = first_element | infixowl.Class(
identifier=entity_namespace[
list_standard_field_names[index].capitalize()],
graph=graph,
subClassOf=sub_class_of)
else:
logging.error("Unknown operator %s", composition_operator)
return graph
else: # there is only one element
class_owl.subClassOf = [
composition_property | restriction | first_element
]
return graph
index += 1
# append the rest of the elements
while index < len(list_standard_field_names):
concat += infixowl.Class(
identifier=entity_namespace[
list_standard_field_names[index].capitalize()],
graph=graph,
subClassOf=sub_class_of)
index += 1
class_owl.subClassOf = [composition_property | restriction | concat]
return graph
def DecomposeStandardFieldName(standard_field_name):
"""Utility function takes a standard_field_name from the ontology
and returns its composition.
Example: [run_command_1] -> ["Run", "Command"]
Args:
standard_field_name: a standard field name defined by Carson.
Returns:
list: a list of concepts that composes the standard field name.
"""
split_points_data = standard_field_name.split("_")
filtered_list = []
for item in split_points_data:
if not item.isdigit():
filtered_list.append(item.capitalize())
return filtered_list
def CreatesImplementsInGraph(graph, implements_list, applications_set,
application_class, class_object):
"""Utility function to handle the inheritance of types
when the implements relation is used in the yaml file.
Example: class_object subClassOf implements_list["CONTROL", "MONITORING"].
Args:
graph: the global rdf graph
implements_list: a list of implements from the yaml file
applications_set: a set which contains the application already created in
the graph to avoid recreating them.
application_class: the application mother class which the implementation
list inherits from
class_object: the current class which is a subclass of the implements list
Returns:
graph: an updated graph
applications_set: an updated application set
"""
for implements_item in implements_list:
application_sub_item = implements_item.capitalize()
# Create the class only if it is not in the Set yet
if application_sub_item not in applications_set:
applications_set.add(application_sub_item)
graph, application_sub_class = CreateClassInGraph(
graph=graph,
class_name=application_sub_item,
class_description=None,
parent_clazz=application_class[0]
) # getting the name of the class from the tuple
application_sub_class = constants.DIGITAL_BUILDINGS_NS[
implements_item.capitalize()]
graph.add((class_object[0], rdflib.RDFS.subClassOf, application_sub_class))
return graph, applications_set
def GetTimeNow():
"""Utility function returns the time in the
following format %Y/%m/%d-%H:%M:%S.
Returns:
dt_string: an updated graph.
"""
# datetime object containing current date and time
dt_string = datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S")
return dt_string
def ReadFile(filename):
"""Utility function reads a file and returns its content.
Args:
filename: the file to read.
Returns:
The file content.
"""
with open(filename, "r", encoding="utf-8") as data_file:
return data_file.read()
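# End-to-end sketch (not part of the original module): create a class plus an
# instance of it, then serialize the resulting graph to Turtle. The names used
# here are hypothetical; real callers pass ontology-driven names and descriptions.
def _ExampleBuildMiniOntology():
  graph = rdflib.Graph()
  graph, clazz = CreateClassInGraph(graph, 'Fan', 'A fan type', rdflib.OWL.Thing)
  graph, _ = CreateInstanceInGraph(graph, 'Fan_1', 'A fan instance', clazz)
  return graph.serialize(format='turtle')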
|
|
import time
import pytz
import json
import math
import serial
from datetime import datetime
from threading import Thread
class Logging:
""" An object to log data from the arduino and to convert data to the JSON-format. """
def __init__(self, arduino, omtrek_wiel, trip_name):
self.arduino = arduino
self.timeZone = pytz.timezone('Europe/Brussels')
self.startTimeLogging = None
self.startTimeLoggingOffset = None
self.GPSdataList = list()
self.temperatureList = list()
self.altiList = list()
self.voorwielList = list()
self.achterList = list()
self.current_time = None
self._logging = None
self.logging_interval = None
# omtrek_wiel in meter
self.omtrek_wiel = omtrek_wiel
# Create a thread for the logging of the sensors
self._thread = Thread(target=self._start_logging_thread)
self._thread.daemon = True
self.trip_name = trip_name
self.distance_km = 0.0
self.speed_kmh = 0.0
def time_is_accurate(self):
""" Boolean that returns True if get_current_time()
is accurate, False if not """
return not (self.current_time is None)
def start_logging(self, time_interval=0.2):
"""Start main logging thread"""
if not self.logging_interval:
self.logging_interval = time_interval
if self._logging:
if self._logging == 'paused':
self._logging = 'logging'
return True
return False
self._thread.start()
return True
def pause_logging(self):
"""Pauses the logging, call start_logging to restart and stop_logging
to stop.
"""
if self._logging == 'stopped' or self._logging == 'ended':
return False
self._logging = 'paused'
def stop_logging(self):
"""Safely stops the logging and returns the data as json,
once logging has stopped it can't be started again!
"""
if self._logging == 'ended':
return self.scheme
self._logging = 'stopped'
while self._logging != 'ended':
time.sleep(0.01)
self._generate_json()
self.arduino.close()
return self.scheme
def get_current_time(self):
""" Returns the current time,
the system time if unavailable """
if self.current_time:
return self.current_time
if self.startTimeLoggingOffset:
return self._offsetTime(time.time())
return datetime.now()
def get_trip_name(self):
"""Getter for the current trip name"""
return self.trip_name
def _start_logging_thread(self):
while self._logging != 'ended':
self._logging_controller()
time.sleep(self.logging_interval)
return 'ended'
def _logging_controller(self):
""" Call once to start logging proces, can be paused with pause_logging
and stopped with stop_logging """
if not self.logging_interval:
self.logging_interval = time_interval
if self._logging == 'stopped' or self._logging == 'ended':
return False
if not self.startTimeLogging:
self.startTimeLogging = time.time()
self._logging = 'logging'
while self._logging == 'logging':
try:
self._log()
            except Exception:
                # Ignore malformed serial reads and keep the logging loop alive
                pass
time.sleep(self.logging_interval)
if self._logging == 'stopped':
self._logging = 'ended'
def _log(self):
""" Function that logs all the sensors once. """
# Flush the input buffer to make sure we get the latest data
self.arduino.flushInput()
# Send 1 over serial to get the GPS data
# a to get the pressure and temperature
# ... see Serial_Hallsensoren.ino in Arduino code
self.arduino.write("1abc\n")
# This line should contain the GPS time data
GPSline1 = self.arduino.readline()
# This line should contain if the GPS has a fix
GPSline2 = self.arduino.readline()
# The arduino only sends the position data if it has a fix
if len(GPSline1) > 0 and len(GPSline2) > 0:
if GPSline2[2] == '1':
# This line should contain the GPS position data
GPSline3 = self.arduino.readline()
splitGPS = GPSline1.split(':')
# Basic test to make sure the month and day data is valid
if not int(splitGPS[2]) or not int(splitGPS[3]):
valid = False
else:
valid = True
# Some other test to check if data was sent correctly
if not self.startTimeLoggingOffset and GPSline1[0] == 'T' and \
GPSline2[2] == '1' and valid:
# Determine the offset between the system time of the raspberry pi
# and the real time (GPS time)
print('Time set')
self.startTimeLoggingOffset = self._GPStime(GPSline1)
# to get the real time use:
                    # self._offsetTime(self.startTimeLogging)
# More tests to see if location data is valid
if GPSline2[0] == 'F' and GPSline2[2] == '1' and GPSline3[0] == 'L' \
and valid:
# Time is in gmt so localize it to timezone 'Brussels'
self.current_time = self._localizeGPStime(GPSline1)
# coordinates have to be converted to fit the google API formatting
self.GPSdataList += [self._GPSposition(GPSline3) +
(self.current_time, self._convertGPSspeed(GPSline3)), ]
else:
self.current_time = None
# temperatureLine = self.arduino.readline()
# if self.current_time:
# self.temperatureList.append(temperatureLine, "Current", self.current_time)
# else:
# self.temperatureList.append(temperatureLine, "Unadjusted", time.time())
# Log the altimeter
altimeterLine = self.arduino.readline()
if altimeterLine and altimeterLine[0] == 'P':
altimeterLine = altimeterLine.split(':')[1:]
altimeterLine[1] = altimeterLine[1].rstrip()
if self.current_time:
self.altiList.append([altimeterLine, "Current", self.current_time],)
else:
# If there is no GPS time available, the time will have to be adjusted
self.altiList.append([altimeterLine, "Unadjusted", time.time()],)
# Log the hall sensor in the wheel
voorwielLine = self.arduino.readline()
print(voorwielLine)
if voorwielLine and voorwielLine[0] == 'V':
voorwielLine = voorwielLine.rstrip()
voorwielLine = voorwielLine.split(':')[1:]
if self.current_time:
self.voorwielList.append([voorwielLine, "Current", self.current_time],)
else:
# If there is no GPS time available, the time will have to be adjusted
self.voorwielList.append([voorwielLine, "Unadjusted", time.time()],)
# Log the hall sensor on the crank
achterLine = self.arduino.readline()
print(achterLine)
if achterLine and achterLine[0] == 'A':
achterLine = achterLine.rstrip()
achterLine = achterLine.split(':')[1:]
if self.current_time:
self.achterList.append([achterLine, "Current", self.current_time],)
else:
# If there is no GPS time available, the time will have to be adjusted
self.achterList.append([achterLine, "Unadjusted", time.time()],)
def total_distance_hall(self):
# Total number of rotations * circumference wheel
if len(self.voorwielList):
return (self.omtrek_wiel * float(self.voorwielList[-1][0][0]))/1000.
else:
return 0
def speed_hall(self):
if not len(self.voorwielList):
return 0
        # elapsedTime is the time between the two last passings of the magnet
# in front of the hall sensor in the wheel.
elapsedTime = float(self.voorwielList[-1][0][1])
return self._speedHall(elapsedTime)
def _speedHall(self, elapsedTime):
if elapsedTime == 0:
return 0
velocity_ms = self.omtrek_wiel / elapsedTime
velocity_kmh = velocity_ms * 3.6
return velocity_kmh
def _rpmHall(self, elapsedTime):
if elapsedTime == 0:
return 0
rpm = 60. / elapsedTime
return rpm
def _GPStime(self, line):
"""Returns the time difference between the internal clock
        from the Raspberry Pi and the time given by the GPS receiver
"""
GPSlist = line.split(':')
for i in range(1, 6):
while len(GPSlist[i]) < 2:
GPSlist[i] = '0' + GPSlist[i]
while len(GPSlist[7]) < 3:
            GPSlist[7] = '0' + GPSlist[7]
# Check to see if the difference between the internal clock
# of the raspberry pi and the GPS time is bigger than 1
if abs(
float(''.join(GPSlist[1:7]) + '.' + GPSlist[7]) \
- float(time.strftime("%y%m%d%H%M%S",time.gmtime()) + str(time.time()%1))) > 1:
# Convert the GPS time to second since epoch
RealTime = ':'.join(GPSlist[1:7])
GPStimestamp = time.mktime(datetime.strptime(RealTime, "%y:%m:%d:%H:%M:%S").timetuple()) + float('0.' + GPSlist[7])
return (GPStimestamp - time.time())
return 0.
def _offsetTime(self, wrong_time):
"""Returns the time in ISO format.
Input arguments:
time: the time in time since epoch
offset: the offset for the time given, in seconds (float)"""
utcTime = pytz.utc.localize(datetime.fromtimestamp(wrong_time + self.startTimeLoggingOffset))
return (utcTime.astimezone(self.timeZone))
def _GPSposition(self, line):
""" Converts the 3rd gps line (the location line)
to a tuple containing the coordinates in a format
suitable for the google API. """
line = line.split(':')
        data = list(map(float, line[1:3]))  # list() so indexing also works on Python 3
lat_sign = -1 if data[0] < 0 else 1
data[0] = abs(data[0])
lon_sign = -1 if data[1] < 0 else 1
data[1] = abs(data[1])
lat_degree = float(data[0] // 100.)
lat_minute = float(data[0] % 100.)
lat_coordinates = (lat_degree + (lat_minute/60.))*lat_sign
lon_degree = float(data[1] // 100.)
lon_minute = float(data[1] % 100.)
        lon_coordinates = (lon_degree + (lon_minute/60.))*lon_sign
return (lat_coordinates, lon_coordinates)
    def _convertGPSspeed(self, line):
        """ Converts the gps speed (in knots) from the GPS location line to km/h. """
        # The location line is colon separated; the speed over ground in knots is
        # assumed to be the field following latitude and longitude (index 3).
        speed_knots = float(line.split(':')[3])
        return speed_knots * 1.852  # 1 knot = 1.852 km/h
    def _localizeGPStime(self, line1):
        """ Localizes the time in the first (time containing) GPS line """
        gps_time = datetime.strptime(line1[2:-2], "%y:%m:%d:%H:%M:%S:%f")
        utcTime = pytz.utc.localize(gps_time)
        return utcTime.astimezone(self.timeZone)
def _generate_GPS_sensordata(self, GPS_data_list):
"""Converts GPS_data_list to a format that can be
passed to generate_scheme
"""
sensorData = list()
for i in range(len(GPS_data_list)):
datapoint = {"sensorID": 1}
datapoint["timestamp"] = GPS_data_list[i][2].isoformat()
datapoint["data"] = [{"type": "Point",
"coordinates": list(GPS_data_list[i][0:2]),
"unit":"google",
"speed": [GPS_data_list[i][3]]}]
sensorData.append(datapoint)
return sensorData
def _generate_thermo_sensordata(self, thermo_data_list):
"""Converts thermo_data_list to a format that can be
passed to generate_scheme
"""
sensorData = list()
for i in range(len(thermo_data_list)):
datapoint = {"sensorID": 3}
if thermo_data_list[i][1] == 'Current':
datapoint["timestamp"] = thermo_data_list[i][2].isoformat()
else:
datapoint["timestamp"] = self._offsetTime(thermo_data_list[i][2]).isoformat()
datapoint["data"] = {"value": list(thermo_data_list[i][0])}
sensorData.append(datapoint)
return sensorData
def _generate_alti_sensordata(self, alti_data_list):
"""Converts alti_data_list to a format that can be
passed to generate_scheme
"""
sensorData = list()
for i in range(len(alti_data_list)):
datapoint = {"sensorID": 10}
if alti_data_list[i][1] == 'Current':
datapoint["timestamp"] = alti_data_list[i][2].isoformat()
else:
datapoint["timestamp"] = self._offsetTime(alti_data_list[i][2]).isoformat()
datapoint["data"] = [{"pressure": [float(alti_data_list[i][0][0])],
"temperature": [float(alti_data_list[i][0][1])],
"height": [float(alti_data_list[i][0][1])/4]}]
sensorData.append(datapoint)
return sensorData
def _generate_voorwiel_sensordata(self, voorwiel_data_list):
"""Converts voorwiel_data_list to a format that can be
passed to generate_scheme
"""
sensorData = list()
for i in range(len(voorwiel_data_list)):
datapoint = {"sensorID": 11}
if voorwiel_data_list[i][1] == 'Current':
datapoint["timestamp"] = voorwiel_data_list[i][2].isoformat()
else:
datapoint["timestamp"] = self._offsetTime(voorwiel_data_list[i][2]).isoformat()
datapoint ["data"] = [{"velocity": [self._speedHall(float(voorwiel_data_list[i][0][1]))]}]
sensorData.append(datapoint)
return sensorData
def _generate_trapas_sensordata(self, trapas_data_list):
"""Converts trapas_data_list to a format that can be
passed to generate_scheme
"""
sensorData = list()
for i in range(len(trapas_data_list)):
datapoint ={"sensorID":12}
if trapas_data_list[i][1] == 'Current':
datapoint["timestamp"] = trapas_data_list[i][2].isoformat()
else:
datapoint["timestamp"] = self._offsetTime(trapas_data_list[i][2]).isoformat()
datapoint["data"] = [{"RPM": [self._rpmHall(float(trapas_data_list[i][0][1]))]}]
sensorData.append(datapoint)
return sensorData
def _generate_scheme(self, distance, averageSpeed, maxSpeed, sensorData,
groupID="CW1A1", userID="OnTrack",
startTime=None, endTime=None, meta=None):
"""Generates a JSON formatted sequence from the input arguments that can be
sent to the server
"""
        if startTime is None:
            startTime = datetime.now()
        if endTime is None:
            endTime = datetime.now()
if meta is None or type(meta) != dict:
scheme = [{"startTime": startTime.isoformat(),
"endTime": endTime.isoformat(),
"groupID": groupID,
"userID": userID,
"sensorData": sensorData,
"meta": {"distance": distance,
"averageSpeed": averageSpeed,
"maxSpeed": maxSpeed}}]
else:
scheme = [{"startTime": startTime.isoformat(),
"endTime": endTime.isoformat(),
"groupID": groupID,
"userID": userID,
"sensorData": sensorData,
"meta": dict([("distance", distance),
("averageSpeed", averageSpeed),
("maxSpeed", maxSpeed)] + meta.items())}]
return scheme
def _generate_json(self):
"""" Makes a list that can be converted to the JSON-format by json.dumps
and saves this list in self.scheme """
GPSjson = list()
thermojson = list()
altijson = list()
voorwieljson = list()
achterjson = list()
        distance = 0
        # Fall back to the Pi's system clock when there is no GPS data; using
        # datetimes keeps the later total_seconds()/isoformat() calls valid.
        startTime = datetime.fromtimestamp(self.startTimeLogging) if self.startTimeLogging else datetime.now()
        endTime = datetime.fromtimestamp(time.time())
# Convert the raw sensor data to json suitable data
if len(self.GPSdataList):
GPSjson = self._generate_GPS_sensordata(self.GPSdataList)
distance = self._total_distance(self.GPSdataList)
startTime = self._offsetTime(self.startTimeLogging)
endTime = self.GPSdataList[-1][2]
# if len(self.temperatureList):
# thermojson = \
# generate_thermo_sensordata(self, self.temperatureList)
if len(self.altiList):
            print(self.altiList)
altijson = self._generate_alti_sensordata(self.altiList)
if len(self.voorwielList):
voorwieljson = self._generate_voorwiel_sensordata(self.voorwielList)
distance = self.total_distance_hall()
if len(self.achterList):
achterjson = self._generate_trapas_sensordata(self.achterList)
# Combine the sensor data
sensorData = GPSjson + altijson + voorwieljson + achterjson
averageSpeed = distance / ((endTime - startTime).total_seconds() / 3600)
self.scheme = self._generate_scheme(distance=distance,
averageSpeed=averageSpeed,
maxSpeed=None,
sensorData=sensorData,
startTime=startTime,
endTime=endTime)
def _distance_on_unit_sphere(self, lat1, long1, lat2, long2):
"""Convert latitude and longitude to
spherical coordinates in radians.
"""
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1)*degrees_to_radians
phi2 = (90.0 - lat2)*degrees_to_radians
# theta = longitude
theta1 = long1*degrees_to_radians
theta2 = long2*degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta, phi)
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
math.cos(phi1)*math.cos(phi2))
arc = math.acos(cos)
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
return arc
def _total_distance(self, GPS_data_list):
""" Calculates the total distance in GPS_data_list. """
distance_on_unit = 0
for i in range(len(GPS_data_list)-1):
distance_on_unit += self._distance_on_unit_sphere(GPS_data_list[i][0],
GPS_data_list[i][1],
GPS_data_list[i+1][0],
GPS_data_list[i+1][1])
return distance_on_unit * 6373
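# Standalone sketch (not part of the Logging class): the same spherical-law-of-
# cosines distance that _distance_on_unit_sphere computes, with the unit-sphere
# arc scaled by ~6373 km exactly as _total_distance does. Coordinates below are
# hypothetical example values.
def _example_distance_km(lat1, long1, lat2, long2):
    degrees_to_radians = math.pi / 180.0
    phi1 = (90.0 - lat1) * degrees_to_radians
    phi2 = (90.0 - lat2) * degrees_to_radians
    theta1 = long1 * degrees_to_radians
    theta2 = long2 * degrees_to_radians
    cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) +
           math.cos(phi1) * math.cos(phi2))
    return math.acos(cos) * 6373
# _example_distance_km(50.85, 4.35, 51.05, 3.72) is roughly 50 km (Brussels to Ghent).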
|
|
# Copyright (c) 2011, SD Elements. See LICENSE.txt for details.
import datetime
import json
import time # We monkeypatch this.
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import logout
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from django.urls import reverse
from django.forms import ValidationError
from django.http import HttpResponseForbidden, HttpRequest, HttpResponse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from security.auth import min_length
from security.auth_throttling import (
attempt_count, default_delay_function, delay_message, increment_counters,
reset_counters, Middleware as AuthThrottlingMiddleware
)
from security.middleware import (
BaseMiddleware, ContentSecurityPolicyMiddleware, DoNotTrackMiddleware,
SessionExpiryPolicyMiddleware, MandatoryPasswordChangeMiddleware,
XssProtectMiddleware, XFrameOptionsMiddleware,
)
from security.models import PasswordExpiry
from security.password_expiry import never_expire_password
from security.views import require_ajax, csp_report
try:
# Python 3
from unittest.mock import MagicMock
except ImportError:
# Python 2 requires mock library
from mock import MagicMock
mocked_custom_logout = MagicMock(side_effect=logout)
def login_user(func):
"""
A decorator that will create a valid user in the database and
then log that user in. We expect self to be a DjangoTestCase,
or some object with a similar interface.
"""
def wrapper(self, *args, **kwargs):
username_local = 'a2fcf54f63993b7'
password_local = 'd8327deb882cf90'
email_local = 'testuser@example.com'
user = User.objects.create_user(
username=username_local,
email=email_local,
password=password_local,
)
user.is_superuser = True
user.save()
PasswordExpiry.objects.create(user=user).never_expire()
self.client.login(username=username_local, password=password_local)
func(self, *args, **kwargs)
self.client.logout()
user.delete()
return wrapper
class CustomLoginURLMiddleware(BaseMiddleware):
"""Used to test the custom url support in the login required middleware."""
def process_request(self, request):
request.login_url = '/custom-login/'
class BaseMiddlewareTestMiddleware(BaseMiddleware):
REQUIRED_SETTINGS = ('R1', 'R2')
OPTIONAL_SETTINGS = ('O1', 'O2')
def load_setting(self, setting, value):
if not hasattr(self, 'loaded_settings'):
self.loaded_settings = {}
self.loaded_settings[setting] = value
def process_response(self, request, response):
response.loaded_settings = self.loaded_settings
return response
def process_exception(self, request, exception):
return self.process_response(request, HttpResponse())
class BaseMiddlewareTests(TestCase):
def __init__(self, *args, **kwargs):
super(BaseMiddlewareTests, self).__init__(*args, **kwargs)
module_name = BaseMiddlewareTests.__module__
self.MIDDLEWARE_NAME = module_name + '.BaseMiddlewareTestMiddleware'
def test_settings_initially_loaded(self):
expected_settings = {'R1': 1, 'R2': 2, 'O1': 3, 'O2': 4}
with self.settings(
MIDDLEWARE=(self.MIDDLEWARE_NAME,), **expected_settings
):
response = self.client.get('/home/')
self.assertEqual(expected_settings, response.loaded_settings)
def test_required_settings(self):
with self.settings(MIDDLEWARE=(self.MIDDLEWARE_NAME,)):
self.assertRaises(ImproperlyConfigured, self.client.get, '/home/')
def test_optional_settings(self):
with self.settings(
MIDDLEWARE=(self.MIDDLEWARE_NAME,), R1=True, R2=True
):
response = self.client.get('/home/')
self.assertEqual(None, response.loaded_settings['O1'])
self.assertEqual(None, response.loaded_settings['O2'])
def test_setting_change(self):
with self.settings(
MIDDLEWARE=(self.MIDDLEWARE_NAME,), R1=123, R2=True
):
response = self.client.get('/home/')
self.assertEqual(123, response.loaded_settings['R1'])
with override_settings(R1=456):
response = self.client.get('/home/')
self.assertEqual(456, response.loaded_settings['R1'])
response = self.client.get('/home/')
self.assertEqual(123, response.loaded_settings['R1'])
def test_load_setting_abstract_method(self):
base = BaseMiddleware()
self.assertRaises(NotImplementedError, base.load_setting, None, None)
@override_settings(MIDDLEWARE=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'security.middleware.LoginRequiredMiddleware',
))
class LoginRequiredMiddlewareTests(TestCase):
def setUp(self):
self.login_url = reverse("login")
def test_aborts_if_auth_middleware_missing(self):
middleware_classes = settings.MIDDLEWARE
auth_mw = 'django.contrib.auth.middleware.AuthenticationMiddleware'
middleware_classes = [
m for m in middleware_classes if m != auth_mw
]
with self.settings(MIDDLEWARE=middleware_classes):
self.assertRaises(ImproperlyConfigured, self.client.get, '/home/')
def test_redirects_unauthenticated_request(self):
response = self.client.get('/home/')
self.assertRedirects(response, self.login_url + "?next=/home/")
def test_redirects_unauthenticated_ajax_request(self):
response = self.client.get(
'/home/',
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
)
self.assertEqual(response.status_code, 401)
self.assertEqual(
json.loads(response.content.decode('utf-8')),
{"login_url": self.login_url},
)
def test_redirects_to_custom_login_url(self):
        middleware_classes = list(settings.MIDDLEWARE)
custom_login_middleware = 'tests.tests.CustomLoginURLMiddleware'
with self.settings(
            MIDDLEWARE=[custom_login_middleware] + middleware_classes,
):
response = self.client.get('/home/')
self.assertRedirects(response, '/custom-login/')
response = self.client.get(
'/home/',
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
)
self.assertEqual(response.status_code, 401)
self.assertEqual(
json.loads(response.content.decode('utf-8')),
{"login_url": '/custom-login/'},
)
def test_logs_out_inactive_users(self):
user = User.objects.create_user(
username="foo",
password="foo",
email="a@foo.org",
)
never_expire_password(user)
self.client.login(username="foo", password="foo")
resp = self.client.get('/home/')
self.assertEqual(resp.status_code, 200) # check we are logged in
user.is_active = False
user.save()
resp = self.client.get('/home/')
self.assertRedirects(resp, self.login_url + "?next=/home/")
@override_settings(MIDDLEWARE=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'security.middleware.MandatoryPasswordChangeMiddleware',
))
class RequirePasswordChangeTests(TestCase):
def test_require_password_change(self):
"""
A brand-new user should have an already-expired password, and therefore
be redirected to the password change form on any request.
"""
user = User.objects.create_user(username="foo",
password="foo",
email="foo@foo.com")
self.client.login(username="foo", password="foo")
try:
with self.settings(
MANDATORY_PASSWORD_CHANGE={"URL_NAME": "change_password"}
):
self.assertRedirects(
self.client.get("/home/"),
reverse("change_password"),
)
never_expire_password(user)
self.assertEqual(self.client.get("/home/").status_code, 200)
finally:
self.client.logout()
user.delete()
def test_superuser_password_change(self):
"""
A superuser can be forced to change their password via settings.
"""
user = User.objects.create_superuser(username="foo",
password="foo",
email="foo@foo.com")
self.client.login(username="foo", password="foo")
with self.settings(MANDATORY_PASSWORD_CHANGE={
"URL_NAME": "change_password"}):
self.assertEqual(self.client.get("/home/").status_code, 200)
try:
with self.settings(MANDATORY_PASSWORD_CHANGE={
"URL_NAME": "change_password",
"INCLUDE_SUPERUSERS": True
}):
self.assertRedirects(
self.client.get("/home/"),
reverse("change_password"),
)
finally:
self.client.logout()
user.delete()
def test_dont_redirect_exempt_urls(self):
user = User.objects.create_user(
username="foo",
password="foo",
email="foo@foo.com"
)
self.client.login(username="foo", password="foo")
try:
with self.settings(MANDATORY_PASSWORD_CHANGE={
"URL_NAME": "change_password",
"EXEMPT_URLS": (r'^test1/$', r'^test2/$'),
"EXEMPT_URL_NAMES": ("test3", "test4"),
}):
# Redirect pages in general
self.assertRedirects(
self.client.get("/home/"),
reverse("change_password"),
)
# Don't redirect the password change page itself
self.assertEqual(
self.client.get(reverse("change_password")).status_code,
200,
)
# Don't redirect exempt urls
self.assertEqual(self.client.get("/test1/").status_code, 200)
self.assertEqual(self.client.get("/test2/").status_code, 200)
self.assertEqual(self.client.get("/test3/").status_code, 200)
self.assertEqual(self.client.get("/test4/").status_code, 200)
finally:
self.client.logout()
user.delete()
def test_dont_choke_on_exempt_urls_that_dont_resolve(self):
user = User.objects.create_user(username="foo",
password="foo",
email="foo@foo.com")
self.client.login(username="foo", password="foo")
try:
with self.settings(MANDATORY_PASSWORD_CHANGE={
"URL_NAME": "change_password",
"EXEMPT_URL_NAMES": ("fake1", "fake2"),
}):
# Redirect pages in general
self.assertRedirects(
self.client.get("/home/"),
reverse("change_password"),
)
finally:
self.client.logout()
user.delete()
def test_raises_improperly_configured(self):
change = MandatoryPasswordChangeMiddleware()
self.assertRaises(
ImproperlyConfigured,
change.load_setting,
'MANDATORY_PASSWORD_CHANGE',
{'EXEMPT_URLS': []},
)
class DecoratorTest(TestCase):
"""
Testing the AJAXView decorator.
"""
    def test_require_ajax(self):
@require_ajax
def ajax_only_view(request):
self.assertTrue(request.is_ajax())
request = HttpRequest()
response = ajax_only_view(request)
self.assertTrue(isinstance(response, HttpResponseForbidden))
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
response = ajax_only_view(request)
self.assertFalse(isinstance(response, HttpResponseForbidden))
@override_settings(MIDDLEWARE=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'security.middleware.SessionExpiryPolicyMiddleware',
'security.middleware.LoginRequiredMiddleware',
))
class SessionExpiryTests(TestCase):
def test_session_variables_are_set(self):
"""
        Verify that the session stores the start time and last activity time.
"""
self.client.get('/home/')
now = timezone.now()
start_time = SessionExpiryPolicyMiddleware._get_datetime_in_session(
SessionExpiryPolicyMiddleware.START_TIME_KEY,
self.client.session
)
last_activity = SessionExpiryPolicyMiddleware._get_datetime_in_session(
SessionExpiryPolicyMiddleware.LAST_ACTIVITY_KEY,
self.client.session
)
self.assertTrue(now - start_time < datetime.timedelta(seconds=10))
self.assertTrue(now - last_activity < datetime.timedelta(seconds=10))
def session_expiry_test(self, key, expired):
"""
Verify that expired sessions are cleared from the system. (And that we
redirect to the login page.)
"""
        self.assertEqual(self.client.get('/home/').status_code, 200)
session = self.client.session
SessionExpiryPolicyMiddleware._set_datetime_in_session(
key,
expired,
session
)
session.save()
response = self.client.get('/home/')
self.assertRedirects(response,
reverse("login") + '?next=/home/')
@login_user
def test_session_too_old(self):
"""
        Pretend we are 1 second past the session age limit and make sure our
        session is cleared.
"""
delta = SessionExpiryPolicyMiddleware().SESSION_COOKIE_AGE + 1
expired = timezone.now() - datetime.timedelta(seconds=delta)
self.session_expiry_test(SessionExpiryPolicyMiddleware.START_TIME_KEY,
expired)
@login_user
def test_session_inactive_too_long(self):
"""
        Pretend we are 1 second past the session inactivity timeout and make
sure the session is cleared.
"""
delta = SessionExpiryPolicyMiddleware().SESSION_INACTIVITY_TIMEOUT + 1
expired = timezone.now() - datetime.timedelta(seconds=delta)
self.session_expiry_test(
SessionExpiryPolicyMiddleware().LAST_ACTIVITY_KEY,
expired,
)
@login_user
def test_exempted_session_expiry_urls(self):
delta = SessionExpiryPolicyMiddleware().SESSION_INACTIVITY_TIMEOUT + 1
expired = timezone.now() - datetime.timedelta(seconds=delta)
        self.assertEqual(self.client.get('/home/').status_code, 200)
session = self.client.session
SessionExpiryPolicyMiddleware._set_datetime_in_session(
SessionExpiryPolicyMiddleware.LAST_ACTIVITY_KEY,
expired,
session
)
session.save()
exempted_response = self.client.get('/accounts/login/')
not_exempted_response = self.client.get('/home/')
        self.assertEqual(exempted_response.status_code, 200)
self.assertRedirects(not_exempted_response,
reverse("login") + '?next=/home/')
@login_user
def test_custom_logout(self):
delta = SessionExpiryPolicyMiddleware().SESSION_INACTIVITY_TIMEOUT + 1
expired = timezone.now() - datetime.timedelta(seconds=delta)
self.session_expiry_test(
SessionExpiryPolicyMiddleware().LAST_ACTIVITY_KEY,
expired,
)
assert mocked_custom_logout.called
@override_settings(MIDDLEWARE=(
'security.middleware.NoConfidentialCachingMiddleware',
))
class ConfidentialCachingTests(TestCase):
def setUp(self):
self.header_values = {
"Cache-Control": 'no-cache, no-store, max-age=0, must-revalidate',
"Pragma": "no-cache",
"Expires": '-1'
}
@override_settings(NO_CONFIDENTIAL_CACHING={
"WHITELIST_ON": True,
"BLACKLIST_ON": False,
"WHITELIST_REGEXES": ["accounts/login/$"],
"BLACKLIST_REGEXES": ["accounts/logout/$"]
})
def test_whitelisting(self):
# Get Non Confidential Page
response = self.client.get('/accounts/login/')
for header, value in self.header_values.items():
self.assertNotEqual(response.get(header, None), value)
# Get Confidential Page
response = self.client.get("/accounts/logout")
for header, value in self.header_values.items():
self.assertEqual(response.get(header, None), value)
@override_settings(NO_CONFIDENTIAL_CACHING={
"WHITELIST_ON": False,
"BLACKLIST_ON": True,
"WHITELIST_REGEXES": ["accounts/login/$"],
"BLACKLIST_REGEXES": ["accounts/logout/$"]
})
def test_blacklisting(self):
# Get Non Confidential Page
response = self.client.get('/accounts/login/')
for header, value in self.header_values.items():
self.assertNotEqual(response.get(header, None), value)
# Get Confidential Page
response = self.client.get("/accounts/logout/")
for header, value in self.header_values.items():
self.assertEqual(response.get(header, None), value)
@override_settings(MIDDLEWARE=('security.middleware.XFrameOptionsMiddleware',))
class XFrameOptionsDenyTests(TestCase):
def test_option_set(self):
"""
Verify the HTTP Response Header is set.
"""
response = self.client.get('/accounts/login/')
self.assertEqual(response['X-Frame-Options'], settings.X_FRAME_OPTIONS)
def test_exclude_urls(self):
"""
Verify that pages can be excluded from the X-Frame-Options header.
"""
response = self.client.get('/home/')
self.assertEqual(response['X-Frame-Options'], settings.X_FRAME_OPTIONS)
response = self.client.get('/test1/')
self.assertNotIn('X-Frame-Options', response)
def test_improperly_configured(self):
xframe = XFrameOptionsMiddleware()
self.assertRaises(
ImproperlyConfigured,
xframe.load_setting,
'X_FRAME_OPTIONS',
'invalid',
)
self.assertRaises(
ImproperlyConfigured,
xframe.load_setting,
'X_FRAME_OPTIONS_EXCLUDE_URLS',
1,
)
@override_settings(X_FRAME_OPTIONS_EXCLUDE_URLS=None)
def test_default_exclude_urls(self):
# This URL is excluded in other tests, see settings.py
response = self.client.get('/test1/')
self.assertEqual(
response['X-Frame-Options'],
settings.X_FRAME_OPTIONS,
)
@override_settings(X_FRAME_OPTIONS=None)
def test_default_xframe_option(self):
response = self.client.get('/home/')
self.assertEqual(
response['X-Frame-Options'],
'deny',
)
@override_settings(MIDDLEWARE=('security.middleware.XssProtectMiddleware',))
class XXssProtectTests(TestCase):
def test_option_set(self):
"""
Verify the HTTP Response Header is set.
"""
response = self.client.get('/accounts/login/')
self.assertNotEqual(response['X-XSS-Protection'], None)
def test_default_setting(self):
with self.settings(XSS_PROTECT=None):
response = self.client.get('/accounts/login/')
self.assertEqual(response['X-XSS-Protection'], '1') # sanitize
def test_option_off(self):
with self.settings(XSS_PROTECT='off'):
response = self.client.get('/accounts/login/')
self.assertEqual(response['X-XSS-Protection'], '0') # off
def test_improper_configuration_raises(self):
xss = XssProtectMiddleware()
self.assertRaises(
ImproperlyConfigured,
xss.load_setting,
'XSS_PROTECT',
'invalid',
)
@override_settings(MIDDLEWARE=('security.middleware.ContentNoSniff',))
class ContentNoSniffTests(TestCase):
def test_option_set(self):
"""
Verify the HTTP Response Header is set.
"""
response = self.client.get('/accounts/login/')
self.assertEqual(response['X-Content-Options'], 'nosniff')
@override_settings(MIDDLEWARE=(
'security.middleware.StrictTransportSecurityMiddleware',
))
class StrictTransportSecurityTests(TestCase):
def test_option_set(self):
"""
Verify the HTTP Response Header is set.
"""
response = self.client.get('/accounts/login/')
self.assertNotEqual(response['Strict-Transport-Security'], None)
@override_settings(
AUTHENTICATION_THROTTLING={
"DELAY_FUNCTION": lambda x, _: (2 ** (x - 1) if x else 0, 0),
"LOGIN_URLS_WITH_TEMPLATES": [
("accounts/login/", "registration/login.html")
]
},
MIDDLEWARE=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'security.auth_throttling.Middleware',)
)
class AuthenticationThrottlingTests(TestCase):
def setUp(self):
# monkey patch time
self.old_time = time.time
self.time = 0
time.time = lambda: self.time
self.user = User.objects.create_user(username="foo", password="foo",
email="a@foo.org")
def tearDown(self):
time.time = self.old_time
def attempt(self, password):
return self.client.post("/accounts/login/",
{"username": "foo",
"password": password},
follow=True)
def reset(self):
self.client.logout()
cache.clear()
def typo(self):
self.assertTemplateUsed(self.attempt("bar"), "registration/login.html")
def _succeed(self):
self.assertTemplateNotUsed(self.attempt("foo"),
"registration/login.html")
self.reset()
def _fail(self):
self.assertTemplateUsed(self.attempt("foo"), "registration/login.html")
self.reset()
def set_time(self, t):
self.time = t
def test_delay_message(self):
self.assertEqual("0 seconds", delay_message(0))
self.assertEqual("1 second", delay_message(0.1))
self.assertEqual("1 second", delay_message(1))
self.assertEqual("1 minute", delay_message(31))
self.assertEqual("1 minute", delay_message(60))
self.assertEqual("1 minute", delay_message(61))
self.assertEqual("2 minutes", delay_message(90))
self.assertEqual("2 minutes", delay_message(120))
def test_counters(self):
cache.clear()
increment_counters(username="foo", ip="127.0.0.1")
increment_counters(username="foo")
self.assertEqual(attempt_count("username", "foo"), 2)
self.assertEqual(attempt_count("ip", "127.0.0.1"), 1)
self.assertEqual(attempt_count("username", "baz"), 0)
reset_counters(username="foo", ip="127.0.0.1")
self.assertEqual(attempt_count("username", "foo"), 0)
self.assertEqual(attempt_count("ip", "127.0.0.1"), 0)
cache.clear()
def test_default_delay_function(self):
"""
        The default delay function only looks at the per-username attempt
        count and ignores the per-IP count.
"""
delay = default_delay_function
        # 100 attempts from the same IP don't result in a delay.
self.assertEqual(delay(0, 100), (0, 0))
# first 3 incorrect attempts with a username will not be delayed.
for i in range(3):
self.assertEqual(delay(i, 0), (0, 0))
        # fourth, fifth and sixth attempts are throttled
for i in range(4, 7):
self.assertEqual(delay(i, 0), (5 * 2 ** (i - 3), 0))
# we max out at 24 hours
self.assertEqual(delay(100, 0), (24 * 60 * 60, 0))
def test_per_account_throttling(self):
"""
Tests that multiple attempts on the same account are throttled
according to settings.AUTHENTICATION_THROTTLING.
"""
self.set_time(0)
self._succeed()
self.set_time(0)
self.typo()
self._fail()
self.set_time(0)
self.typo()
self.set_time(1)
self._succeed()
self.set_time(0)
self.typo()
self.set_time(1)
self.typo()
self.set_time(2)
self._fail()
self.set_time(0)
self.typo()
self.set_time(1)
self.typo()
self.set_time(3)
self._succeed()
@override_settings(AUTHENTICATION_THROTTLING={
"DELAY_FUNCTION": lambda x, y: (x, y),
"LOGIN_URLS_WITH_TEMPLATES": [
("accounts/login/", None)
]
})
def test_too_many_requests_error_when_no_template_provided(self):
"""
Verify we simply return a 429 error when there is no login template
provided for us to report an error within.
"""
cache.clear()
# first bad attempt
self.typo()
# second attempt is throttled as per our delay function
response = self.attempt("bar")
self.assertEqual(
response.status_code,
429,
"Expected TooManyRequests Error.",
)
cache.clear()
def test_reset_button(self):
"""
Tests that the account lockout reset button in the admin interface
actually works.
"""
self.set_time(0)
self.typo()
admin = User.objects.create_user(username="bar", password="bar",
email="a@bar.org")
admin.is_superuser = True
admin.save()
self.client.login(username="bar", password="bar")
self.client.post(
reverse("reset_username_throttle", args=[self.user.id]),
)
self.client.logout()
self._succeed()
@override_settings(AUTHENTICATION_THROTTLING={
"DELAY_FUNCTION": lambda x, y: (x, y),
})
def test_improperly_configured_middleware(self):
self.assertRaises(ImproperlyConfigured, AuthThrottlingMiddleware)
def test_throttle_reset_404_on_unauthorized(self):
resp = self.client.post(
reverse("reset_username_throttle", args=[self.user.id]),
)
self.assertEqual(resp.status_code, 404)
def test_throttle_reset_404_on_not_found(self):
admin = User.objects.create_user(
username="bar",
password="bar",
email="a@bar.org",
)
admin.is_superuser = True
admin.save()
self.client.login(username="bar", password="bar")
resp = self.client.post(
reverse("reset_username_throttle", args=[999]),
)
self.assertEqual(resp.status_code, 404)
@override_settings(MIDDLEWARE=('security.middleware.P3PPolicyMiddleware',))
class P3PPolicyTests(TestCase):
def setUp(self):
self.policy = "NN AD BLAH"
settings.P3P_COMPACT_POLICY = self.policy
def test_p3p_header(self):
expected_header = 'policyref="/w3c/p3p.xml" CP="%s"' % self.policy
response = self.client.get('/accounts/login/')
self.assertEqual(response["P3P"], expected_header)
class AuthTests(TestCase):
def test_min_length(self):
self.assertRaises(ValidationError, min_length(6), "abcde")
min_length(6)("abcdef")
@override_settings(MIDDLEWARE=(
'security.middleware.ContentSecurityPolicyMiddleware',
))
class ContentSecurityPolicyTests(TestCase):
class FakeHttpRequest(object):
method = 'POST'
body = """{
"csp-report": {
"document-uri": "http://example.org/page.html",
"referrer": "http://evil.example.com/haxor.html",
"blocked-uri": "http://evil.example.com/image.png",
"violated-directive": "default-src 'self'",
"original-policy": "%s"
}
}
""" % settings.CSP_STRING
META = {
'CONTENT_TYPE': 'application/json',
'REMOTE_ADDR': '127.0.0.1',
'HTTP_USER_AGENT': 'FakeHTTPRequest'
}
def test_option_set(self):
"""
Verify the HTTP Response Header is set.
"""
response = self.client.get('/accounts/login/')
self.assertEqual(
response['Content-Security-Policy'],
settings.CSP_STRING,
)
def test_json(self):
req = ContentSecurityPolicyTests.FakeHttpRequest()
parsed = json.loads(req.body)
self.assertNotEqual(len(parsed), 0)
# http://www.w3.org/TR/CSP/#sample-violation-report
def test_csp_view(self):
req = ContentSecurityPolicyTests.FakeHttpRequest()
# call the view
resp = csp_report(req)
self.assertEqual(resp.status_code, 204)
def test_csp_gen_1(self):
csp_dict = {
'default-src': ['self', 'cdn.example.com'],
'script-src': ['self', 'js.example.com'],
'style-src': ['self', 'css.example.com'],
'img-src': ['self', 'img.example.com'],
'connect-src': ['self', ],
'font-src': ['fonts.example.com', ],
'object-src': ['self'],
'media-src': ['media.example.com', ],
'frame-src': ['*', ],
'sandbox': ['', ],
'reflected-xss': 'filter',
'referrer': 'origin',
'report-uri': 'http://example.com/csp-report',
}
expected = (
"script-src 'self' js.example.com;"
"default-src 'self' cdn.example.com;"
"img-src 'self' img.example.com;"
"connect-src 'self';"
"reflected-xss filter;"
"style-src 'self' css.example.com;"
"report-uri http://example.com/csp-report;"
"frame-src *;"
"sandbox ;"
"object-src 'self';"
"media-src media.example.com;"
"referrer origin;"
"font-src fonts.example.com"
)
csp = ContentSecurityPolicyMiddleware()
generated = csp._csp_builder(csp_dict)
# We can't assume the iteration order on the csp_dict, so we split the
# output, sort, and ensure we got all the results back, regardless of
# the order.
expected_list = sorted(x.strip() for x in expected.split(';'))
generated_list = sorted(x.strip() for x in generated.split(';'))
self.assertEqual(generated_list, expected_list)
def test_csp_gen_2(self):
csp_dict = {'default-src': ('none',), 'script-src': ['none']}
expected = "default-src 'none'; script-src 'none'"
csp = ContentSecurityPolicyMiddleware()
generated = csp._csp_builder(csp_dict)
expected_list = sorted(x.strip() for x in expected.split(';'))
generated_list = sorted(x.strip() for x in generated.split(';'))
self.assertEqual(generated_list, expected_list)
def test_csp_gen_3(self):
csp_dict = {
'script-src': [
'self',
'www.google-analytics.com',
'ajax.googleapis.com',
],
}
expected = (
"script-src "
"'self' www.google-analytics.com ajax.googleapis.com"
)
csp = ContentSecurityPolicyMiddleware()
generated = csp._csp_builder(csp_dict)
self.assertEqual(generated, expected)
def test_csp_gen_err(self):
# argument not passed as array, expect failure
csp_dict = {'default-src': 'self'}
csp = ContentSecurityPolicyMiddleware()
self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)
def test_csp_gen_err2(self):
csp_dict = {'invalid': 'self'} # invalid directive
csp = ContentSecurityPolicyMiddleware()
self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)
def test_csp_gen_err3(self):
csp_dict = {'sandbox': 'none'} # not a list or tuple, expect failure
csp = ContentSecurityPolicyMiddleware()
self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)
def test_csp_gen_err4(self):
# Not an allowed directive, expect failure
csp_dict = {'sandbox': ('invalid', )}
csp = ContentSecurityPolicyMiddleware()
self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)
def test_csp_gen_err5(self):
# Not an allowed directive, expect failure
csp_dict = {'referrer': 'invalid'}
csp = ContentSecurityPolicyMiddleware()
self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)
def test_csp_gen_err6(self):
# Not an allowed directive, expect failure
csp_dict = {'reflected-xss': 'invalid'}
csp = ContentSecurityPolicyMiddleware()
self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)
def test_enforced_by_default(self):
with self.settings(CSP_MODE=None):
response = self.client.get('/accounts/login/')
self.assertIn('Content-Security-Policy', response)
self.assertNotIn('Content-Security-Policy-Report-Only', response)
def test_enforced_when_on(self):
with self.settings(CSP_MODE='enforce'):
response = self.client.get('/accounts/login/')
self.assertIn('Content-Security-Policy', response)
self.assertNotIn('Content-Security-Policy-Report-Only', response)
def test_report_only_set(self):
with self.settings(CSP_MODE='report-only'):
response = self.client.get('/accounts/login/')
self.assertNotIn('Content-Security-Policy', response)
self.assertIn('Content-Security-Policy-Report-Only', response)
def test_invalid_csp_mode(self):
with self.settings(CSP_MODE='invalid'):
self.assertRaises(
MiddlewareNotUsed,
ContentSecurityPolicyMiddleware,
)
def test_no_csp_options_set(self):
with self.settings(CSP_DICT=None, CSP_STRING=None):
self.assertRaises(
MiddlewareNotUsed,
ContentSecurityPolicyMiddleware,
)
def test_both_csp_options_set(self):
with self.settings(CSP_DICT={'x': 'y'}, CSP_STRING='x y;'):
self.assertRaises(
MiddlewareNotUsed,
ContentSecurityPolicyMiddleware,
)
def test_sets_from_csp_dict(self):
with self.settings(
CSP_DICT={'default-src': ('self',)},
CSP_STRING=None,
):
response = self.client.get('/accounts/login/')
self.assertEqual(
response['Content-Security-Policy'],
"default-src 'self'",
)
@override_settings(MIDDLEWARE=('security.middleware.DoNotTrackMiddleware',))
class DoNotTrackTests(TestCase):
def setUp(self):
self.dnt = DoNotTrackMiddleware()
self.request = HttpRequest()
self.response = HttpResponse()
def test_set_DNT_on(self):
self.request.META['HTTP_DNT'] = '1'
self.dnt.process_request(self.request)
self.assertTrue(self.request.dnt)
def test_set_DNT_off(self):
self.request.META['HTTP_DNT'] = 'off'
self.dnt.process_request(self.request)
self.assertFalse(self.request.dnt)
def test_default_DNT(self):
self.dnt.process_request(self.request)
self.assertFalse(self.request.dnt)
def test_DNT_echo_on(self):
self.request.META['HTTP_DNT'] = '1'
self.dnt.process_response(self.request, self.response)
self.assertIn('DNT', self.response)
self.assertEqual(self.response['DNT'], '1')
def test_DNT_echo_off(self):
self.request.META['HTTP_DNT'] = 'off'
self.dnt.process_response(self.request, self.response)
self.assertEqual(self.response['DNT'], 'off')
def test_DNT_echo_default(self):
self.dnt.process_response(self.request, self.response)
self.assertNotIn('DNT', self.response)
@override_settings(MIDDLEWARE=(
'security.middleware.ClearSiteDataMiddleware',
))
class ClearSiteDataMiddlewareTests(TestCase):
def test_request_that_matches_the_whitelist_with_default_directives(self):
response = self.client.get('/home/')
self.assertEqual(response['Clear-Site-Data'], '"cookies", "storage"')
def test_request_that_misses_the_whitelist(self):
response = self.client.get('/test1/')
self.assertNotIn("Clear-Site-Data", response)
@override_settings(CLEAR_SITE_DATA_DIRECTIVES=(
'cache', 'cookies', 'executionContexts', '*'
))
def test_request_that_matches_the_whitelist_with_custom_directives(self):
response = self.client.get('/home/')
self.assertEqual(
response['Clear-Site-Data'],
'"cache", "cookies", "executionContexts", "*"')
|
|
# Log Parser for RTI Connext.
#
# Copyright 2016 Real-Time Innovations, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function helpers for log parsing.
The module contains useful functions to parse several fields from log messages.
Functions:
+ check_periodic: Check if the given event is periodic.
+ compare_times: Compare whether two clock times are equal within a tolerance.
+ add_statistics_packet: Add the given packet to the packet statistics.
+ add_statistics_bandwidth: Add the given packet to the bandwidth statistics.
+ obfuscate: Obfuscate the given text.
+ get_oid: Get a name for the entity ID in hexadecimal text format.
+ is_builtin_entity: Return whether the OID hex number is for a built-in entity.
+ get_data_packet_name: Return the DATA packet name.
+ get_topic_name: Get the topic name, obfuscating if needed.
+ get_type_name: Get the type name, obfuscating if needed.
+ get_port_number: Get the port number, obfuscating if needed.
+ get_port_name: Get the domain ID and index of the port.
+ get_participant: Get the participant ID from the GUID.
+ get_locator: Parse the locator and convert to text.
+ get_assign_name: Get the assigned name for the entity.
+ set_participant: Set the name of a participant.
+ set_local_address: Set the local address.
+ hex2ip: Convert the hexadecimal host ID into an IP address.
+ parse_guid: Parse the entity GUID field and convert to text.
+ parse_sn: Parse the sequence number and return as a number.
Constants:
+ INSTANCE_STATES: States for an instance.
+ VIEW_STATES: View states for an instance.
"""
from __future__ import absolute_import
from calendar import timegm
from datetime import timedelta
from hashlib import md5
INSTANCE_STATES = ["invalid", "alive", "disposed", "", "no_writers"]
VIEW_STATES = ["invalid", "new", "not_new"]
def check_periodic(state, name, logger, msg=""):
"""Check if the given event is periodic."""
    # If there is no clock (the log is not timestamped), always return True
if 'clocks' not in state:
return True
# Init
if 'periodic_event' not in state:
state['periodic_event'] = {}
# Get the monotonic clock if possible, otherwise use the system clock.
has_monotonic = state['clocks'][0] is not None
clock = state['clocks'][0] if has_monotonic else state['clocks'][1]
    # On the first call we don't have enough information
if name not in state['periodic_event']:
state['periodic_event'][name] = [-1, clock]
return True
# Get current period and previous one.
previous_period = state['periodic_event'][name][0]
period = clock - state['periodic_event'][name][1]
# Update
state['periodic_event'][name][1] = clock
state['periodic_event'][name][0] = period
    # If there is no previous period, return True
if previous_period == -1:
return True
# Compare times.
tolerance = 0.1 if has_monotonic else timedelta(milliseconds=100)
result = compare_times(previous_period, period, tolerance)
if result:
logger.warning("%s not periodic (%s by %s) %s" %
(name, result[0], result[1], msg))
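# Illustrative usage sketch (not from the original module): with a state such
# as {'clocks': (10.0, None)} (monotonic clock available), the first call to
# check_periodic(state, "announcement", logger) only records the clock and
# returns True; once two periods are known, each new period is compared with
# the previous one and a warning is logged when the drift exceeds the
# tolerance (0.1 s with a monotonic clock, a 100 ms timedelta otherwise).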
def compare_times(past, future, tolerance):
"""Compare if the time clock times are equal."""
diff_positive = future - past
diff_negative = past - future
if diff_positive > tolerance:
return ["forward", diff_positive]
elif diff_negative > tolerance:
return ["backward", diff_negative]
return None
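# Examples (illustrative, derived from the logic above):
#   compare_times(10.0, 10.05, 0.1) -> None (within tolerance)
#   compare_times(10.0, 10.5, 0.1)  -> ["forward", 0.5]
#   compare_times(10.5, 10.0, 0.1)  -> ["backward", 0.5]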
def add_statistics_packet(guid, typ, packet, state):
"""Add the given packet to the packet statistics."""
if 'statistics_packet' not in state:
state['statistics_packet'] = {}
stats = state['statistics_packet']
guid = guid.strip()
# Add to the guid counter
if guid not in stats:
stats[guid] = {}
if typ not in stats[guid]:
stats[guid][typ] = {'ALL': 0}
stats[guid][typ]['ALL'] += 1
# Add the specific packet counter
if packet not in stats[guid][typ]:
stats[guid][typ][packet] = 0
stats[guid][typ][packet] += 1
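# Resulting layout (illustrative values): state['statistics_packet'] maps
#   guid -> type -> {'ALL': total, <packet_name>: count, ...}
# e.g. two DATA packets and one HB sent by the same GUID would yield
#   {'10.0.0.1 0001': {'send': {'ALL': 3, 'DATA': 2, 'HB': 1}}}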
def add_statistics_bandwidth(addr, typ, qty, state):
"""Add the given packet to the bandwidth statistics."""
if 'statistics' not in state:
state['statistics'] = {}
stats = state['statistics']
addr = addr.split(":")
port = addr[1] if len(addr) > 1 else 0
addr = addr[0]
# Get the monotonic clock if possible, otherwise use the system clock.
if 'clocks' in state:
clock = state['clocks'][0]
if clock is None:
clock = timegm(state['clocks'][1].timetuple())
else:
clock = 0
# Add to the host counter
if addr not in stats:
stats[addr] = {}
if typ not in stats[addr]:
stats[addr][typ] = [clock, clock, 0]
stats[addr][typ][1] = clock
stats[addr][typ][2] += qty
# Add to the host + port counter
if port not in stats[addr]:
stats[addr][port] = {}
if typ not in stats[addr][port]:
stats[addr][port][typ] = [clock, clock, 0]
stats[addr][port][typ][1] = clock
stats[addr][port][typ][2] += qty
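# Resulting layout (illustrative): state['statistics'] keeps, per address and
# per address+port, a [first_clock, last_clock, accumulated_qty] triple for
# each traffic type; a consumer could later derive throughput as
# accumulated_qty / (last_clock - first_clock). That derivation is only a
# sketch of how the counters are meant to be used, not part of this module.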
def obfuscate(text, state):
"""Obfuscate the given text."""
return md5((text + state['salt']).encode('utf-8')).hexdigest()
def get_oid(oid):
"""Get a name for the entity ID in hexadecimal text format."""
# Information from RTPS Spec: http://www.omg.org/spec/DDSI-RTPS/
# Security entities: http://www.omg.org/spec/DDS-SECURITY/1.0/Beta2/
BUILTIN_NAMES = {
# Built-in Entity GUIDs
0x00000000: "UNKNOWN", 0x000001c1: "PARTICIPANT",
0x000002c2: "SED_TOPIC_WRITER", 0x000002c7: "SED_TOPIC_READER",
0x000003c2: "SED_PUB_WRITER", 0x000003c7: "SED_PUB_READER",
0x000004c2: "SED_SUB_WRITER", 0x000004c7: "SED_SUB_READER",
0x000100c2: "SPD_PART_WRITER", 0x000100c7: "SPD_PART_READER",
0x000200c2: "MESSAGE_WRITER", 0x000200c7: "MESSAGE_READER",
# Security Built-in Entity GUIDs
0xff0003c2: "SED_PUB_SEC_WRITER", 0xff0003c7: "SED_PUB_SEC_READER",
0xff0004c2: "SED_SUB_SEC_WRITER", 0xff0004c7: "SED_SUB_SEC_READER",
0xff0200c2: "MSG_SEC_WRITER", 0xff0200c7: "MSG_SEC_READER",
0x000201c2: "MSG_STA_SEC_WRITER", 0x000201c7: "MSG_STA_SEC_READER",
0xff0202c2: "MSG_VOL_SEC_WRITER", 0xff0202c7: "MSG_VOL_SEC_READER"}
ENTITY_ORIGINS = {
0x00: "USER", 0x40: "VEND", 0x80: "BLVD", 0xc0: "BUILTIN"}
ENTITY_KINDS = {
0x00: "UNK", 0x01: "PART",
0x02: "W+K", 0x03: "W-K",
0x04: "R-K", 0x07: "R+K"}
# Convert the hexadecimal text representation to a number
oid_num = int(oid, 16)
# Analyze the entity kind
entity_kind = oid_num & 0xFF
origin = ENTITY_ORIGINS[entity_kind & 0xC0]
kind = ENTITY_KINDS[entity_kind & 0x3F]
if origin == "BUILTIN":
name = BUILTIN_NAMES[oid_num]
elif origin == "USER":
name = kind + "_" + hex(oid_num >> 8)[2:].zfill(6)
else:
name = origin + "_" + kind + "_" + hex(oid_num >> 8)[2:].zfill(6)
return name
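# Examples (illustrative, following the tables above):
#   get_oid("000001c1") -> "PARTICIPANT"   (built-in entity)
#   get_oid("00000b02") -> "W+K_00000b"    (user writer with key)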
def is_builtin_entity(oid):
"""Return if the OID hex number is for a built-in entity."""
# More information in get_oid
oid_num = int(oid, 16)
return oid_num & 0xC0 == 0xC0
def get_data_packet_name(oid):
"""Return the DATA packet name."""
# More information in get_oid
entity_name = get_oid(oid)
PACKET_NAMES = {
"SED_PUB_WRITER": "DATA(w)", "SED_SUB_WRITER": "DATA(r)",
"SPD_PART_WRITER": "DATA(p)", "MESSAGE_WRITER": "DATA(m)",
"PARTICIPANT": "DATA(p)"}
return PACKET_NAMES[entity_name] if entity_name in PACKET_NAMES else "DATA"
def get_topic_name(topic, state):
"""Get the topic name, obfuscating if needed."""
return obfuscate(topic, state) if state['obfuscate'] else topic
def get_type_name(typ, state):
"""get_type_name: Get the type name, obfuscating if needed."""
return obfuscate(typ, state) if state['obfuscate'] else typ
def get_port_number(port, state):
"""Get the port number, obfuscating if needed."""
return obfuscate(port, state)[:5] if state['obfuscate'] else port
def get_port_name(port):
"""Get the domain ID and index of the port."""
port_base = 7400
domain_id_gain = 250
domain_id = (port - port_base) / domain_id_gain
doffset = (port - port_base) % domain_id_gain
if doffset == 0:
nature = "MeMu"
participant_idx = 0
elif doffset == 1:
nature = "UsMu"
participant_idx = 0
else:
participant_idx = (doffset - 10) / 2
if (doffset - 10) % 2 == 0:
nature = "MeUn"
else:
nature = "UsUn"
if "Mu" in nature:
return "%d %s" % (domain_id, nature)
elif "Un" in nature:
return "%d.%d %s" % (domain_id, participant_idx, nature)
def get_participant(guid, state):
"""Get the participant ID from the GUID."""
address = guid.split()
    # Check if this is a local participant (we don't know which one because
    # the message is missing the instance ID).
if 'local_address' in state and tuple(address) in state['local_address'] \
and not state['assign_names']:
return 'local ' + get_port_number(address[1], state)
name = None
if state['obfuscate']:
address[0] = obfuscate(address[0], state)[:15]
if len(address) > 1:
address[1] = obfuscate(address[1], state)[:5]
guid = " ".join(address)
    # If both obfuscate and assign_names are set, the assigned name takes
    # priority over the participant name.
if state['assign_names']:
name = get_assign_name(guid, state)
if 'participants' not in state or guid not in state['participants']:
name = get_assign_name(guid, state) if state['assign_names'] else guid
elif name is None:
name = state['participants'][guid]
if 'initial_peers' in state:
for peer in state['initial_peers']:
if name in peer:
name += "*"
return name
def get_locator(loc, state):
"""Parse the locator and convert to text."""
if state['obfuscate'] or state['assign_names']:
addr_idx = loc.find("://") + 3
if addr_idx != len(loc):
addr = loc[addr_idx:]
port_idx = addr.rfind(":")
port = ""
if port_idx != -1:
port = ":" + addr[port_idx + 1:]
addr = addr[:port_idx]
if state['obfuscate']:
port = ":" + obfuscate(port, state)[:5]
loc = loc[:addr_idx] + get_participant(addr, state) + port
return loc
def get_assign_name(guid, state):
"""Get the assigned name for the entity."""
guid = " ".join(guid.split())
if 'name_table' not in state:
state['name_table'] = {}
if 'names' not in state:
state['names'] = {}
if guid not in state['names']:
names = state['name_table']
addr = guid.split()
# Add host part
if addr[0] not in names:
names[addr[0]] = {}
state['names'][addr[0]] = "H" + str(len(names))
name = state['names'][addr[0]]
# Add application part
if len(addr) >= 2:
app_guid = addr[0] + " " + addr[1]
if addr[1] not in names[addr[0]]:
names[addr[0]][addr[1]] = []
app_name = name + ".A" + str(len(names[addr[0]]))
state['names'][app_guid] = app_name
name = state['names'][app_guid]
# Add participant part
if len(addr) >= 3:
app_dict = names[addr[0]][addr[1]]
if addr[2] not in app_dict:
app_dict.append(addr[2])
name += ".P" + str(len(app_dict))
state['names'][guid] = name
return state['names'][guid]
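# Naming scheme sketch (illustrative): the first host seen becomes "H1", the
# first application on that host "H1.A1" and its first participant
# "H1.A1.P1"; a second application on the same host would be "H1.A2", and so
# on. The mapping is cached in state['names'], so a repeated GUID keeps the
# same short name.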
def set_participant(guid, name, state):
"""Set the name of a participant."""
if 'participants' not in state:
state['participants'] = {}
if state['obfuscate']:
address = guid.split(' ')
address[0] = obfuscate(address[0], state)[:15]
address[1] = obfuscate(address[1], state)[:5]
guid = " ".join(address)
name = obfuscate(name, state)[:20]
state['participants'][guid] = name
def set_local_address(guid, state, logger):
"""Set the local address."""
address = guid.split()
local_address = (address[0], address[1])
    # If a different local address was already recorded, the file most likely
    # contains the output of two different apps.
if 'local_address' not in state:
state['local_address'] = set()
elif local_address not in state['local_address']:
logger.warning("You may have written output from two different apps.")
state['local_address'].add(local_address)
if state['obfuscate']:
address[0] = obfuscate(address[0], state)[:15]
address[1] = obfuscate(address[1], state)[:5]
logger.cfg("Local address: %s %s" % (address[0], address[1]))
def get_interface_props(props):
"""Get the interface properties."""
FLAGS = {
0x01: "UP", 0x02: "BROADCAST", 0x04: "LOOPBACK", 0x08: "POINTOPOINT",
0x10: "MULTICAST", 0x20: "RUNNING"}
flag = int(props, 16)
flag_name = ""
for bit in FLAGS:
if flag & bit != 0:
flag_name += FLAGS[bit] + "|"
return flag_name
def get_ip(ip, state, hexadecimal=True, reverse=True):
"""Get the IP address obfuscated if needed."""
ip = hex2ip(ip, reverse) if hexadecimal else ip
return obfuscate(ip, state)[:15] if state['obfuscate'] else ip
def hex2ip(host_id, reverse=False):
"""Convert the hexadecimal host ID into an IP address."""
host_id = int(host_id, 16)
if not reverse:
addr = "%d.%d.%d.%d" % ((host_id >> 24) & 0xFF, (host_id >> 16) & 0xFF,
(host_id >> 8) & 0xFF, host_id & 0xFF)
else:
addr = "%d.%d.%d.%d" % (host_id & 0xFF, (host_id >> 8) & 0xFF,
(host_id >> 16) & 0xFF, (host_id >> 24) & 0xFF)
return addr
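# Examples (illustrative):
#   hex2ip("C0A80001")               -> "192.168.0.1"
#   hex2ip("C0A80001", reverse=True) -> "1.0.168.192"  (byte order swapped)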
def parse_guid(state, host_id, app_id, instance_id=None):
"""Parse the entity GUID field and conver to text."""
addr = hex2ip(host_id)
app_id = str(int(app_id, 16))
guid = addr + " " + app_id.zfill(5)
if instance_id:
guid += " " + str(int(instance_id, 16))
return get_participant(guid, state)
def parse_sn(seqnum, base=10):
"""Parse the sequence number and return as a number."""
seqnum = seqnum.split(',')
high_sn = int(seqnum[0], base)
low_sn = int(seqnum[1], base)
return (high_sn << 32) | (low_sn)
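# Example (illustrative): parse_sn("1,5") combines the 32-bit halves into
# (1 << 32) | 5 == 4294967301.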
def get_transport_name(class_id):
"""Get the transport name from the class ID."""
TRANSPORTS = {1: "UDPv4", 2: "UDPv6/SHMEM@510", 3: "INTRA", 5: "UDPv6@510",
6: "DTLS", 7: "WAN", 8: "TCPv4LAN", 9: "TCPv4WAN",
10: "TLSv4LAN", 11: "TLSV4WAN", 12: "PCIE", 13: "ITP",
0x01000000: "SHMEM"}
class_id = int(class_id)
return TRANSPORTS[class_id] if class_id in TRANSPORTS else "UNKNOWN"
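if __name__ == "__main__":
    # Minimal smoke demo (illustrative, not part of the original module). It
    # only calls the stateless helpers above, so it runs standalone.
    print(get_oid("000001c1"))               # PARTICIPANT
    print(get_data_packet_name("000100c2"))  # DATA(p)
    print(get_port_name(7400))               # 0 MeMu
    print(hex2ip("C0A80001"))                # 192.168.0.1
    print(parse_sn("1,5"))                   # 4294967301
    print(get_transport_name("1"))           # UDPv4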
|
|
import numpy as np
from numpy.random import randn
import pytest
from pandas._libs import join as libjoin
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, concat, merge
from pandas.tests.reshape.merge.test_merge import NGROUPS, N, get_test_data
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
a_ = np.array
class TestJoin:
def setup_method(self, method):
# aggregate multiple columns
self.df = DataFrame({'key1': get_test_data(),
'key2': get_test_data(),
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
# exclude a couple keys for fun
self.df = self.df[self.df['key2'] > 1]
self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
'key2': get_test_data(ngroups=NGROUPS // 2,
n=N // 5),
'value': np.random.randn(N // 5)})
index, data = tm.getMixedTypeDict()
self.target = DataFrame(data, index=index)
# Join on string value
self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
index=data['C'])
def test_cython_left_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
ls, rs = libjoin.left_outer_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8, 9, 10])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5, -1, -1])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_right_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
rs, ls = libjoin.left_outer_join(right, left, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
# 0 1 1 1
exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
# 2 2 4
6, 7, 8, 6, 7, 8, -1])
exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
4, 4, 4, 5, 5, 5, 6])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_inner_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
max_group = 5
ls, rs = libjoin.inner_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='left')
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='right')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
joined_both = merge(self.df, self.df2, how='right')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='right')
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
joined_both = merge(self.df, self.df2, how='outer')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='outer')
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
joined_both = merge(self.df, self.df2, how='inner')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='inner')
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on='key2',
suffixes=['.foo', '.bar'])
assert 'key1.foo' in joined
assert 'key1.bar' in joined
def test_handle_overlap_arbitrary_key(self):
joined = merge(self.df, self.df2,
left_on='key2', right_on='key1',
suffixes=['.foo', '.bar'])
assert 'key1.foo' in joined
assert 'key2.bar' in joined
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on='C')
tm.assert_series_equal(merged['MergedA'], target['A'],
check_names=False)
tm.assert_series_equal(merged['MergedD'], target['D'],
check_names=False)
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
joined = df.join(df2, on='key')
expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
'value': [0, 0, 1, 1, 2]})
assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
columns=['one'])
df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
columns=['two'])
df_c = DataFrame([[1], [2]], index=[1, 2],
columns=['three'])
joined = df_a.join(df_b, on='one')
joined = joined.join(df_c, on='one')
assert np.isnan(joined['two']['c'])
assert np.isnan(joined['three']['c'])
        # merge column not present
with pytest.raises(KeyError, match="^'E'$"):
target.join(source, on='E')
# overlap
source_copy = source.copy()
source_copy['A'] = 0
msg = ("You are trying to merge on float64 and object columns. If"
" you wish to proceed you should use pd.concat")
with pytest.raises(ValueError, match=msg):
target.join(source_copy, on='A')
def test_join_on_fails_with_different_right_index(self):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
msg = (r'len\(left_on\) must equal the number of levels in the index'
' of "right"')
with pytest.raises(ValueError, match=msg):
merge(df, df2, left_on='a', right_index=True)
def test_join_on_fails_with_different_left_index(self):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)},
index=tm.makeCustomIndex(3, 2))
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)})
msg = (r'len\(right_on\) must equal the number of levels in the index'
' of "left"')
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on='b', left_index=True)
def test_join_on_fails_with_different_column_counts(self):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
msg = r"len\(right_on\) must equal len\(left_on\)"
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on='a', left_on=['a', 'b'])
@pytest.mark.parametrize("wrong_type", [2, 'str', None, np.array([0, 1])])
def test_join_on_fails_with_wrong_object_type(self, wrong_type):
# GH12081 - original issue
# GH21220 - merging of Series and DataFrame is now allowed
# Edited test to remove the Series object from test parameters
df = DataFrame({'a': [1, 1]})
msg = ("Can only merge Series or DataFrame objects, a {} was passed"
.format(str(type(wrong_type))))
with pytest.raises(TypeError, match=msg):
merge(wrong_type, df, left_on='a', right_on='a')
with pytest.raises(TypeError, match=msg):
merge(df, wrong_type, left_on='a', right_on='a')
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
join_col = self.target.pop('C')
result = self.target.join(self.source, on=join_col)
assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on='C')
for col in self.source:
assert col in merged
assert merged[col].isna().all()
merged2 = self.target.join(self.source.reindex([]), on='C',
how='inner')
tm.assert_index_equal(merged2.columns, merged.columns)
assert len(merged2) == 0
def test_join_on_inner(self):
df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
joined = df.join(df2, on='key', how='inner')
expected = df.join(df2, on='key')
expected = expected[expected['value'].notna()]
tm.assert_series_equal(joined['key'], expected['key'],
check_dtype=False)
tm.assert_series_equal(joined['value'], expected['value'],
check_dtype=False)
tm.assert_index_equal(joined.index, expected.index)
def test_join_on_singlekey_list(self):
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
# corner cases
joined = df.join(df2, on=['key'])
expected = df.join(df2, on='key')
assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source['MergedA'], on='C')
expected = self.target.join(self.source[['MergedA']], on='C')
assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({'a': [1, 1]})
ds = Series([2], index=[1], name='b')
result = df.join(ds, on='a')
expected = DataFrame({'a': [1, 1],
'b': [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self, join_type):
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1['bool'] = True
df1['string'] = 'foo'
df2 = DataFrame(index=np.arange(5, 15))
df2['int'] = 1
df2['float'] = 1.
joined = df1.join(df2, how=join_type)
expected = _join_by_hand(df1, df2, how=join_type)
assert_frame_equal(joined, expected)
joined = df2.join(df1, how=join_type)
expected = _join_by_hand(df2, df1, how=join_type)
assert_frame_equal(joined, expected)
def test_join_index_mixed_overlap(self):
df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(10),
columns=['A', 'B', 'C', 'D'])
assert df1['B'].dtype == np.int64
assert df1['D'].dtype == np.bool_
df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(0, 10, 2),
columns=['A', 'B', 'C', 'D'])
# overlap
joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
'A_two', 'B_two', 'C_two', 'D_two']
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(randn(30, 2), columns=['a', 'b'])
c = Series(randn(30))
a['c'] = c
d = DataFrame(randn(30, 1), columns=['q'])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
df1 = DataFrame(data=np.random.randn(6), index=index1,
columns=['var X'])
df2 = DataFrame(data=np.random.randn(6), index=index2,
columns=['var Y'])
df1 = df1.sort_index(level=0)
df2 = df2.sort_index(level=0)
joined = df1.join(df2, how='outer')
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
df1 = df1.sort_index(level=1)
df2 = df2.sort_index(level=1)
joined = df1.join(df2, how='outer').sort_index(level=0)
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
def test_join_inner_multiindex(self):
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
to_join = DataFrame(np.random.randn(10, 3), index=index,
columns=['j_one', 'j_two', 'j_three'])
joined = data.join(to_join, on=['key1', 'key2'], how='inner')
expected = merge(data, to_join.reset_index(),
left_on=['key1', 'key2'],
right_on=['first', 'second'], how='inner',
sort=False)
expected2 = merge(to_join, data,
right_on=['key1', 'key2'], left_index=True,
how='inner', sort=False)
assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(to_join, data, right_on=['key1', 'key2'],
left_index=True, how='inner', sort=False)
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
assert joined.index.is_monotonic
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.loc[:, expected.columns])
def test_join_hierarchical_mixed(self):
# GH 2024
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
other_df = DataFrame(
[(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
other_df.set_index('a', inplace=True)
# GH 9455, 12219
with tm.assert_produces_warning(UserWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
assert ('b', 'mean') in result
assert 'b' in result
def test_join_float64_float32(self):
a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype=np.float64)
b = DataFrame(randn(10, 1), columns=['c'], dtype=np.float32)
joined = a.join(b)
assert joined.dtypes['a'] == 'float64'
assert joined.dtypes['b'] == 'float64'
assert joined.dtypes['c'] == 'float32'
a = np.random.randint(0, 5, 100).astype('int64')
b = np.random.random(100).astype('float64')
c = np.random.random(100).astype('float32')
df = DataFrame({'a': a, 'b': b, 'c': c})
xpdf = DataFrame({'a': a, 'b': b, 'c': c})
s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
rs = df.merge(s, left_on='a', right_index=True)
assert rs.dtypes['a'] == 'int64'
assert rs.dtypes['b'] == 'float64'
assert rs.dtypes['c'] == 'float32'
assert rs.dtypes['md'] == 'float32'
xp = xpdf.merge(s, left_on='a', right_index=True)
assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='outer')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
result = result.reset_index()
expected = expected[result.columns]
expected['a'] = expected.a.astype('int64')
expected['b'] = expected.b.astype('int64')
assert_frame_equal(result, expected)
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame(
{"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='inner')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
result = result.reset_index()
assert_frame_equal(result, expected.loc[:, result.columns])
# GH 11519
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
s = Series(np.repeat(np.arange(8), 2),
index=np.repeat(np.arange(8), 2), name='TEST')
inner = df.join(s, how='inner')
outer = df.join(s, how='outer')
left = df.join(s, how='left')
right = df.join(s, how='right')
assert_frame_equal(inner, outer)
assert_frame_equal(inner, left)
assert_frame_equal(inner, right)
def test_join_sort(self):
left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'value2': ['a', 'b', 'c']},
index=['bar', 'baz', 'foo'])
joined = left.join(right, on='key', sort=True)
expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
'value': [2, 3, 1, 4],
'value2': ['a', 'b', 'c', 'c']},
index=[1, 2, 0, 3])
assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on='key', sort=False)
tm.assert_index_equal(joined.index, pd.Index(list(range(4))))
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
df1 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 3, 'a'])
df2 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 3, 3, 4])
result = df1.join(df2)
expected = DataFrame({'a': [1, 2, 3, 3, 4],
'b': [5, np.nan, 6, 7, np.nan]},
index=[1, 2, 3, 3, 'a'])
tm.assert_frame_equal(result, expected)
df3 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 2, 'a'])
df4 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 2, 3, 4])
result = df3.join(df4)
expected = DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 6, np.nan]},
index=[1, 2, 2, 'a'])
tm.assert_frame_equal(result, expected)
def test_join_non_unique_period_index(self):
# GH #16871
index = pd.period_range('2016-01-01', periods=16, freq='M')
df = DataFrame([i for i in range(len(index))],
index=index, columns=['pnum'])
df2 = concat([df, df])
result = df.join(df2, how='inner', rsuffix='_df2')
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=['pnum', 'pnum_df2'], index=df2.sort_index().index)
tm.assert_frame_equal(result, expected)
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6),
columns=['a', 'b', 'c', 'd', 'e', 'f'])
df.insert(0, 'id', 0)
df.insert(5, 'dt', 'foo')
grouped = df.groupby('id')
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix='_right')
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[['a', 'b']][:-2],
df[['c', 'd']][2:], df[['e', 'f']][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how='outer')
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how='inner')
_check_diff_index(df_list, joined, df.index[2:8])
msg = "Joining multiple DataFrames only supported for joining on index"
with pytest.raises(ValueError, match=msg):
df_list[0].join(df_list[1:], on='a')
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df['key'] = ['foo', 'bar'] * 4
df1 = df.loc[:, ['A', 'B']]
df2 = df.loc[:, ['C', 'D']]
df3 = df.loc[:, ['key']]
result = df1.join([df2, df3])
assert_frame_equal(result, df)
def test_join_dups(self):
# joining dups
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
expected = concat([df, df], axis=1)
result = df.join(df, rsuffix='_2')
result.columns = expected.columns
assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer")
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ['x_x', 'y_x', 'x_y',
'y_y', 'x_x', 'y_x', 'x_y', 'y_y']
assert_frame_equal(dta, expected)
def test_join_multi_to_multi(self, join_type):
# GH 20475
leftindex = MultiIndex.from_product([list('abc'), list('xy'), [1, 2]],
names=['abc', 'xy', 'num'])
left = DataFrame({'v1': range(12)}, index=leftindex)
rightindex = MultiIndex.from_product([list('abc'), list('xy')],
names=['abc', 'xy'])
right = DataFrame({'v2': [100 * i for i in range(1, 7)]},
index=rightindex)
result = left.join(right, on=['abc', 'xy'], how=join_type)
expected = (left.reset_index()
.merge(right.reset_index(),
on=['abc', 'xy'], how=join_type)
.set_index(['abc', 'xy', 'num'])
)
assert_frame_equal(expected, result)
msg = (r'len\(left_on\) must equal the number of levels in the index'
' of "right"')
with pytest.raises(ValueError, match=msg):
left.join(right, on='xy', how=join_type)
with pytest.raises(ValueError, match=msg):
right.join(left, on=['abc', 'xy'], how=join_type)
def test_join_on_tz_aware_datetimeindex(self):
# GH 23931, 26335
df1 = pd.DataFrame(
{
'date': pd.date_range(start='2018-01-01', periods=5,
tz='America/Chicago'),
'vals': list('abcde')
}
)
df2 = pd.DataFrame(
{
'date': pd.date_range(start='2018-01-03', periods=5,
tz='America/Chicago'),
'vals_2': list('tuvwx')
}
)
result = df1.join(df2.set_index('date'), on='date')
expected = df1.copy()
expected['vals_2'] = pd.Series([np.nan] * 2 + list('tuv'),
dtype=object)
assert_frame_equal(result, expected)
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
# some smoke tests
for c in join_col:
assert(result[c].notna().all())
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
for group_key, group in result.groupby(join_col):
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError:
if how in ('left', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError:
if how in ('right', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [c for c in group.columns
if c in columns or c.replace(suffix, '') in columns]
# filter
group = group.loc[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ''))
# put in the right order...
group = group.loc[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = {tuple(row) for row in jvalues}
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isna().all())
def _join_by_hand(a, b, how='left'):
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in b_re.items():
a_re[col] = s
return a_re.reindex(columns=result_columns)
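# Illustrative sketch (not part of the original test suite): exercising the
# _join_by_hand helper above against DataFrame.join directly. The frames and
# labels here are hypothetical examples, not fixtures from this module.
def _example_join_by_hand():
    left = DataFrame({'a': [1, 2, 3]}, index=['x', 'y', 'z'])
    right = DataFrame({'b': [10, 20]}, index=['x', 'z'])
    # Build the expected frame "by hand", then check the real join against it.
    expected = _join_by_hand(left, right, how='left')
    result = left.join(right, how='left')
    tm.assert_frame_equal(result, expected)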
|
|
'''
Created on 2015/7/20/
:author: hubo
'''
from __future__ import print_function
import unittest
from misc.openflow import common, openflow10, openflow13
from namedstruct import nstruct, dump
import json
import misc.ethernet as ethernet
class Test(unittest.TestCase):
exclude = [common.ofp_error_experimenter_msg, openflow13.ofp_group_desc_stats, openflow13.ofp_oxm_mask, openflow13.ofp_oxm_nomask, openflow13._ofp_oxm_mask_value,
openflow13.ofp_action_set_field, openflow10.nx_flow_mod_spec, openflow13.nx_flow_mod_spec, openflow10.nx_matches, openflow13.nx_matches]
def testDefs10(self):
for k in dir(openflow10):
attr = getattr(openflow10, k)
if isinstance(attr, nstruct) and attr not in self.exclude and not k.startswith('nxm_') and not hasattr(ethernet, k):
if not attr.subclasses:
self.assertEqual(k, repr(attr), k + ' has different name: ' + repr(attr))
print(k, repr(attr))
obj = attr.new()
s = obj._tobytes()
r = attr.parse(s)
self.assertTrue(r is not None, repr(attr) + ' failed to parse')
obj2, size = r
self.assertEqual(size, len(s), repr(attr) + ' failed to parse')
self.assertEqual(dump(obj), dump(obj2), repr(attr) + ' changed after parsing')
def testDefs13(self):
for k in dir(openflow13):
attr = getattr(openflow13, k)
if isinstance(attr, nstruct) and attr not in self.exclude and not k.startswith('ofp_oxm_') and not k.startswith('nxm_') and not hasattr(ethernet, k):
if not attr.subclasses:
self.assertEqual(k, repr(attr), k + ' has different name: ' + repr(attr))
print(k, repr(attr))
obj = attr.new()
s = obj._tobytes()
r = attr.parse(s)
self.assertTrue(r is not None, repr(attr) + ' failed to parse')
obj2, size = r
self.assertEqual(size, len(s), repr(attr) + ' failed to parse')
self.assertEqual(dump(obj), dump(obj2), repr(attr) + ' changed after parsing')
def testOxm(self):
fm = openflow13.ofp_flow_mod.new(priority = openflow13.OFP_DEFAULT_PRIORITY, command = openflow13.OFPFC_ADD, buffer_id = openflow13.OFP_NO_BUFFER)
fm.cookie = 0x67843512
fm.match = openflow13.ofp_match_oxm.new()
fm.match.oxm_fields.append(openflow13.create_oxm(openflow13.OXM_OF_ETH_DST, b'\x06\x00\x0c\x15\x45\x99'))
fm.match.oxm_fields.append(openflow13.create_oxm(openflow13.OXM_OF_ETH_TYPE, common.ETHERTYPE_IP))
fm.match.oxm_fields.append(openflow13.create_oxm(openflow13.OXM_OF_IP_PROTO, 6))
fm.match.oxm_fields.append(openflow13.create_oxm(openflow13.OXM_OF_IPV4_SRC_W, [192,168,1,0], [255,255,255,0]))
apply = openflow13.ofp_instruction_actions.new(type = openflow13.OFPIT_APPLY_ACTIONS)
apply.actions.append(openflow13.ofp_action_set_field.new(field = openflow13.create_oxm(openflow13.OXM_OF_IPV4_SRC, [202, 102, 0, 37])))
apply.actions.append(openflow13.ofp_action_set_queue.new(queue_id = 1))
fm.instructions.append(apply)
write = openflow13.ofp_instruction_actions.new(type = openflow13.OFPIT_WRITE_ACTIONS)
write.actions.append(openflow13.ofp_action_output.new(port = 7))
fm.instructions.append(write)
goto = openflow13.ofp_instruction_goto_table.new(table_id = 1)
fm.instructions.append(goto)
s = fm._tobytes()
r = common.ofp_msg.parse(s)
self.assertTrue(r is not None, 'Cannot parse message')
obj2, size = r
self.assertEqual(size, len(s), 'Cannot parse message')
print(json.dumps(dump(fm), indent=2))
print(json.dumps(dump(obj2), indent=2))
self.assertEqual(dump(fm), dump(obj2), 'message changed after parsing')
def testDefs13Size(self):
# From openflow.h
self.assertEqual(len(openflow13.ofp_header()),8)
self.assertEqual(openflow13.ofp_hello_elem()._realsize(),4) # Excluding padding
self.assertEqual(openflow13.ofp_hello_elem_versionbitmap()._realsize(),4)
self.assertEqual(len(openflow13.ofp_hello()),8)
self.assertEqual(len(openflow13.ofp_switch_config()),12)
self.assertEqual(len(openflow13.ofp_table_mod()),16)
self.assertEqual(len(openflow13.ofp_port()),64)
self.assertEqual(len(openflow13.ofp_switch_features()),32)
self.assertEqual(len(openflow13.ofp_port_status()),80)
self.assertEqual(len(openflow13.ofp_port_mod()),40)
self.assertEqual(len(openflow13.ofp_match()),8)
self.assertEqual(len(openflow13.ofp_oxm_experimenter()),8)
self.assertEqual(len(openflow13.ofp_action()),8)
self.assertEqual(len(openflow13.ofp_action_output()),16)
self.assertEqual(len(openflow13.ofp_action_mpls_ttl()),8)
self.assertEqual(len(openflow13.ofp_action_push()),8)
self.assertEqual(len(openflow13.ofp_action_pop_mpls()),8)
self.assertEqual(len(openflow13.ofp_action_group()),8)
self.assertEqual(len(openflow13.ofp_action_nw_ttl()),8)
self.assertEqual(len(openflow13.ofp_action_set_field()),8)
self.assertEqual(len(openflow13.ofp_action_experimenter()),8)
self.assertEqual(openflow13.ofp_instruction()._realsize(),4)
self.assertEqual(len(openflow13.ofp_instruction_goto_table()),8)
self.assertEqual(len(openflow13.ofp_instruction_write_metadata()),24)
self.assertEqual(len(openflow13.ofp_instruction_actions()),8)
self.assertEqual(len(openflow13.ofp_instruction_meter()),8)
self.assertEqual(len(openflow13.ofp_instruction_experimenter()),8)
self.assertEqual(len(openflow13.ofp_flow_mod()),56)
self.assertEqual(len(openflow13.ofp_bucket()),16)
self.assertEqual(len(openflow13.ofp_group_mod()),16)
self.assertEqual(len(openflow13.ofp_packet_out()),24)
self.assertEqual(len(openflow13.ofp_packet_in()),34) # Add the extra padding
self.assertEqual(len(openflow13.ofp_flow_removed()),56)
self.assertEqual(openflow13.ofp_meter_band()._realsize(),12)
self.assertEqual(len(openflow13.ofp_meter_band_drop()),16)
self.assertEqual(len(openflow13.ofp_meter_band_dscp_remark()),16)
self.assertEqual(len(openflow13.ofp_meter_band_experimenter()),16)
self.assertEqual(len(openflow13.ofp_meter_mod()),16)
self.assertEqual(len(openflow13.ofp_error_msg()),12)
self.assertEqual(len(openflow13.ofp_error_experimenter_msg()),16)
self.assertEqual(len(openflow13.ofp_multipart_request()),16)
self.assertEqual(len(openflow13.ofp_multipart_reply()),16)
self.assertEqual(len(openflow13.ofp_desc()),1056)
self.assertEqual(len(openflow13.ofp_flow_stats_request()),40 + len(openflow13.ofp_multipart_request()))
self.assertEqual(len(openflow13.ofp_flow_stats()),56)
self.assertEqual(len(openflow13.ofp_aggregate_stats_request()),40 + len(openflow13.ofp_multipart_request()))
self.assertEqual(len(openflow13.ofp_aggregate_stats_reply()),24 + len(openflow13.ofp_multipart_reply()))
self.assertEqual(openflow13.ofp_table_feature_prop()._realsize(),4)
self.assertEqual(openflow13.ofp_table_feature_prop_instructions()._realsize(),4)
self.assertEqual(openflow13.ofp_table_feature_prop_next_tables()._realsize(),4)
self.assertEqual(openflow13.ofp_table_feature_prop_actions()._realsize(),4)
self.assertEqual(openflow13.ofp_table_feature_prop_oxm()._realsize(),4)
self.assertEqual(openflow13.ofp_table_feature_prop_experimenter()._realsize(),12)
self.assertEqual(len(openflow13.ofp_table_features()),64)
self.assertEqual(len(openflow13.ofp_table_stats()),24)
self.assertEqual(len(openflow13.ofp_port_stats_request()),8 + len(openflow13.ofp_multipart_request()))
self.assertEqual(len(openflow13.ofp_port_stats()),112)
self.assertEqual(len(openflow13.ofp_group_stats_request()),8 + len(openflow13.ofp_multipart_request()))
self.assertEqual(len(openflow13.ofp_bucket_counter()),16)
self.assertEqual(len(openflow13.ofp_group_stats()),40)
self.assertEqual(len(openflow13.ofp_group_desc()),8)
self.assertEqual(len(openflow13.ofp_group_features()),40)
self.assertEqual(len(openflow13.ofp_meter_multipart_request()),8 + len(openflow13.ofp_multipart_request()))
self.assertEqual(len(openflow13.ofp_meter_band_stats()),16)
self.assertEqual(len(openflow13.ofp_meter_stats()),40)
self.assertEqual(len(openflow13.ofp_meter_config()),8 + len(openflow13.ofp_multipart_reply()))
self.assertEqual(len(openflow13.ofp_meter_features()),16)
self.assertEqual(len(openflow13.ofp_experimenter_multipart_header()),8)
self.assertEqual(len(openflow13.ofp_experimenter()),16)
self.assertEqual(len(openflow13.ofp_queue_prop_header()),8)
self.assertEqual(len(openflow13.ofp_queue_prop_min_rate()),16)
self.assertEqual(len(openflow13.ofp_queue_prop_max_rate()),16)
self.assertEqual(len(openflow13.ofp_queue_prop_experimenter()),16)
self.assertEqual(len(openflow13.ofp_packet_queue()),16)
self.assertEqual(len(openflow13.ofp_queue_get_config_request()),16)
self.assertEqual(len(openflow13.ofp_queue_get_config_reply()),16)
self.assertEqual(len(openflow13.ofp_action_set_queue()),8)
self.assertEqual(len(openflow13.ofp_queue_stats_request()),8 + len(openflow13.ofp_multipart_reply()))
self.assertEqual(len(openflow13.ofp_queue_stats()),40)
self.assertEqual(len(openflow13.ofp_role_request()),24)
self.assertEqual(len(openflow13.ofp_async_config()),32)
def testDefs10Size(self):
self.assertEqual(len(openflow10.ofp_header()),8)
self.assertEqual(len(openflow10.ofp_phy_port()),48)
self.assertEqual(len(openflow10.ofp_packet_queue()),8)
self.assertEqual(len(openflow10.ofp_queue_prop_header()),8)
self.assertEqual(len(openflow10.ofp_queue_prop_min_rate()),16)
self.assertEqual(len(openflow10.ofp_match()),40)
self.assertEqual(len(openflow10.ofp_action()),8)
self.assertEqual(len(openflow10.ofp_action_output()),8)
self.assertEqual(len(openflow10.ofp_action_enqueue()),16)
self.assertEqual(len(openflow10.ofp_action_vlan_vid()),8)
self.assertEqual(len(openflow10.ofp_action_vlan_pcp()),8)
self.assertEqual(len(openflow10.ofp_action_dl_addr()),16)
self.assertEqual(len(openflow10.ofp_action_nw_addr()),8)
self.assertEqual(len(openflow10.ofp_action_nw_tos()),8)
self.assertEqual(len(openflow10.ofp_action_tp_port()),8)
self.assertEqual(len(openflow10.ofp_action_vendor()),8)
self.assertEqual(len(openflow10.ofp_switch_features()),32)
self.assertEqual(len(openflow10.ofp_switch_config()),12)
self.assertEqual(len(openflow10.ofp_flow_mod()),72)
self.assertEqual(len(openflow10.ofp_port_mod()),32)
self.assertEqual(len(openflow10.ofp_queue_get_config_request()),12)
self.assertEqual(len(openflow10.ofp_queue_get_config_reply()),16)
self.assertEqual(len(openflow10.ofp_stats_request()),12)
self.assertEqual(len(openflow10.ofp_stats_reply()),12)
self.assertEqual(len(openflow10.ofp_desc_stats()),1056)
self.assertEqual(len(openflow10.ofp_flow_stats_request()),44 + len(openflow10.ofp_stats_request()))
self.assertEqual(len(openflow10.ofp_flow_stats()),88)
self.assertEqual(len(openflow10.ofp_aggregate_stats_request()),44 + len(openflow10.ofp_stats_request()))
self.assertEqual(len(openflow10.ofp_aggregate_stats_reply()),24 + len(openflow10.ofp_stats_reply()))
self.assertEqual(len(openflow10.ofp_table_stats()),64)
self.assertEqual(len(openflow10.ofp_port_stats_request()),8 + len(openflow10.ofp_stats_request()))
self.assertEqual(len(openflow10.ofp_port_stats()),104)
self.assertEqual(len(openflow10.ofp_queue_stats_request()),8 + len(openflow10.ofp_stats_request()))
self.assertEqual(len(openflow10.ofp_queue_stats()),32)
self.assertEqual(len(openflow10.ofp_packet_out()),16)
self.assertEqual(len(openflow10.ofp_packet_in()),18) # No extra padding
self.assertEqual(len(openflow10.ofp_flow_removed()),88)
self.assertEqual(len(openflow10.ofp_port_status()),64)
self.assertEqual(len(openflow10.ofp_error_msg()),12)
self.assertEqual(len(openflow10.ofp_vendor()),12)
def testDefs10ExtSize(self):
self.assertEqual(len(openflow10.nicira_header()),16)
self.assertEqual(len(openflow10.nx_stats_request()),24)
self.assertEqual(len(openflow10.nx_flow_mod_table_id()),8 + len(openflow10.nicira_header()))
self.assertEqual(len(openflow10.nx_set_packet_in_format()),4 + len(openflow10.nicira_header()))
self.assertEqual(len(openflow10.nx_packet_in()),24 + len(openflow10.nicira_header()) + 2) # Extra padding
self.assertEqual(len(openflow10.nx_role_request()),4 + len(openflow10.nicira_header()))
self.assertEqual(len(openflow10.nx_async_config()),24 + len(openflow10.nicira_header()))
self.assertEqual(len(openflow10.nx_set_flow_format()),4 + len(openflow10.nicira_header()))
self.assertEqual(len(openflow10.nx_flow_mod()),32 + len(openflow10.nicira_header()))
self.assertEqual(len(openflow10.nx_flow_removed()),40 + len(openflow10.nicira_header()))
self.assertEqual(len(openflow10.nx_flow_stats_request()),8 + len(openflow10.nx_stats_request()))
self.assertEqual(len(openflow10.nx_flow_stats()),48)
self.assertEqual(len(openflow10.nx_aggregate_stats_request()),8 + len(openflow10.nx_stats_request()))
self.assertEqual(len(openflow10.nx_controller_id()),8 + len(openflow10.nicira_header()))
self.assertEqual(len(openflow10.nx_flow_monitor_request()),16 + len(openflow10.nx_stats_request()))
self.assertEqual(openflow10.nx_flow_update()._realsize(),4)
self.assertEqual(len(openflow10.nx_flow_update_full()),24)
self.assertEqual(len(openflow10.nx_flow_update_abbrev()),8)
self.assertEqual(len(openflow10.nx_flow_monitor_cancel()),4 + len(openflow10.nicira_header()))
def testDefs13ExtSize(self):
self.assertEqual(len(openflow13.nicira_header()),16)
self.assertEqual(len(openflow13.nx_stats_request()),24)
self.assertEqual(len(openflow13.nx_flow_mod_table_id()),8 + len(openflow13.nicira_header()))
self.assertEqual(len(openflow13.nx_set_packet_in_format()),4 + len(openflow13.nicira_header()))
self.assertEqual(len(openflow13.nx_packet_in()),24 + len(openflow13.nicira_header()) + 2) # Extra padding
self.assertEqual(len(openflow13.nx_role_request()),4 + len(openflow13.nicira_header()))
self.assertEqual(len(openflow13.nx_async_config()),24 + len(openflow13.nicira_header()))
self.assertEqual(len(openflow13.nx_set_flow_format()),4 + len(openflow13.nicira_header()))
self.assertEqual(len(openflow13.nx_flow_mod()),32 + len(openflow13.nicira_header()))
self.assertEqual(len(openflow13.nx_flow_removed()),40 + len(openflow13.nicira_header()))
self.assertEqual(len(openflow13.nx_flow_stats_request()),8 + len(openflow13.nx_stats_request()))
self.assertEqual(len(openflow13.nx_flow_stats()),48)
self.assertEqual(len(openflow13.nx_aggregate_stats_request()),8 + len(openflow13.nx_stats_request()))
self.assertEqual(len(openflow13.nx_controller_id()),8 + len(openflow13.nicira_header()))
self.assertEqual(len(openflow13.nx_flow_monitor_request()),16 + len(openflow13.nx_stats_request()))
self.assertEqual(openflow13.nx_flow_update()._realsize(),4)
self.assertEqual(len(openflow13.nx_flow_update_full()),24)
self.assertEqual(len(openflow13.nx_flow_update_abbrev()),8)
self.assertEqual(len(openflow13.nx_flow_monitor_cancel()),4 + len(openflow13.nicira_header()))
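# Minimal round-trip sketch (not part of the original tests): the same
# new()/_tobytes()/parse() cycle the test methods above rely on, shown for a
# single message type.
def _example_roundtrip():
    msg = openflow13.ofp_hello.new()
    data = msg._tobytes()
    parsed, size = openflow13.ofp_hello.parse(data)
    assert size == len(data)
    assert dump(msg) == dump(parsed)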
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testDefs']
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import htpc
from urllib import quote, urlencode
import requests
import logging
from cherrypy.lib.auth2 import require, member_of
from htpc.helpers import fix_basepath, get_image, striphttp
class Sickrage(object):
def __init__(self):
self.logger = logging.getLogger('modules.sickrage')
htpc.MODULES.append({
'name': 'Sickrage',
'id': 'sickrage',
'test': htpc.WEBDIR + 'sickrage/ping',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'sickrage_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'sickrage_name'},
{'type': 'text', 'label': 'IP / Host', 'placeholder': 'localhost', 'name': 'sickrage_host'},
{'type': 'text', 'label': 'Port', 'placeholder': '8081', 'name': 'sickrage_port'},
{'type': 'text', 'label': 'Basepath', 'placeholder': '/sickrage', 'name': 'sickrage_basepath'},
{'type': 'text', 'label': 'API key', 'name': 'sickrage_apikey'},
{'type': 'bool', 'label': 'Use SSL', 'name': 'sickrage_ssl'},
{'type': 'text', 'label': 'Reverse proxy link', 'placeholder': '', 'desc':'Reverse proxy link, e.g. https://sr.domain.com', 'name': 'sickrage_reverse_proxy_link'}
]
})
@cherrypy.expose()
@require()
def index(self):
return htpc.LOOKUP.get_template('sickrage.html').render(scriptname='sickrage', webinterface=self.webinterface())
def webinterface(self):
host = striphttp(htpc.settings.get('sickrage_host', ''))
port = str(htpc.settings.get('sickrage_port', ''))
apikey = htpc.settings.get('sickrage_apikey', '')
ssl = 's' if htpc.settings.get('sickrage_ssl', 0) else ''
sickrage_basepath = fix_basepath(htpc.settings.get('sickrage_basepath', '/'))
url = 'http%s://%s:%s%s' % (ssl, host, port, sickrage_basepath)
if htpc.settings.get('sickrage_reverse_proxy_link'):
url = htpc.settings.get('sickrage_reverse_proxy_link')
return url
@cherrypy.expose()
@require()
def view(self, indexerid):
if not indexerid.isdigit():
    self.logger.error('Invalid show ID was supplied: ' + str(indexerid))
    raise cherrypy.HTTPError('500 Error', 'Invalid show ID.')
return htpc.LOOKUP.get_template('sickrage_view.html').render(scriptname='sickrage_view', indexerid=indexerid)
@cherrypy.expose()
@require(member_of(htpc.role_admin))
@cherrypy.tools.json_out()
def ping(self, sickrage_host, sickrage_port, sickrage_apikey, sickrage_basepath, sickrage_ssl=False, **kwargs):
ssl = 's' if sickrage_ssl else ''
self.logger.debug('Testing connectivity')
try:
sickrage_basepath = fix_basepath(sickrage_basepath)
url = 'http%s://%s:%s%sapi/%s/?cmd=sb.ping' % (ssl, striphttp(sickrage_host), sickrage_port, sickrage_basepath, sickrage_apikey)
self.logger.debug('Trying to contact sickrage via %s' % url)
response = requests.get(url, timeout=10, verify=False)
ret = response.json()
if ret.get('result') == 'success':
self.logger.debug('Sickrage connectivity test success')
return ret
except Exception as e:
    self.logger.error('Unable to contact sickrage via %s: %s' % (url, e))
    return
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetShowList(self):
self.logger.debug('Fetching Show list')
return self.fetch('shows&sort=name', False, 200)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetNextAired(self):
self.logger.debug('Fetching Next Aired Episodes')
return self.fetch('future')
@cherrypy.expose()
@require()
def GetBanner(self, indexerid):
self.logger.debug('Fetching Banner')
cherrypy.response.headers['Content-Type'] = 'image/jpeg'
return self.fetch('show.getbanner&indexerid=' + indexerid, True)
@cherrypy.expose()
@require()
def GetPoster(self, indexerid):
self.logger.debug('Fetching Poster')
cherrypy.response.headers['Content-Type'] = 'image/jpeg'
return self.fetch('show.getposter&indexerid=' + indexerid, True)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetHistory(self, limit=''):
self.logger.debug('Fetching History')
return self.fetch('history&limit=' + limit)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetLogs(self):
self.logger.debug('Fetching Logs')
return self.fetch('logs&min_level=info')
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def AddShow(self, indexername='', indexerid='', **kwargs):
# indexername=tvrageid or tvdbid
self.logger.debug('Adding a Show')
return self.fetch('show.addnew&' + urlencode(kwargs))
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetShow(self, indexerid):
self.logger.debug('Fetching Show')
return self.fetch('show&indexerid=' + indexerid)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetEpisode(self, strShowID, strSeason, strEpisode):
return self.fetch('episode&indexerid=' + strShowID + '&season=' + strSeason + '&episode=' + strEpisode + '&full_path=1')
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetSeason(self, indexerid, season):
self.logger.debug('Fetching Season')
return self.fetch('show.seasons&indexerid=' + indexerid + '&season=' + season)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def Postprocess(self, path='', force_replace=False, return_data=False, is_priority=False, type=False):
self.logger.debug('Postprocess')
if path:
path = '&%s' % path
return self.fetch('postprocess' + path, False, 120)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def Restart(self):
self.logger.debug('Restart sr')
return self.fetch('sb.restart', False, 15)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def SearchEpisodeDownload(self, indexerid, season, episode):
self.logger.debug('Fetching Episode Downloads')
return self.fetch('episode.search&indexerid=' + indexerid + '&season=' + season + '&episode=' + episode, False, 45)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def SearchSubtitle(self, indexerid, season, episode):
self.logger.debug('Fetching subtitle')
return self.fetch('episode.subtitlesearch&indexerid=' + indexerid + '&season=' + season + '&episode=' + episode, False, 45)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def Shutdown(self):
self.logger.debug('Shutdown sickrage')
return self.fetch('sb.shutdown', False, 20)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def ForceFullUpdate(self, indexerid):
self.logger.debug('Force full update for indexerid %s' % indexerid)
return self.fetch('show.update&indexerid=' + indexerid)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def RescanFiles(self, indexerid):
self.logger.debug('Rescan all local files for indexerid %s' % indexerid)
return self.fetch('show.refresh&indexerid=' + indexerid)
@cherrypy.expose()
@cherrypy.tools.json_out()
@require(member_of(htpc.role_user))
def RemoveShow(self, indexerid, show_name=''):
self.logger.debug('Delete %s from Sickrage indexerid %s' % (show_name, indexerid))
return self.fetch('show.delete&indexerid=%s' % indexerid)
@cherrypy.expose()
@cherrypy.tools.json_out()
@require()
def SearchShow(self, query):
self.logger.debug('Searching tvdb and tvrage for query %s' % query)
return self.fetch('sb.searchindexers&indexer=0&name=%s' % quote(query), False, 60)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def ShowsStats(self):
self.logger.debug('Grabbing tvrage statistics')
return self.fetch('shows.stats')
def fetch(self, cmd, img=False, timeout=20):
host = striphttp(htpc.settings.get('sickrage_host', ''))
port = str(htpc.settings.get('sickrage_port', ''))
apikey = htpc.settings.get('sickrage_apikey', '')
ssl = 's' if htpc.settings.get('sickrage_ssl', 0) else ''
sickrage_basepath = fix_basepath(htpc.settings.get('sickrage_basepath', '/'))
url = 'http%s://%s:%s%sapi/%s/?cmd=%s' % (ssl, host, port, sickrage_basepath, apikey, cmd)
self.logger.debug('Fetching information from: %s' % url)
try:
if img is True:
# Cache the images
return get_image(url)
res = requests.get(url, timeout=timeout, verify=False)
return res.json()
except Exception as e:
self.logger.error('Unable to fetch information')
self.logger.error(url)
self.logger.error(e)
return
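# Illustrative sketch (not part of the original module): the API URL layout
# that ping() and fetch() assemble, shown with hypothetical settings. The API
# key sits in the path and the command is passed as the cmd query parameter.
def _example_api_url():
    host, port, basepath, apikey = 'localhost', '8081', '/sickrage/', 'abc123'
    return 'http://%s:%s%sapi/%s/?cmd=%s' % (host, port, basepath, apikey, 'sb.ping')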
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import inspect
import pkg_resources
import six
from nova.openstack.common._i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class deprecated(object):
"""A decorator to mark callables as deprecated.
This decorator logs a deprecation message when the callable it decorates is
used. The message will include the release where the callable was
deprecated, the release where it may be removed and possibly an optional
replacement.
Examples:
1. Specifying the required deprecated release
>>> @deprecated(as_of=deprecated.ICEHOUSE)
... def a(): pass
2. Specifying a replacement:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
... def b(): pass
3. Specifying the release where the functionality may be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
... def c(): pass
4. Specifying the deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
... def d(): pass
5. Specifying a replacement, deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
... def e(): pass
"""
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
BEXAR = 'B'
FOLSOM = 'F'
GRIZZLY = 'G'
HAVANA = 'H'
ICEHOUSE = 'I'
JUNO = 'J'
KILO = 'K'
_RELEASES = {
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
'B': 'Bexar',
'F': 'Folsom',
'G': 'Grizzly',
'H': 'Havana',
'I': 'Icehouse',
'J': 'Juno',
'K': 'Kilo',
}
_deprecated_msg_with_alternative = _(
'%(what)s is deprecated as of %(as_of)s in favor of '
'%(in_favor_of)s and may be removed in %(remove_in)s.')
_deprecated_msg_no_alternative = _(
'%(what)s is deprecated as of %(as_of)s and may be '
'removed in %(remove_in)s. It will not be superseded.')
_deprecated_msg_with_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
_deprecated_msg_with_no_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
"""Initialize decorator
:param as_of: the release deprecating the callable. Constants
are defined in this class for convenience.
:param in_favor_of: the replacement for the callable (optional)
:param remove_in: an integer specifying how many releases to wait
before removing (default: 2)
:param what: name of the thing being deprecated (default: the
callable's name)
"""
self.as_of = as_of
self.in_favor_of = in_favor_of
self.remove_in = remove_in
self.what = what
def __call__(self, func_or_cls):
if not self.what:
self.what = func_or_cls.__name__ + '()'
msg, details = self._build_message()
if inspect.isfunction(func_or_cls):
@six.wraps(func_or_cls)
def wrapped(*args, **kwargs):
LOG.deprecated(msg, details)
return func_or_cls(*args, **kwargs)
return wrapped
elif inspect.isclass(func_or_cls):
orig_init = func_or_cls.__init__
# TODO(tsufiev): change `functools` module to `six` as
# soon as six 1.7.4 (with fix for passing `assigned`
# argument to underlying `functools.wraps`) is released
# and added to the nova-incubator requirements
@functools.wraps(orig_init, assigned=('__name__', '__doc__'))
def new_init(self, *args, **kwargs):
LOG.deprecated(msg, details)
orig_init(self, *args, **kwargs)
func_or_cls.__init__ = new_init
return func_or_cls
else:
raise TypeError('deprecated can be used only with functions or '
'classes')
def _get_safe_to_remove_release(self, release):
# TODO(dstanek): this method will have to be reimplemented once
# we get to the X release because once we get to the Y
# release, what is Y+2?
new_release = chr(ord(release) + self.remove_in)
if new_release in self._RELEASES:
return self._RELEASES[new_release]
else:
return new_release
def _build_message(self):
details = dict(what=self.what,
as_of=self._RELEASES[self.as_of],
remove_in=self._get_safe_to_remove_release(self.as_of))
if self.in_favor_of:
details['in_favor_of'] = self.in_favor_of
if self.remove_in > 0:
msg = self._deprecated_msg_with_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_alternative_no_removal
else:
if self.remove_in > 0:
msg = self._deprecated_msg_no_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_no_alternative_no_removal
return msg, details
def is_compatible(requested_version, current_version, same_major=True):
"""Determine whether `requested_version` is satisfied by
`current_version`; in other words, `current_version` is >=
`requested_version`.
:param requested_version: version to check for compatibility
:param current_version: version to check against
:param same_major: if True, the major version must be identical between
`requested_version` and `current_version`. This is used when a
major-version difference indicates incompatibility between the two
versions. Since this is the common-case in practice, the default is
True.
:returns: True if compatible, False if not
"""
requested_parts = pkg_resources.parse_version(requested_version)
current_parts = pkg_resources.parse_version(current_version)
if same_major and (requested_parts[0] != current_parts[0]):
return False
return current_parts >= requested_parts
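# Hedged usage sketch (not part of the original module): how is_compatible
# treats major-version differences.
def _example_is_compatible():
    assert is_compatible('1.1', '1.3')                    # same major, newer
    assert not is_compatible('1.1', '2.0')                # major version differs
    assert is_compatible('1.1', '2.0', same_major=False)  # 2.0 >= 1.1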
|
|
# Copyright 2016-2017 Nitor Creations Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Command line tools for nitor-deploy-tools
"""
from __future__ import print_function
from builtins import input
from builtins import str
import argparse
import json
import locale
import os
import sys
import time
import re
import inspect
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc
from inspect import trace, getframeinfo
from subprocess import PIPE, Popen
import argcomplete
import yaml
from argcomplete.completers import ChoicesCompleter, FilesCompleter
from pygments import highlight, lexers, formatters
from pygments.styles import get_style_by_name
from . import aws_infra_util
from . import cf_bootstrap
from . import cf_deploy
from . import cf_utils
from . import volumes
from .cf_utils import InstanceInfo, is_ec2, region, regions, stacks, \
stack_params_and_outputs, get_images, promote_image, \
share_to_another_region, set_region, register_private_dns, interpolate_file, \
assumed_role_name
from .cloudfront_utils import distributions, distribution_comments, \
upsert_cloudfront_records
from n_utils.ecr_utils import ensure_repo, repo_uri
from n_utils.log_events import CloudWatchLogsGroups, CloudFormationEvents, CloudWatchLogsThread
from n_utils.maven_utils import add_server
from n_utils.mfa_utils import mfa_add_token, mfa_delete_token, mfa_generate_code, \
mfa_generate_code_with_secret, list_mfa_tokens, mfa_backup_tokens, mfa_decrypt_backup_tokens, \
mfa_to_qrcode
from n_utils.account_utils import list_created_accounts, create_account
from n_utils.aws_infra_util import load_parameters
from n_utils.ndt import find_include, find_all_includes, include_dirs
from n_utils.profile_util import update_profile, print_profile
from n_utils.ndt_project import list_jobs, list_components
from n_utils.git_utils import Git
from n_utils.ndt_project import Project
SYS_ENCODING = locale.getpreferredencoding()
NoneType = type(None)
def get_parser(formatter=None):
func_name = inspect.stack()[1][3]
caller = sys._getframe().f_back
func = caller.f_locals.get(
func_name, caller.f_globals.get(
func_name
)
)
if formatter:
return argparse.ArgumentParser(formatter_class=formatter, description=func.__doc__)
else:
return argparse.ArgumentParser(description=func.__doc__)
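# Illustrative sketch (hypothetical command, not part of ndt): get_parser()
# resolves the calling function from the caller's frame and reuses its
# docstring as the argparse description, so each entry point below only needs
# to declare its arguments.
def _example_command():
    """Example command whose docstring becomes the --help description."""
    parser = get_parser()
    parser.add_argument("target", help="What to operate on")
    return parser.parse_args()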
def list_file_to_json():
""" Convert a file with an entry on each line to a json document with
a single element (name as argument) containing the file rows as a list.
"""
parser = get_parser()
parser.add_argument("arrayname", help="The name in the json object given" +
"to the array").completer = \
ChoicesCompleter(())
parser.add_argument("file", help="The file to parse").completer = \
FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
content = [line.rstrip('\n') for line in open(args.file)]
json.dump({args.arrayname: content}, sys.stdout)
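# Worked example (hypothetical input, not from the original module): a file
# containing the lines "alpha" and "beta", converted with arrayname "items",
# is written to stdout as {"items": ["alpha", "beta"]}.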
def add_deployer_server():
"""Add a server into a maven configuration file. Password is taken from the
environment variable 'DEPLOYER_PASSWORD'
"""
parser = get_parser()
parser.add_argument("file", help="The file to modify").completer = \
FilesCompleter()
parser.add_argument("username",
help="The username to access the server.").completer = \
ChoicesCompleter(())
parser.add_argument("--id", help="Optional id for the server. Default is" +
" deploy. One server with this id is " +
"added and another with '-release' " +
"appended", default="deploy").completer = \
ChoicesCompleter(())
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
add_server(args.file, args.id, args.username)
add_server(args.file, args.id + "-release", args.username)
def get_userdata():
"""Get userdata defined for an instance into a file
"""
parser = get_parser()
parser.add_argument("file", help="File to write userdata into").completer =\
FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
dirname = os.path.dirname(args.file)
if dirname:
if os.path.isfile(dirname):
parser.error(dirname + " exists and is a file")
elif not os.path.isdir(dirname):
os.makedirs(dirname)
cf_utils.get_userdata(args.file)
return
def get_account_id():
"""Get current account id. Either from instance metadata or current cli
configuration.
"""
parser = get_parser()
parser.parse_args()
print(cf_utils.resolve_account())
def colorprint(data, output_format="yaml"):
""" Colorized print for either a yaml or a json document given as argument
"""
lexer = lexers.get_lexer_by_name(output_format)
formatter = formatters.get_formatter_by_name("256")
formatter.__init__(style=get_style_by_name('emacs'))
# highlight() expects text, so decode only if we were handed bytes
if isinstance(data, bytes):
    data = data.decode('UTF-8')
colored = highlight(data, lexer, formatter)
sys.stdout.write(colored)
def yaml_to_json():
"""Convert Nitor CloudFormation yaml to CloudFormation json with some
preprocessing
"""
parser = get_parser()
parser.add_argument("--colorize", "-c", help="Colorize output", action="store_true")
parser.add_argument("--merge", "-m", help="Merge other yaml files to the main file", nargs="*")
parser.add_argument("--small", "-s", help="Compact representration of json", action="store_true")
parser.add_argument("file", help="File to parse").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
doc = aws_infra_util.yaml_to_dict(args.file, merge=args.merge)
if args.small:
dump = lambda out_doc: json.dumps(out_doc)
else:
dump = lambda out_doc: json.dumps(out_doc, indent=2)
if args.colorize:
colorprint(dump(doc), output_format="json")
else:
print(dump(doc))
def yaml_to_yaml():
""" Do ndt preprocessing for a yaml file
"""
parser = get_parser()
parser.add_argument("--colorize", "-c", help="Colorize output", action="store_true")
parser.add_argument("file", help="File to parse").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
doc = aws_infra_util.yaml_to_yaml(args.file)
if args.colorize:
colorprint(doc)
else:
print(doc)
def json_to_yaml():
"""Convert CloudFormation json to an approximation of a Nitor CloudFormation
yaml with, for example, scripts externalized
"""
parser = get_parser()
parser.add_argument("--colorize", "-c", help="Colorize output",
action="store_true")
parser.add_argument("file", help="File to parse").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
doc = aws_infra_util.json_to_yaml(args.file)
if args.colorize:
colorprint(doc)
else:
print(doc)
def read_and_follow():
"""Read and print a file and keep following the end for new data
"""
parser = get_parser()
parser.add_argument("file", help="File to follow").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
cf_utils.read_and_follow(args.file, sys.stdout.write)
def logs_to_cloudwatch():
"""Read a file and send rows to cloudwatch and keep following the end for new data.
The log group will be the name of the stack that created the instance and the log stream
will be the instance id and filename.
"""
parser = get_parser()
parser.add_argument("file", help="File to follow").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
cf_utils.send_logs_to_cloudwatch(args.file)
def signal_cf_status():
"""Signal CloudFormation status to a logical resource in CloudFormation
that is either given on the command line or resolved from CloudFormation
tags
"""
parser = get_parser()
parser.add_argument("status",
help="Status to indicate: SUCCESS | FAILURE").completer\
= ChoicesCompleter(("SUCCESS", "FAILURE"))
parser.add_argument("-r", "--resource", help="Logical resource name to " +
"signal. Looked up from " +
"cloudformation tags by " +
"default")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.status != "SUCCESS" and args.status != "FAILURE":
parser.error("Status needs to be SUCCESS or FAILURE")
cf_utils.signal_status(args.status, resource_name=args.resource)
def associate_eip():
"""Associate an Elastic IP for the instance that this script runs on
"""
parser = get_parser()
parser.add_argument("-i", "--ip", help="Elastic IP to allocate - default" +
" is to get paramEip from the stack" +
" that created this instance")
parser.add_argument("-a", "--allocationid", help="Elastic IP allocation " +
"id to allocate - " +
"default is to get " +
"paramEipAllocationId " +
"from the stack " +
"that created this instance")
parser.add_argument("-e", "--eipparam", help="Parameter to look up for " +
"Elastic IP in the stack - " +
"default is paramEip",
default="paramEip")
parser.add_argument("-p", "--allocationidparam", help="Parameter to look" +
" up for Elastic " +
"IP Allocation ID " +
"in the stack - " +
"default is " +
"paramEipAllocatio" +
"nId",
default="paramEipAllocationId")
argcomplete.autocomplete(parser)
args = parser.parse_args()
cf_utils.associate_eip(eip=args.ip, allocation_id=args.allocationid,
eip_param=args.eipparam,
allocation_id_param=args.allocationidparam)
def instance_id():
""" Get id for instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.instance_id())
else:
sys.exit(1)
def ec2_region():
""" Get default region - the region of the instance if run in an EC2 instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
print(region())
def tag():
""" Get the value of a tag for an ec2 instance
"""
parser = get_parser()
parser.add_argument("name", help="The name of the tag to get")
args = parser.parse_args()
argcomplete.autocomplete(parser)
if is_ec2():
info = InstanceInfo()
value = info.tag(args.name)
if value is not None:
print(value)
else:
sys.exit("Tag " + args.name + " not found")
else:
parser.error("Only makes sense on an EC2 instance")
def stack_name():
""" Get name of the stack that created this instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.stack_name())
else:
parser.error("Only makes sense on an EC2 instance cretated from a CF stack")
def stack_id():
""" Get id of the stack the creted this instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.stack_id())
else:
parser.error("Only makes sense on an EC2 instance cretated from a CF stack")
def logical_id():
""" Get the logical id that is expecting a signal from this instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.logical_id())
else:
parser.error("Only makes sense on an EC2 instance cretated from a CF stack")
def cf_region():
""" Get region of the stack that created this instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.stack_id().split(":")[3])
else:
parser.error("Only makes sense on an EC2 instance cretated from a CF stack")
def update_stack():
""" Create or update existing CloudFormation stack
"""
parser = argparse.ArgumentParser(description="Create or update existing " +
"CloudFormation stack")
parser.add_argument("stack_name", help="Name of the stack to create or " +
"update")
parser.add_argument("yaml_template", help="Yaml template to pre-process " +
"and use for creation")
parser.add_argument("region", help="The region to deploy the stack to")
parser.add_argument("-d", "--dry-run", action="store_true",
help="Do not actually deploy anything, but just " +
"assemble the json and associated parameters")
args = parser.parse_args()
if not os.path.isfile(args.yaml_template):
parser.error(args.yaml_template + " not found")
cf_deploy.deploy(args.stack_name, args.yaml_template, args.region,
args.dry_run)
return
def delete_stack():
"""Delete an existing CloudFormation stack
"""
parser = get_parser()
parser.add_argument("stack_name", help="Name of the stack to delete")
parser.add_argument("region", help="The region to delete the stack from")
args = parser.parse_args()
cf_deploy.delete(args.stack_name, args.region)
return
def tail_stack_logs():
"""Tail logs from the log group of a cloudformation stack
"""
parser = get_parser()
parser.add_argument("stack_name", help="Name of the stack to watch logs " +
"for")
parser.add_argument("-s", "--start", help="Start time in seconds since " +
"epoc")
argcomplete.autocomplete(parser)
args = parser.parse_args()
cwlogs = CloudWatchLogsThread(args.stack_name, start_time=args.start)
cwlogs.start()
cfevents = CloudFormationEvents(args.stack_name, start_time=args.start)
cfevents.start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
print('Closing...')
cwlogs.stop()
cfevents.stop()
return
def get_logs():
"""Get logs from multiple CloudWatch log groups and possibly filter them.
"""
parser = get_parser()
parser.add_argument("log_group_pattern", help="Regular expression to filter log groups with")
parser.add_argument("-f", "--filter", help="CloudWatch filter pattern")
parser.add_argument("-s", "--start", help="Start time (x m|h|d|w ago | now | <seconds since epoc>)", nargs="+")
parser.add_argument("-e", "--end", help="End time (x m|h|d|w ago | now | <seconds since epoc>)", nargs="+")
parser.add_argument("-o", "--order", help="Best effort ordering of log entries", action="store_true")
parser.usage = "ndt logs log_group_pattern [-h] [-f FILTER] [-s START [START ...]] [-e END [END ...]] [-o]"
argcomplete.autocomplete(parser)
args = parser.parse_args()
cwlogs_groups = CloudWatchLogsGroups(
log_group_filter=args.log_group_pattern,
log_filter=args.filter,
start_time=' '.join(args.start) if args.start else None,
end_time=' '.join(args.end) if args.end else None,
sort=args.order
)
cwlogs_groups.get_logs()
def resolve_include():
"""Find a file from the first of the defined include paths
"""
parser = get_parser()
parser.add_argument("file", help="The file to find")
argcomplete.autocomplete(parser)
args = parser.parse_args()
inc_file = find_include(args.file)
if not inc_file:
parser.error("Include " + args.file + " not found on include paths " +
str(include_dirs))
print(inc_file)
def resolve_all_includes():
"""Find a file from the first of the defined include paths
"""
parser = get_parser()
parser.add_argument("pattern", help="The file pattern to find")
argcomplete.autocomplete(parser)
args = parser.parse_args()
inc_file = find_all_includes(args.pattern)
if not inc_file:
parser.error("Include " + args.pattern + " not found on include paths " +
str(include_dirs))
for next_file in inc_file:
print(next_file)
def assume_role():
"""Assume a defined role. Prints out environment variables
to be eval'd to current context for use:
eval $(ndt assume-role 'arn:aws:iam::43243246645:role/DeployRole')
"""
parser = get_parser()
parser.add_argument("role_arn", help="The ARN of the role to assume")
parser.add_argument("-t", "--mfa-token", metavar="TOKEN_NAME",
help="Name of MFA token to use", required=False)
parser.add_argument("-d", "--duration", help="Duration for the session in minutes",
default="60", type=int, required=False)
parser.add_argument("-p", "--profile", help="Profile to edit in ~/.aws/credentials " + \
"to make role persist in that file for " + \
"the duration of the session.", required=False)
argcomplete.autocomplete(parser)
args = parser.parse_args()
creds = cf_utils.assume_role(args.role_arn, mfa_token_name=args.mfa_token,
duration_minutes=args.duration)
if args.profile:
update_profile(args.profile, creds)
else:
print("AWS_ROLE_ARN=\"" + args.role_arn + "\"")
print("AWS_ACCESS_KEY_ID=\"" + creds['AccessKeyId'] + "\"")
print("AWS_SECRET_ACCESS_KEY=\"" + creds['SecretAccessKey'] + "\"")
print("AWS_SESSION_TOKEN=\"" + creds['SessionToken'] + "\"")
print("AWS_SESSION_EXPIRATION=\"" + creds['Expiration'].strftime("%a, %d %b %Y %H:%M:%S +0000") + "\"")
print("export AWS_ROLE_ARN AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN AWS_SESSION_EXPIRATION")
def get_parameter():
"""Get a parameter value from the stack
"""
parser = get_parser()
parser.add_argument("parameter", help="The name of the parameter to print")
argcomplete.autocomplete(parser)
args = parser.parse_args()
info = InstanceInfo()
print(info.stack_data(args.parameter))
def volume_from_snapshot():
""" Create a volume from an existing snapshot and mount it on the given
path. The snapshot is identified by a tag key and value. If no tag is
found, an empty volume is created, attached, formatted and mounted.
"""
parser = get_parser()
parser.add_argument("tag_key", help="Key of the tag to find volume with")
parser.add_argument("tag_value", help="Value of the tag to find volume with")
parser.add_argument("mount_path", help="Where to mount the volume")
parser.add_argument("size_gb", nargs="?", help="Size in GB for the volum" +
"e. If different from sna" +
"pshot size, volume and " +
"filesystem are resized",
default=None, type=int)
parser.add_argument("-n", "--no_delete_on_termination",
help="Whether to skip deleting the volume on termi" +
"nation, defaults to false", action="store_true")
parser.add_argument("-c", "--copytags", nargs="*", help="Tag to copy to the volume from instance. Multiple values allowed.")
parser.add_argument("-t", "--tags", nargs="*", help="Tag to add to the volume in the format name=value. Multiple values allowed.")
argcomplete.autocomplete(parser)
args = parser.parse_args()
tags = {}
if args.tags:
for tag in args.tags:
try:
key, value = tag.split('=', 1)
tags[key] = value
except ValueError:
parser.error("Invalid tag/value input: " + tag)
if is_ec2():
volumes.volume_from_snapshot(args.tag_key, args.tag_value, args.mount_path,
size_gb=args.size_gb,
del_on_termination=not args.no_delete_on_termination,
copytags=args.copytags, tags=tags)
else:
parser.error("Only makes sense on an EC2 instance")
def snapshot_from_volume():
""" Create a snapshot of a volume identified by it's mount path
"""
parser = get_parser()
parser.add_argument("-w", "--wait", help="Wait for the snapshot to finish" +
" before returning",
action="store_true")
parser.add_argument("tag_key", help="Key of the tag to find volume with")
parser.add_argument("tag_value", help="Value of the tag to find volume with")
parser.add_argument("mount_path", help="Where to mount the volume")
parser.add_argument("-c", "--copytags", nargs="*", help="Tag to copy to the snapshot from instance. Multiple values allowed.")
parser.add_argument("-t", "--tags", nargs="*", help="Tag to add to the snapshot in the format name=value. Multiple values allowed.")
argcomplete.autocomplete(parser)
args = parser.parse_args()
tags = {}
if args.tags:
for tag in args.tags:
try:
key, value = tag.split('=', 1)
tags[key] = value
except ValueError:
parser.error("Invalid tag/value input: " + tag)
if is_ec2():
print(volumes.create_snapshot(args.tag_key, args.tag_value,
args.mount_path, wait=args.wait, tags=tags, copytags=args.copytags))
else:
parser.error("Only makes sense on an EC2 instance")
def detach_volume():
""" Create a snapshot of a volume identified by it's mount path
"""
parser = get_parser()
parser.add_argument("mount_path", help="Where to mount the volume")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if is_ec2():
volumes.detach_volume(args.mount_path)
else:
parser.error("Only makes sense on an EC2 instance")
def clean_snapshots():
"""Clean snapshots that are older than a number of days (30 by default) and
have one of the specified tag values
"""
parser = get_parser()
parser.add_argument("-r", "--region", help="The region to delete " +
"snapshots from. Can also be " +
"set with env variable " +
"AWS_DEFAULT_REGION or is " +
"gotten from instance " +
"metadata as a last resort")
parser.add_argument("-d", "--days", help="The number of days that is the" +
"minimum age for snapshots to " +
"be deleted", type=int, default=30)
parser.add_argument("tags", help="The tag values to select deleted " +
"snapshots", nargs="+")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.region:
os.environ['AWS_DEFAULT_REGION'] = args.region
volumes.clean_snapshots(args.days, args.tags)
def setup_cli():
"""Setup the command line environment to define an aws cli profile with
the given name and credentials. If an identically named profile exists,
it will not be overwritten.
"""
parser = get_parser()
parser.add_argument("-n", "--name", help="Name for the profile to create")
parser.add_argument("-k", "--key-id", help="Key id for the profile")
parser.add_argument("-s", "--secret", help="Secret to set for the profile")
parser.add_argument("-r", "--region", help="Default region for the profile")
argcomplete.autocomplete(parser)
args = parser.parse_args()
cf_bootstrap.setup_cli(**vars(args))
def show_stack_params_and_outputs():
""" Show stack parameters and outputs as a single json documents
"""
parser = get_parser()
parser.add_argument("-r", "--region", help="Region for the stack to show",
default=region()).completer = ChoicesCompleter(regions())
parser.add_argument("-p", "--parameter", help="Name of paremeter if only" +
" one parameter required")
parser.add_argument("stack_name", help="The stack name to show").completer = \
ChoicesCompleter(stacks())
argcomplete.autocomplete(parser)
args = parser.parse_args()
resp = stack_params_and_outputs(args.region, args.stack_name)
if args.parameter:
if args.parameter in resp:
print(resp[args.parameter])
else:
parser.error("Parameter " + args.parameter + " not found")
else:
print(json.dumps(resp, indent=2))
def cli_get_images():
""" Gets a list of images given a bake job name
"""
parser = get_parser()
parser.add_argument("job_name", help="The job name to look for")
argcomplete.autocomplete(parser)
args = parser.parse_args()
set_region()
images = get_images(args.job_name)
for image in images:
print(image['ImageId'] + ":" + image['Name'])
def cli_promote_image():
""" Promotes an image for use in another branch
"""
parser = get_parser()
parser.add_argument("image_id", help="The image to promote")
parser.add_argument("target_job", help="The job name to promote the image to")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if ":" in args.image_id:
args.image_id = args.image_id.split(":")[0]
promote_image(args.image_id, args.target_job)
def cli_share_to_another_region():
""" Shares an image to another region for potentially another account
"""
parser = get_parser()
parser.add_argument("ami_id", help="The ami to share")
parser.add_argument("to_region", help="The region to share to").completer =\
ChoicesCompleter(regions())
parser.add_argument("ami_name", help="The name for the ami")
parser.add_argument("account_id", nargs="+", help="The account ids to sh" +
"are ami to")
argcomplete.autocomplete(parser)
args = parser.parse_args()
share_to_another_region(args.ami_id, args.to_region, args.ami_name,
args.account_id)
def cli_register_private_dns():
""" Register local private IP in route53 hosted zone usually for internal
use.
"""
parser = get_parser()
parser.add_argument("dns_name", help="The name to update in route 53")
parser.add_argument("hosted_zone", help="The name of the hosted zone to update")
argcomplete.autocomplete(parser)
args = parser.parse_args()
register_private_dns(args.dns_name, args.hosted_zone)
def cli_interpolate_file():
""" Replace placeholders in file with parameter values from stack and
optionally from vault
"""
parser = get_parser()
parser.add_argument("-s", "--stack", help="Stack name for values. " +
"Automatically resolved on ec2" +
" instances")
parser.add_argument("-v", "--vault", help="Use vault values as well." +
"Vault resovled from env " +
"variables or default is used",
action="store_true")
parser.add_argument("-o", "--output", help="Output file")
parser.add_argument("-e", "--encoding", help="Encoding to use for the " +
"file. Defaults to utf-8",
default='utf-8')
parser.add_argument("file", help="File to interpolate").completer = \
FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
interpolate_file(args.file, stack_name=args.stack, use_vault=args.vault,
destination=args.output, encoding=args.encoding)
def cli_ecr_ensure_repo():
""" Ensure that an ECR repository exists and get the uri and login token for
it """
parser = get_parser()
parser.add_argument("name", help="The name of the ecr repository to verify")
argcomplete.autocomplete(parser)
args = parser.parse_args()
ensure_repo(args.name)
def cli_ecr_repo_uri():
""" Get the repo uri for a named docker """
parser = get_parser()
parser.add_argument("name", help="The name of the ecr repository")
argcomplete.autocomplete(parser)
args = parser.parse_args()
uri = repo_uri(args.name)
if not uri:
parser.error("Did not find uri for repo '" + args.name + "'")
else:
print(uri)
def cli_upsert_cloudfront_records():
""" Upsert Route53 records for all aliases of a CloudFront distribution """
parser = get_parser()
stack_select = parser.add_mutually_exclusive_group(required=True)
stack_select.add_argument("-i", "--distribution_id", help="Id for the " +
"distribution to " +
"upsert").completer = \
ChoicesCompleter(distributions())
stack_select.add_argument("-c", "--distribution_comment", help="Comment for the" +
" distribution " +
"to upsert").completer = \
ChoicesCompleter(distribution_comments())
parser.add_argument("-w", "--wait", help="Wait for request to sync", action="store_true")
argcomplete.autocomplete(parser)
args = parser.parse_args()
upsert_cloudfront_records(args)
def cli_mfa_add_token():
""" Adds an MFA token to be used with role assumption.
Tokens will be saved in a .ndt subdirectory in the user's home directory.
If a token with the same name already exists, it will not be overwritten."""
parser = get_parser()
parser.add_argument("token_name",
help="Name for the token. Use this to refer to the token later with " +
"the assume-role command.")
parser.add_argument("-i", "--interactive", help="Ask for token details interactively.",
action="store_true")
parser.add_argument("-a", "--token_arn", help="ARN identifier for the token.")
parser.add_argument("-s", "--token_secret", help="Token secret.")
parser.add_argument("-f", "--force", help="Force an overwrite if the token already exists.",
action="store_true")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.interactive:
args.token_secret = input("Enter token secret: ")
code_1 = mfa_generate_code_with_secret(args.token_secret)
print("First sync code: " + code_1)
print("Waiting to generate second sync code. This could take 30 seconds...")
code_2 = mfa_generate_code_with_secret(args.token_secret)
while code_1 == code_2:
time.sleep(5)
code_2 = mfa_generate_code_with_secret(args.token_secret)
print("Second sync code: " + code_2)
args.token_arn = input("Enter token ARN: ")
elif args.token_arn is None or args.token_secret is None:
parser.error("Both token_arn and token_secret are required when not adding interactively.")
try:
mfa_add_token(args)
except ValueError as error:
parser.error(error)
def cli_mfa_delete_token():
""" Deletes an MFA token file from the .ndt subdirectory in the user's
home directory """
parser = get_parser()
parser.add_argument("token_name",
help="Name of the token to delete.").completer = \
ChoicesCompleter(list_mfa_tokens())
argcomplete.autocomplete(parser)
args = parser.parse_args()
mfa_delete_token(args.token_name)
def cli_mfa_code():
""" Generates a TOTP code using an MFA token. """
parser = get_parser()
parser.add_argument("token_name",
help="Name of the token to use.").completer = \
ChoicesCompleter(list_mfa_tokens())
argcomplete.autocomplete(parser)
args = parser.parse_args()
print(mfa_generate_code(args.token_name))
def cli_mfa_to_qrcode():
""" Generates a QR code to import a token to other devices. """
parser = get_parser()
parser.add_argument("token_name",
help="Name of the token to use.").completer = \
ChoicesCompleter(list_mfa_tokens())
argcomplete.autocomplete(parser)
args = parser.parse_args()
mfa_to_qrcode(args.token_name)
def cli_mfa_backup_tokens():
""" Encrypt or decrypt a backup JSON structure of tokens.
To output an encrypted backup, provide an encryption secret.
To decrypt an existing backup, use --decrypt <file>.
"""
parser = get_parser()
parser.add_argument("backup_secret",
help="Secret to use for encrypting or decrypts the backup.")
parser.add_argument("-d",
"--decrypt",
help="Outputs a decrypted token backup read from given file.",
nargs=1,
metavar="FILE")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.decrypt:
print(mfa_decrypt_backup_tokens(args.backup_secret, args.decrypt[0]))
else:
print(mfa_backup_tokens(args.backup_secret))
def cli_create_account():
""" Creates a subaccount. """
parser = get_parser()
parser.add_argument("email", help="Email for account root")
parser.add_argument("account_name", help="Organization unique account name")
parser.add_argument("-d", "--deny-billing-access", action="store_true")
parser.add_argument("-o", "--organization-role-name", help="Role name for " +
"admin access from" +
" parent account",
default="OrganizationAccountAccessRole")
parser.add_argument("-r", "--trust-role-name", help="Role name for admin " +
"access from parent account",
default="TrustedAccountAccessRole")
parser.add_argument("-a", "--trusted-accounts", nargs="*",
help="Account to trust with user management").completer = \
ChoicesCompleter(list_created_accounts())
parser.add_argument("-t", "--mfa-token", metavar="TOKEN_NAME",
help="Name of MFA token to use", required=False)
argcomplete.autocomplete(parser)
args = parser.parse_args()
create_account(args.email, args.account_name, role_name=args.organization_role_name,
trust_role=args.trust_role_name, access_to_billing=not args.deny_billing_access,
trusted_accounts=args.trusted_accounts, mfa_token=args.mfa_token)
def cli_load_parameters():
""" Load parameters from infra*.properties files in the order:
infra.properties,
infra-[branch].properties,
[component]/infra.properties,
[component]/infra-[branch].properties,
[component]/[subcomponent-type]-[subcomponent]/infra.properties,
[component]/[subcomponent-type]-[subcomponent]/infra-[branch].properties
Last parameter defined overwrites ones defined before in the files. Supports parameter expansion
and bash-like transformations. Namely:
${PARAM##prefix} # strip prefix greedy
${PARAM%%suffix} # strip suffix greedy
${PARAM#prefix} # strip prefix not greedy
${PARAM%suffix} # strip suffix not greedy
${PARAM:-default} # default if empty
${PARAM:4:2} # start:len
${PARAM/substr/replace}
${PARAM^} # upper initial
${PARAM,} # lower initial
${PARAM^^} # upper
${PARAM,,} # lower
Comment lines start with '#'
Lines can be continued by adding '\' at the end
See https://www.tldp.org/LDP/Bash-Beginners-Guide/html/sect_10_03.html
(arrays not supported)
"""
parser = get_parser(formatter=argparse.RawDescriptionHelpFormatter)
parser.add_argument("component", nargs="?", help="Compenent to descend into").completer = \
ChoicesCompleter([c.name for c in Project().get_components()])
parser.add_argument("--branch", "-b", help="Branch to get active parameters for").completer = \
ChoicesCompleter(Git().get_branches())
parser.add_argument("--resolve-images", "-r", action="store_true", help="Also resolve subcomponent AMI IDs and docker repo urls")
subcomponent_group = parser.add_mutually_exclusive_group()
subcomponent_group.add_argument("--stack", "-s", help="CloudFormation subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("stack", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--serverless", "-l", help="Serverless subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("serverless", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--docker", "-d", help="Docker image subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("docker", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--image", "-i", const="", nargs="?", help="AMI image subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("image", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--cdk", "-c", help="CDK subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("cdk", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--terraform", "-t", help="Terraform subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("terraform", prefix, parsed_args, **kwargs)
format_group = parser.add_mutually_exclusive_group()
format_group.add_argument("--json", "-j", action="store_true", help="JSON format output (default)")
format_group.add_argument("--yaml", "-y", action="store_true", help="YAML format output")
format_group.add_argument("--properties", "-p", action="store_true", help="properties file format output")
format_group.add_argument("--export-statements", "-e", action="store_true",
help="Output as eval-able export statements")
argcomplete.autocomplete(parser)
args = parser.parse_args()
transform = json.dumps
if args.export_statements:
transform = map_to_exports
if args.properties:
transform = map_to_properties
if args.yaml:
transform = yaml.dump
del args.export_statements
del args.yaml
del args.json
del args.properties
if (args.stack or args.serverless or args.docker or not isinstance(args.image, NoneType)) \
and not args.component:
parser.error("image, stack, doker or serverless do not make sense without component")
print(transform(load_parameters(**vars(args))), end="")
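# Illustrative example of the expansion syntax documented in cli_load_parameters
# above (hypothetical property names, not taken from any real infra*.properties file):
#
#   DOMAIN=example.com
#   HOST=www.${DOMAIN}
#   ENV=${BRANCH:-dev}        # "dev" when BRANCH is empty
#   BASE=${HOST%%.com}        # greedy suffix strip -> "www.example"
#   LOUD=${ENV^^}             # upper-cased -> e.g. "DEV"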
def component_typed_subcomponents(sc_type, prefix, parsed_args, **kwargs):
p_args = {}
if parsed_args.branch:
p_args["branch"] = parsed_args.branch
if parsed_args.component:
return [sc.name for sc in Project(**p_args).get_component(parsed_args.component).get_subcomponents() if sc.type == sc_type and sc.name.startswith(prefix)]
else:
return [sc.name for sc in Project(**p_args).get_all_subcomponents() if sc.type == sc_type]
def map_to_exports(map):
""" Prints the map as eval-able set of environment variables. Keys
will be cleaned of all non-word letters and values will be escaped so
that they will be exported as literal values."""
ret = ""
keys = []
for key, val in list(map.items()):
key = re.sub("[^a-zA-Z0-9_]", "", key)
ret += key + "='" + val.replace("'", "'\"'\"'") + "'" + os.linesep
keys.append(key)
ret += "export " + " ".join(keys) + os.linesep
return ret
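# Illustrative example of the escaping above (hypothetical input):
#   map_to_exports({"FOO-BAR": "it's"}) returns the two lines
#   FOOBAR='it'"'"'s'
#   export FOOBAR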
def map_to_properties(map):
""" Prints the map as loadable set of java properties. Keys
will be cleaned of all non-word letters."""
ret = ""
for key, val in list(map.items()):
key = re.sub("[^a-zA-Z0-9_]", "", key)
ret += key + "=" + val + os.linesep
return ret
def wait_for_metadata():
""" Waits for metadata service to be available. All errors are ignored until
time expires or a socket can be established to the metadata service """
parser = get_parser()
parser.add_argument('--timeout', '-t', type=int, help="Maximum time to wait in seconds for the metadata service to be available", default=300)
argcomplete.autocomplete(parser)
args = parser.parse_args()
start = datetime.utcnow().replace(tzinfo=tzutc())
cutoff = start + timedelta(seconds=args.timeout)
timeout = args.timeout
connected = False
while not connected:
try:
connected = cf_utils.wait_net_service("169.254.169.254", 80, timeout)
except:
pass
if datetime.utcnow().replace(tzinfo=tzutc()) >= cutoff:
print("Timed out waiting for metadata service")
sys.exit(1)
time.sleep(1)
timeout = max(1, args.timeout - (datetime.utcnow().replace(tzinfo=tzutc()) - start).total_seconds())
def cli_assumed_role_name():
""" Read the name of the assumed role if currently defined """
parser = get_parser()
argcomplete.autocomplete(parser)
_ = parser.parse_args()
print(assumed_role_name())
def cli_list_jobs():
""" Prints a line for every runnable job in this git repository, in all branches and
optionally exports the properties for each under '$root/job-properties/'"""
parser = get_parser()
parser.add_argument("-e", "--export-job-properties", action="store_true",
help="Set if you want the properties of all jobs into files under job-properties/")
parser.add_argument("-j", "--json", action="store_true", help="Print in json format. Optionally " \
"exported parameters will be in the json document")
parser.add_argument("-b", "--branch", help="The branch to process. Default is to process all branches").completer = \
ChoicesCompleter(Git().get_branches())
parser.add_argument("-c", "--component", help="Component to process. Default is to process all components").completer = \
branch_components
argcomplete.autocomplete(parser)
args = parser.parse_args()
ret = list_jobs(**vars(args))
if args.json:
print(json.dumps(ret, indent=2))
else:
print("\n".join(ret))
def branch_components(prefix, parsed_args, **kwargs):
if parsed_args.branch:
return [c.name for c in Project(branch=parsed_args.branch).get_components()]
else:
return [c.name for c in Project().get_components()]
def cli_list_components():
""" Prints the components in a branch, by default the current branch """
parser = get_parser()
parser.add_argument("-j", "--json", action="store_true", help="Print in json format.")
parser.add_argument("-b", "--branch", help="The branch to get components from. Default is to process current branch").completer = \
ChoicesCompleter(Git().get_branches())
argcomplete.autocomplete(parser)
args = parser.parse_args()
ret = list_components(**vars(args))
if args.json:
print(json.dumps(ret, indent=2))
else:
print("\n".join(ret))
|
|
import array
import sys
import fabric
import math
import BaseHTTPServer
import urlparse
import MathExt
# shutdown server after maxRequests requests
maxRequests = 20
useFabric = False
debug = False
fabricClients = []
numRequests = 0
if len( sys.argv ) > 1:
if sys.argv[1] == '-f':
useFabric = True
else:
print('invalid command line flag')
sys.exit(1)
class VARServer( BaseHTTPServer.HTTPServer ):
# don't shut down the connection after do_GET (waiting for callback)
def shutdown_request( self, request ):
pass
class VARHandler( BaseHTTPServer.BaseHTTPRequestHandler ):
def writeHeader( self, code ):
self.send_response( code )
self.send_header( 'Content-type', 'text/plain' )
self.end_headers()
# finally close connection
def close( self ):
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
self.rfile.close()
try:
self.request.shutdown()
except Exception:
self.request.close()
# do not allow stream to be closed immediately, wait for
# async callback to return
def finish( self ):
pass
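# Example request (illustrative; the parameter names are inferred from the
# query parsing loop in do_GET below):
#   http://127.0.0.1:1337/?ip_1=100&pm_1=0.08&pd_1=0.2&ip_2=80&pm_2=0.05&pd_2=0.3&pc_1_2=0.4
# ip_N = initial price, pm_N / pd_N = yearly mean / deviation of the return
# (scaled by the number of trading days internally), pc_I_J = correlation
# between stocks I and J (defaults to 0.0 when omitted).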
def do_GET( self ):
global numRequests
numRequests += 1
numTradingDays = 252
dt = 1.0/numTradingDays
sqrtDT = math.sqrt(dt)
query = urlparse.parse_qs(urlparse.urlparse(self.path).query)
numStocks = 0
initialPrices = []
priceMeans = []
priceDevs = []
priceCorrs = []
while True:
i = numStocks
initialPriceName = "ip_"+str(i+1)
priceMeanName = "pm_"+str(i+1)
priceDevName = "pd_"+str(i+1)
if initialPriceName in query and priceMeanName in query and priceDevName in query:
initialPrices.insert(i, float(query[initialPriceName][0]))
priceMeans.insert(i, float(query[priceMeanName][0]) / numTradingDays)
priceDevs.insert(i, float(query[priceDevName][0]) / numTradingDays)
priceCorrs.insert(i, [])
for j in range(0, numStocks):
priceCorrelationName = "pc_" + str(j+1) + "_" + str(i+1)
if priceCorrelationName in query:
priceCorrs[i].insert(j, float(query[priceCorrelationName][0]))
else:
priceCorrs[i].insert(j, 0.0)
priceCorrs[j].insert(i, priceCorrs[i][j])
priceCorrs[i].insert(i, 1.0)
numStocks += 1
else:
break
if numStocks <= 0:
self.writeHeader( 400 )
self.wfile.write("You must provide at least one stock!\n")
else:
if debug:
print("priceMeans:")
print(priceMeans)
print("priceDevs:")
print(priceDevs)
print("priceCorrs:")
print(priceCorrs)
priceCovariance = []
for i in range(0, numStocks):
priceCovariance.insert(i, [])
for j in range(0, numStocks):
priceCovariance[i].insert(j, priceDevs[i] * priceDevs[j] * priceCorrs[i][j])
choleskyTrans = MathExt.choleskyTrans(priceCovariance)
drifts = []
for i in range(0, numStocks):
drifts.insert(i, priceMeans[i] - priceCovariance[i][i]/2)
totalInitialPrice = 0.0
for i in range(0, numStocks):
totalInitialPrice += initialPrices[i]
numTrials = 16384
if useFabric:
fabricClient = fabric.createClient()
fabricClients.append( fabricClient )
params = fabricClient.DG.createNode("params")
params.addMember('numTradingDays', 'Size', numTradingDays)
params.addMember('dt', 'Float64', dt)
params.addMember('sqrtDT', 'Float64', sqrtDT)
params.addMember('initialPrices', 'Float64['+str(numStocks)+']')
params.addMember('choleskyTrans', 'Float64['+str(numStocks)+']['+str(numStocks)+']')
params.setData('choleskyTrans', choleskyTrans)
params.addMember('drifts', 'Float64['+str(numStocks)+']')
params.setData('drifts', drifts)
runTrialOp = fabricClient.DG.createOperator("runTrial")
runTrial = str(numStocks).join( open('runTrial.kl').read().split('%NS%') )
#print(runTrial)
runTrialOp.setSourceCode('runTrial.kl', runTrial)
runTrialOp.setEntryFunctionName('runTrial')
if len( runTrialOp.getDiagnostics() ) > 0:
print(runTrialOp.getDiagnostics())
raise Exception( "Compile errors, aborting" )
runTrialBinding = fabricClient.DG.createBinding()
runTrialBinding.setOperator(runTrialOp)
runTrialBinding.setParameterLayout([
'self.index',
'params.numTradingDays',
'params.dt',
'params.sqrtDT',
'params.choleskyTrans',
'params.drifts',
'self.value'
])
sortOp = fabricClient.DG.createOperator("sort")
sort = str(numStocks).join( open('sort.kl').read().split('%NS%') )
#print(sort)
sortOp.setSourceCode('sort.kl', sort)
sortOp.setEntryFunctionName('sort')
if len( sortOp.getDiagnostics() ) > 0:
print(sortOp.getDiagnostics())
raise Exception( "Compile errors, aborting" )
sortBinding = fabricClient.DG.createBinding()
sortBinding.setOperator(sortOp)
sortBinding.setParameterLayout([
'self.value<>'
])
trials = fabricClient.DG.createNode('trials')
trials.setCount(numTrials)
trials.setDependency(params, 'params')
trials.addMember('value', 'Float64')
trials.bindings.append(runTrialBinding)
trials.bindings.append(sortBinding)
if len( trials.getErrors() ) > 0:
print(trials.getErrors())
raise Exception( "DG errors, aborting" )
def callback():
valueAtRisk = totalInitialPrice - trials.getData('value', int(round(numTrials*0.05)))
self.writeHeader( 200 )
self.wfile.write(str(valueAtRisk) + "\n")
fabricClient.close()
fabricClients.remove( fabricClient )
self.close()
trials.evaluateAsync( callback )
else:
prng = MathExt.Random.ExpGenerator
trialResults = []
for trial in range(0, numTrials):
prng.seed(4096*(1+trial))
#print("trial="+trial+" numTradingDays="+numTradingDays+" dt="+dt+" sqrtDT="+sqrtDT)
#print("choleskyTrans="+choleskyTrans)
#print("drifts="+drifts)
amounts = []
for i in range(0, numStocks):
amounts.insert(i, initialPrices[i])
for day in range(1, numTradingDays+1):
Z = MathExt.normalVec(numStocks, prng)
#print("Z = "+Z)
X = MathExt.mat.mulVec(choleskyTrans, Z)
#print("X = "+X)
for i in range(0, numStocks):
amounts[i] *= math.exp(drifts[i]*dt + X[i]*sqrtDT)
value = 0.0
for i in range(0, numStocks):
value += amounts[i]
trialResults.append(value)
def sort(v):
def partition(a, begin, end, pivot):
piv = a[pivot]
a[pivot] = a[end-1]
a[end-1] = piv
store = begin
for i in range(begin, end-1):
if a[i] <= piv:
t = a[store]
a[store] = a[i]
a[i] = t
store += 1
t = a[end-1]
a[end-1] = a[store]
a[store] = t
return store
def qsort(a, begin, end):
if end - begin <= 1:
return
else:
pivot = partition(a, begin, end, begin+int(round((end-begin)/2)))
qsort(a, begin, pivot)
qsort(a, pivot+1, end)
return qsort(v, 0, len(v))
sort(trialResults)
valueAtRisk = totalInitialPrice - trialResults[int(round(numTrials*0.05))]
self.writeHeader( 200 )
self.wfile.write( str(valueAtRisk) + "\n" )
self.close()
httpd = VARServer( ('', 1337), VARHandler )
print('Server running at http://127.0.0.1:1337/')
if useFabric:
# XXX initial Python alpha is not truly async, will be fixed in next release
httpd.timeout = 0.1
while numRequests < maxRequests:
for c in fabricClients:
c.running()
httpd.handle_request()
else:
while numRequests < maxRequests:
httpd.handle_request()
|
|
from __future__ import absolute_import, print_function, division
import mailcap
import mimetypes
import os
import os.path
import shlex
import signal
import stat
import subprocess
import sys
import tempfile
import traceback
import weakref
import six
import urwid
from typing import Optional # noqa
from mitmproxy import builtins
from mitmproxy import contentviews
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import script
from mitmproxy import utils
import mitmproxy.options
from mitmproxy.console import flowlist
from mitmproxy.console import flowview
from mitmproxy.console import grideditor
from mitmproxy.console import help
from mitmproxy.console import options
from mitmproxy.console import palettepicker
from mitmproxy.console import palettes
from mitmproxy.console import signals
from mitmproxy.console import statusbar
from mitmproxy.console import window
from mitmproxy.filt import FMarked
from netlib import tcp, strutils
EVENTLOG_SIZE = 500
class ConsoleState(flow.State):
def __init__(self):
flow.State.__init__(self)
self.focus = None
self.follow_focus = None
self.default_body_view = contentviews.get("Auto")
self.flowsettings = weakref.WeakKeyDictionary()
self.last_search = None
self.last_filter = ""
self.mark_filter = False
def __setattr__(self, name, value):
self.__dict__[name] = value
signals.update_settings.send(self)
def add_flow_setting(self, flow, key, value):
d = self.flowsettings.setdefault(flow, {})
d[key] = value
def get_flow_setting(self, flow, key, default=None):
d = self.flowsettings.get(flow, {})
return d.get(key, default)
def add_flow(self, f):
super(ConsoleState, self).add_flow(f)
self.update_focus()
return f
def update_flow(self, f):
super(ConsoleState, self).update_flow(f)
self.update_focus()
return f
def set_view_filter(self, txt):
ret = super(ConsoleState, self).set_view_filter(txt)
self.set_focus(self.focus)
return ret
def get_focus(self):
if not self.view or self.focus is None:
return None, None
return self.view[self.focus], self.focus
def set_focus(self, idx):
if self.view:
if idx is None or idx < 0:
idx = 0
elif idx >= len(self.view):
idx = len(self.view) - 1
self.focus = idx
else:
self.focus = None
def update_focus(self):
if self.focus is None:
self.set_focus(0)
elif self.follow_focus:
self.set_focus(len(self.view) - 1)
def set_focus_flow(self, f):
self.set_focus(self.view.index(f))
def get_from_pos(self, pos):
if len(self.view) <= pos or pos < 0:
return None, None
return self.view[pos], pos
def get_next(self, pos):
return self.get_from_pos(pos + 1)
def get_prev(self, pos):
return self.get_from_pos(pos - 1)
def delete_flow(self, f):
if f in self.view and self.view.index(f) <= self.focus:
self.focus -= 1
if self.focus < 0:
self.focus = None
ret = super(ConsoleState, self).delete_flow(f)
self.set_focus(self.focus)
return ret
def get_nearest_matching_flow(self, flow, filt):
fidx = self.view.index(flow)
dist = 1
fprev = fnext = True
while fprev or fnext:
fprev, _ = self.get_from_pos(fidx - dist)
fnext, _ = self.get_from_pos(fidx + dist)
if fprev and fprev.match(filt):
return fprev
elif fnext and fnext.match(filt):
return fnext
dist += 1
return None
def enable_marked_filter(self):
marked_flows = [f for f in self.flows if f.marked]
if not marked_flows:
return
marked_filter = "~%s" % FMarked.code
# Save Focus
last_focus, _ = self.get_focus()
nearest_marked = self.get_nearest_matching_flow(last_focus, marked_filter)
self.last_filter = self.filter_txt
self.set_view_filter(marked_filter)
# Restore Focus
if last_focus.marked:
self.set_focus_flow(last_focus)
else:
self.set_focus_flow(nearest_marked)
self.mark_filter = True
def disable_marked_filter(self):
marked_filter = "~%s" % FMarked.code
# Save Focus
last_focus, _ = self.get_focus()
nearest_marked = self.get_nearest_matching_flow(last_focus, marked_filter)
self.set_view_filter(self.last_filter)
self.last_filter = ""
# Restore Focus
if last_focus.marked:
self.set_focus_flow(last_focus)
else:
self.set_focus_flow(nearest_marked)
self.mark_filter = False
def clear(self):
marked_flows = [f for f in self.view if f.marked]
super(ConsoleState, self).clear()
for f in marked_flows:
self.add_flow(f)
f.marked = True
if len(self.flows.views) == 0:
self.focus = None
else:
self.focus = 0
self.set_focus(self.focus)
class Options(mitmproxy.options.Options):
def __init__(
self,
eventlog=False, # type: bool
follow=False, # type: bool
intercept=False, # type: bool
filter=None, # type: Optional[str]
palette=None, # type: Optional[str]
palette_transparent=False, # type: bool
no_mouse=False, # type: bool
**kwargs
):
self.eventlog = eventlog
self.follow = follow
self.intercept = intercept
self.filter = filter
self.palette = palette
self.palette_transparent = palette_transparent
self.no_mouse = no_mouse
super(Options, self).__init__(**kwargs)
class ConsoleMaster(flow.FlowMaster):
palette = []
def __init__(self, server, options):
flow.FlowMaster.__init__(self, options, server, ConsoleState())
self.stream_path = None
# This line is just for type hinting
self.options = self.options # type: Options
self.options.errored.connect(self.options_error)
r = self.set_intercept(options.intercept)
if r:
print("Intercept error: {}".format(r), file=sys.stderr)
sys.exit(1)
if options.filter:
self.set_view_filter(options.filter)
self.set_stream_large_bodies(options.stream_large_bodies)
self.palette = options.palette
self.palette_transparent = options.palette_transparent
self.logbuffer = urwid.SimpleListWalker([])
self.follow = options.follow
if options.client_replay:
self.client_playback_path(options.client_replay)
if options.server_replay:
self.server_playback_path(options.server_replay)
self.view_stack = []
if options.app:
self.start_app(self.options.app_host, self.options.app_port)
signals.call_in.connect(self.sig_call_in)
signals.pop_view_state.connect(self.sig_pop_view_state)
signals.replace_view_state.connect(self.sig_replace_view_state)
signals.push_view_state.connect(self.sig_push_view_state)
signals.sig_add_log.connect(self.sig_add_log)
self.addons.add(options, *builtins.default_addons())
def __setattr__(self, name, value):
self.__dict__[name] = value
signals.update_settings.send(self)
def options_error(self, opts, exc):
signals.status_message.send(
message=str(exc),
expire=1
)
def sig_add_log(self, sender, e, level):
if self.options.verbosity < utils.log_tier(level):
return
if level in ("error", "warn"):
signals.status_message.send(
message = "{}: {}".format(level.title(), e)
)
e = urwid.Text((level, str(e)))
else:
e = urwid.Text(str(e))
self.logbuffer.append(e)
if len(self.logbuffer) > EVENTLOG_SIZE:
self.logbuffer.pop(0)
self.logbuffer.set_focus(len(self.logbuffer) - 1)
def add_log(self, e, level):
signals.add_log(e, level)
def sig_call_in(self, sender, seconds, callback, args=()):
def cb(*_):
return callback(*args)
self.loop.set_alarm_in(seconds, cb)
def sig_replace_view_state(self, sender):
"""
A view has been pushed onto the stack, and is intended to replace
the current view rather than creating a new stack entry.
"""
if len(self.view_stack) > 1:
del self.view_stack[1]
def sig_pop_view_state(self, sender):
"""
Pop the top view off the view stack. If no more views will be left
after this, prompt for exit.
"""
if len(self.view_stack) > 1:
self.view_stack.pop()
self.loop.widget = self.view_stack[-1]
else:
signals.status_prompt_onekey.send(
self,
prompt = "Quit",
keys = (
("yes", "y"),
("no", "n"),
),
callback = self.quit,
)
def sig_push_view_state(self, sender, window):
"""
Push a new view onto the view stack.
"""
self.view_stack.append(window)
self.loop.widget = window
self.loop.draw_screen()
def _run_script_method(self, method, s, f):
status, val = s.run(method, f)
if val:
if status:
signals.add_log("Method %s return: %s" % (method, val), "debug")
else:
signals.add_log(
"Method %s error: %s" %
(method, val[1]), "error")
def run_script_once(self, command, f):
if not command:
return
signals.add_log("Running script on flow: %s" % command, "debug")
try:
s = script.Script(command)
s.load()
except script.ScriptException as e:
signals.status_message.send(
message='Error loading "{}".'.format(command)
)
signals.add_log('Error loading "{}":\n{}'.format(command, e), "error")
return
if f.request:
self._run_script_method("request", s, f)
if f.response:
self._run_script_method("response", s, f)
if f.error:
self._run_script_method("error", s, f)
s.unload()
signals.flow_change.send(self, flow = f)
def toggle_eventlog(self):
self.options.eventlog = not self.options.eventlog
self.view_flowlist()
signals.replace_view_state.send(self)
def _readflows(self, path):
"""
Utility function that reads a list of flows
or prints an error to the UI if that fails.
Returns
- None, if there was an error.
- a list of flows, otherwise.
"""
try:
return flow.read_flows_from_paths(path)
except exceptions.FlowReadException as e:
signals.status_message.send(message=str(e))
def client_playback_path(self, path):
if not isinstance(path, list):
path = [path]
flows = self._readflows(path)
if flows:
self.start_client_playback(flows, False)
def server_playback_path(self, path):
if not isinstance(path, list):
path = [path]
flows = self._readflows(path)
if flows:
self.start_server_playback(
flows,
self.options.kill, self.options.rheaders,
False, self.options.nopop,
self.options.replay_ignore_params,
self.options.replay_ignore_content,
self.options.replay_ignore_payload_params,
self.options.replay_ignore_host
)
def spawn_editor(self, data):
text = not isinstance(data, bytes)
fd, name = tempfile.mkstemp('', "mproxy", text=text)
if six.PY2:
os.close(fd)
with open(name, "w" if text else "wb") as f:
f.write(data)
else:
with open(fd, "w" if text else "wb") as f:
f.write(data)
# if no EDITOR is set, assume 'vi'
c = os.environ.get("EDITOR") or "vi"
cmd = shlex.split(c)
cmd.append(name)
self.ui.stop()
try:
subprocess.call(cmd)
except:
signals.status_message.send(
message="Can't start editor: %s" % " ".join(c)
)
else:
with open(name, "r" if text else "rb") as f:
data = f.read()
self.ui.start()
os.unlink(name)
return data
def spawn_external_viewer(self, data, contenttype):
if contenttype:
contenttype = contenttype.split(";")[0]
ext = mimetypes.guess_extension(contenttype) or ""
else:
ext = ""
fd, name = tempfile.mkstemp(ext, "mproxy")
os.write(fd, data)
os.close(fd)
# read-only to remind the user that this is a view function
os.chmod(name, stat.S_IREAD)
cmd = None
shell = False
if contenttype:
c = mailcap.getcaps()
cmd, _ = mailcap.findmatch(c, contenttype, filename=name)
if cmd:
shell = True
if not cmd:
# hm which one should get priority?
c = os.environ.get("PAGER") or os.environ.get("EDITOR")
if not c:
c = "less"
cmd = shlex.split(c)
cmd.append(name)
self.ui.stop()
try:
subprocess.call(cmd, shell=shell)
except:
signals.status_message.send(
message="Can't start external viewer: %s" % " ".join(c)
)
self.ui.start()
os.unlink(name)
def set_palette(self, name):
self.palette = name
self.ui.register_palette(
palettes.palettes[name].palette(self.palette_transparent)
)
self.ui.clear()
def ticker(self, *userdata):
changed = self.tick(timeout=0)
if changed:
self.loop.draw_screen()
signals.update_settings.send()
self.loop.set_alarm_in(0.01, self.ticker)
def run(self):
self.ui = urwid.raw_display.Screen()
self.ui.set_terminal_properties(256)
self.set_palette(self.palette)
self.loop = urwid.MainLoop(
urwid.SolidFill("x"),
screen = self.ui,
handle_mouse = not self.options.no_mouse,
)
self.ab = statusbar.ActionBar()
if self.options.rfile:
ret = self.load_flows_path(self.options.rfile)
if ret and self.state.flow_count():
signals.add_log(
"File truncated or corrupted. "
"Loaded as many flows as possible.",
"error"
)
elif ret and not self.state.flow_count():
self.shutdown()
print("Could not load file: {}".format(ret), file=sys.stderr)
sys.exit(1)
self.loop.set_alarm_in(0.01, self.ticker)
if self.options.http2 and not tcp.HAS_ALPN: # pragma: no cover
def http2err(*args, **kwargs):
signals.status_message.send(
message = "HTTP/2 disabled - OpenSSL 1.0.2+ required."
" Use --no-http2 to silence this warning.",
expire=5
)
self.loop.set_alarm_in(0.01, http2err)
# It's not clear why we need to handle this explicitly - without this,
# mitmproxy hangs on keyboard interrupt. Remove if we ever figure it
# out.
def exit(s, f):
raise urwid.ExitMainLoop
signal.signal(signal.SIGINT, exit)
self.loop.set_alarm_in(
0.0001,
lambda *args: self.view_flowlist()
)
self.start()
try:
self.loop.run()
except Exception:
self.loop.stop()
sys.stdout.flush()
print(traceback.format_exc(), file=sys.stderr)
print("mitmproxy has crashed!", file=sys.stderr)
print("Please lodge a bug report at:", file=sys.stderr)
print("\thttps://github.com/mitmproxy/mitmproxy", file=sys.stderr)
print("Shutting down...", file=sys.stderr)
sys.stderr.flush()
self.shutdown()
def view_help(self, helpctx):
signals.push_view_state.send(
self,
window = window.Window(
self,
help.HelpView(helpctx),
None,
statusbar.StatusBar(self, help.footer),
None
)
)
def view_options(self):
for i in self.view_stack:
if isinstance(i["body"], options.Options):
return
signals.push_view_state.send(
self,
window = window.Window(
self,
options.Options(self),
None,
statusbar.StatusBar(self, options.footer),
options.help_context,
)
)
def view_palette_picker(self):
signals.push_view_state.send(
self,
window = window.Window(
self,
palettepicker.PalettePicker(self),
None,
statusbar.StatusBar(self, palettepicker.footer),
palettepicker.help_context,
)
)
def view_grideditor(self, ge):
signals.push_view_state.send(
self,
window = window.Window(
self,
ge,
None,
statusbar.StatusBar(self, grideditor.base.FOOTER),
ge.make_help()
)
)
def view_flowlist(self):
if self.ui.started:
self.ui.clear()
if self.state.follow_focus:
self.state.set_focus(self.state.flow_count())
if self.options.eventlog:
body = flowlist.BodyPile(self)
else:
body = flowlist.FlowListBox(self)
if self.follow:
self.toggle_follow_flows()
signals.push_view_state.send(
self,
window = window.Window(
self,
body,
None,
statusbar.StatusBar(self, flowlist.footer),
flowlist.help_context
)
)
def view_flow(self, flow, tab_offset=0):
self.state.set_focus_flow(flow)
signals.push_view_state.send(
self,
window = window.Window(
self,
flowview.FlowView(self, self.state, flow, tab_offset),
flowview.FlowViewHeader(self, flow),
statusbar.StatusBar(self, flowview.footer),
flowview.help_context
)
)
def _write_flows(self, path, flows):
if not path:
return
path = os.path.expanduser(path)
try:
f = open(path, "wb")
fw = flow.FlowWriter(f)
for i in flows:
fw.add(i)
f.close()
except IOError as v:
signals.status_message.send(message=v.strerror)
def save_one_flow(self, path, flow):
return self._write_flows(path, [flow])
def save_flows(self, path):
return self._write_flows(path, self.state.view)
def load_flows_callback(self, path):
if not path:
return
ret = self.load_flows_path(path)
return ret or "Flows loaded from %s" % path
def load_flows_path(self, path):
reterr = None
try:
flow.FlowMaster.load_flows_file(self, path)
except exceptions.FlowReadException as e:
reterr = str(e)
signals.flowlist_change.send(self)
return reterr
def accept_all(self):
self.state.accept_all(self)
def set_view_filter(self, txt):
v = self.state.set_view_filter(txt)
signals.flowlist_change.send(self)
return v
def set_intercept(self, txt):
return self.state.set_intercept(txt)
def change_default_display_mode(self, t):
v = contentviews.get_by_shortcut(t)
self.state.default_body_view = v
self.refresh_focus()
def edit_scripts(self, scripts):
self.options.scripts = [x[0] for x in scripts]
def stop_client_playback_prompt(self, a):
if a != "n":
self.stop_client_playback()
def stop_server_playback_prompt(self, a):
if a != "n":
self.stop_server_playback()
def quit(self, a):
if a != "n":
raise urwid.ExitMainLoop
def shutdown(self):
self.state.killall(self)
flow.FlowMaster.shutdown(self)
def clear_flows(self):
self.state.clear()
signals.flowlist_change.send(self)
def toggle_follow_flows(self):
# toggle flow follow
self.state.follow_focus = not self.state.follow_focus
# jump to most recent flow if follow is now on
if self.state.follow_focus:
self.state.set_focus(self.state.flow_count())
signals.flowlist_change.send(self)
def delete_flow(self, f):
self.state.delete_flow(f)
signals.flowlist_change.send(self)
def refresh_focus(self):
if self.state.view:
signals.flow_change.send(
self,
flow = self.state.view[self.state.focus]
)
def process_flow(self, f):
should_intercept = any(
[
self.state.intercept and f.match(self.state.intercept) and not f.request.is_replay,
f.intercepted,
]
)
if should_intercept:
f.intercept(self)
f.reply.take()
signals.flowlist_change.send(self)
signals.flow_change.send(self, flow = f)
def clear_events(self):
self.logbuffer[:] = []
# Handlers
@controller.handler
def error(self, f):
f = flow.FlowMaster.error(self, f)
if f:
self.process_flow(f)
return f
@controller.handler
def request(self, f):
f = flow.FlowMaster.request(self, f)
if f:
self.process_flow(f)
return f
@controller.handler
def response(self, f):
f = flow.FlowMaster.response(self, f)
if f:
self.process_flow(f)
return f
@controller.handler
def tcp_message(self, f):
super(ConsoleMaster, self).tcp_message(f)
message = f.messages[-1]
direction = "->" if message.from_client else "<-"
self.add_log("{client} {direction} tcp {direction} {server}".format(
client=repr(f.client_conn.address),
server=repr(f.server_conn.address),
direction=direction,
), "info")
self.add_log(strutils.bytes_to_escaped_str(message.content), "debug")
|
|
##
# pytibrv/events.py
# TIBRV Library for PYTHON
# tibrvEvent_XXX
#
# LAST MODIFIED : V1.1 20170220 ARIEN arien.chen@gmail.com
#
# DESCRIPTIONS
# ---------------------------------------------------
# 1. Callbacks cause a segmentation fault in OSX
# for detail: http://osdir.com/ml/python.ctypes/2008-05/msg00010.html
#
# sample code as below:
#
# _clib = ctypes.cdll.LoadLibrary()
# _callback = CFUNCTYPE(...)
# ...
# def my_callback() :
# ...
#
# def reg_callback() :
# ...
# cb = _callback(my_callback)
# # Call C Lib to register
# _clib.register(cb)
# ...
#
# IT WOULD SEGFAULT WHEN my_callback was triggered.
# The local variable cb stores the callback function pointer,
# and it would be garbage collected when reg_callback() returned.
#
# Callbacks are async. When C tries to call back into the Python function,
# ctypes would access an already-destroyed object in Python,
# which causes the SEGFAULT.
#
# I found this issue only on OSX; Linux seems OK.
#
# BUGFIX:
# create a dict variable to store the CFUNCTYPE objects
# rather than a local variable inside a function, which is GC'd as soon as the function returns
#
# 2. Callback Closure as Python Object
# plz refer : http://stackoverflow.com/questions/3245859/back-casting-a-ctypes-py-object-in-a-callback
#
# Hints:
# (1) Declare Callback prototype as c_void_p
# callback = CFUNCTYPE( , c_void_p, ...)
#
# (2) Declare C API as py_object
# reg_func.argtypes = [ , py_object, ...]
#
#
# (3) Call C API to pass Python my_obj
# cb = callback(my_func)
# reg_func( cb, py_object(my_obj), ...)
#
# (4) Cast in Callback
# def my_func(,closure,...):
# obj = cast(closure, py_object).value
# # obj is my_obj
#
# (5) Beware of immutable objects, such as Python native data types: bool, int, float, str
# for example:
#
# cnt = int(1)
# ...
# reg_func(cb, py_object(cnt))
# ...
# cnt = cnt + 1 -> cnt would refer to a new int object, not 1
# -> my_func would always get the original object (=1)
# -> Python passes object references BY VALUE.
#
# (6) GC Issue
# same as the GC issue of CFUNCTYPE()/Callback above:
# if the closure object was created inside a function,
# that local object would be GC'd and destroyed when the function returned,
# causing a segmentation fault as well.
#
# 3. callback functions inside a class
# DON'T pass a class method into the API:
# all class methods predefine the first parameter as 'self'.
#
# In general, callback functions should be module level.
# If you want a class method as a callback, please use the pytibrv Python object model,
# ex: TibrvListener, TibrvMsgCallback
#
# FEATURES: * = not implemented
# ------------------------------------------------------
# tibrvEvent_CreateListener
# tibrvEvent_CreateTimer
# tibrvEvent_CreateVectorListener
# tibrvEvent_DestroyEx
# tibrvEvent_GetListenerSubject
# tibrvEvent_GetListenerTransport
# tibrvEvent_GetTimerInterval
# tibrvEvent_GetType
# tibrvEvent_GetQueue
# tibrvEvent_ResetTimerInterval
#
# *tibrvEvent_CreateGroupVectorListener
# *tibrvEvent_CreateIO
# *tibrvEvent_GetIOSource
# *tibrvEvent_GetIOType
#
# CHANGED LOGS
# -------------------------------------------------------
# 20170220 V1.1 ARIEN arien.chen@gmail.com
# REMOVE TIBRV C Header
#
# 20161211 V1.0 ARIEN arien.chen@gmail.com
# CREATED
##
import ctypes as _ctypes
from .types import tibrv_status, tibrvTransport, tibrvQueue, tibrvEvent, tibrvEventType, \
tibrvEventCallback, tibrvEventOnComplete, tibrvEventVectorCallback
from .status import TIBRV_OK, TIBRV_INVALID_EVENT, TIBRV_INVALID_ARG, TIBRV_INVALID_QUEUE, \
TIBRV_INVALID_TRANSPORT, TIBRV_INVALID_CALLBACK
from .api import _rv, _cstr, _pystr, \
_c_tibrvTransport, _c_tibrvQueue, _c_tibrvEvent, _c_tibrvEventType, \
_c_tibrvEventOnComplete, _c_tibrvEventCallback, _c_tibrvEventVectorCallback, \
_c_tibrv_status, _c_tibrv_f64, _c_tibrv_str
# keep callback/closure object from GC
# key = tibrvEvent
__callback = {}
__closure = {}
def __reg(event, func, closure):
__callback[event] = func
if closure is not None:
__closure[event] = closure
return
def __unreg(event):
if event in __callback:
del __callback[event]
if event in __closure:
del __closure[event]
return
##-----------------------------------------------------------------------------
# HELPER FUNCTION
# tibrvClosure -> cast ctypes closure to Python Object
##-----------------------------------------------------------------------------
def tibrvClosure(closure) -> object:
return _ctypes.cast(closure, _ctypes.py_object).value
##-----------------------------------------------------------------------------
# TIBRV API : tibrv/events.h
##-----------------------------------------------------------------------------
##
_rv.tibrvEvent_CreateListener.argtypes = [_ctypes.POINTER(_c_tibrvEvent),
_c_tibrvQueue,
_c_tibrvEventCallback,
_c_tibrvTransport,
_c_tibrv_str,
_ctypes.py_object]
_rv.tibrvEvent_CreateListener.restype = _c_tibrv_status
def tibrvEvent_CreateListener(queue: tibrvQueue, callback: tibrvEventCallback, transport: tibrvTransport,
subject: str, closure = None) -> (tibrv_status, tibrvEvent):
if queue is None or queue == 0:
return TIBRV_INVALID_QUEUE, None
if callback is None:
return TIBRV_INVALID_CALLBACK, None
if transport is None or transport == 0:
return TIBRV_INVALID_TRANSPORT, None
if subject is None:
return TIBRV_INVALID_ARG, None
ev = _c_tibrvEvent(0)
try:
que = _c_tibrvQueue(queue)
except:
return TIBRV_INVALID_QUEUE, None
try:
cb = _c_tibrvEventCallback(callback)
except:
return TIBRV_INVALID_CALLBACK, None
try:
tx = _c_tibrvTransport(transport)
except:
return TIBRV_INVALID_TRANSPORT, None
try:
subj = _cstr(subject)
cz = _ctypes.py_object(closure)
except:
return TIBRV_INVALID_ARG, None
status = _rv.tibrvEvent_CreateListener(_ctypes.byref(ev), que, cb, tx, subj, cz)
# save cb to prevent GC
if status == TIBRV_OK:
__reg(ev.value, cb, cz)
return status, ev.value
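# Illustrative usage sketch (assumes a queue `que` and transport `tx` were
# created elsewhere; the names and the subject below are hypothetical):
#
#   def on_msg(event, message, closure):     # module-level function, NOT a class method
#       ctx = tibrvClosure(closure)          # cast the c_void_p back to the Python object
#       ctx['count'] += 1
#
#   ctx = {'count': 0}
#   status, listener = tibrvEvent_CreateListener(que, on_msg, tx, 'DEMO.>', ctx)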
##
_rv.tibrvEvent_CreateVectorListener.argtypes = [_ctypes.POINTER(_c_tibrvEvent),
_c_tibrvQueue,
_c_tibrvEventVectorCallback,
_c_tibrvTransport,
_c_tibrv_str,
_ctypes.py_object]
_rv.tibrvEvent_CreateVectorListener.restype = _c_tibrv_status
def tibrvEvent_CreateVectorListener(queue: tibrvQueue, callback: tibrvEventVectorCallback,
transport: tibrvTransport, subject: str,
closure = None) -> (tibrv_status, tibrvEvent):
if queue is None or queue == 0:
return TIBRV_INVALID_QUEUE, None
if callback is None:
return TIBRV_INVALID_CALLBACK, None
if transport is None or transport == 0:
return TIBRV_INVALID_TRANSPORT, None
if subject is None:
return TIBRV_INVALID_ARG, None
ev = _c_tibrvEvent(0)
try:
que = _c_tibrvQueue(queue)
except:
return TIBRV_INVALID_QUEUE, None
try:
cb = _c_tibrvEventVectorCallback(callback)
except:
return TIBRV_INVALID_CALLBACK, None
try:
tx = _c_tibrvTransport(transport)
except:
return TIBRV_INVALID_TRANSPORT, None
try:
subj = _cstr(subject)
cz = _ctypes.py_object(closure)
except:
return TIBRV_INVALID_ARG, None
status = _rv.tibrvEvent_CreateVectorListener(_ctypes.byref(ev), que, cb, tx, subj, cz)
# save cb to prevent GC
if status == TIBRV_OK:
__reg(ev.value, cb, cz)
return status, ev.value
##
_rv.tibrvEvent_CreateTimer.argtypes = [_ctypes.POINTER(_c_tibrvEvent),
_c_tibrvQueue,
_c_tibrvEventCallback,
_c_tibrv_f64,
_ctypes.py_object]
_rv.tibrvEvent_CreateTimer.restype = _c_tibrv_status
def tibrvEvent_CreateTimer(queue: tibrvQueue, callback: tibrvEventCallback, interval: float,
closure=None) -> (tibrv_status, tibrvEvent):
if queue is None or queue == 0:
return TIBRV_INVALID_QUEUE, None
if callback is None:
return TIBRV_INVALID_CALLBACK, None
ev = _c_tibrvEvent(0)
try:
que = _c_tibrvQueue(queue)
except:
return TIBRV_INVALID_QUEUE, None
try:
cb = _c_tibrvEventCallback(callback)
except:
return TIBRV_INVALID_CALLBACK, None
try:
n = _c_tibrv_f64(interval)
cz = _ctypes.py_object(closure)
except:
return TIBRV_INVALID_ARG, None
status = _rv.tibrvEvent_CreateTimer(_ctypes.byref(ev), que, cb, n, cz)
# save cb to prevent GC
if status == TIBRV_OK:
__reg(ev.value, cb, cz)
return status, ev.value
##
_rv.tibrvEvent_DestroyEx.argtypes = [_c_tibrvEvent, _c_tibrvEventOnComplete]
_rv.tibrvEvent_DestroyEx.restype = _c_tibrv_status
def tibrvEvent_Destroy(event: tibrvEvent, callback: tibrvEventOnComplete = None) -> tibrv_status:
if event is None or event == 0:
return TIBRV_INVALID_EVENT
try:
ev = _c_tibrvEvent(event)
except:
return TIBRV_INVALID_EVENT
if callback is None:
cb = _c_tibrvEventOnComplete(0)
else:
try:
cb = _c_tibrvEventOnComplete(callback)
except:
return TIBRV_INVALID_CALLBACK
status = _rv.tibrvEvent_DestroyEx(ev, cb)
if status == TIBRV_OK:
__unreg(event)
# THIS MAY CAUSE MEMORY LEAK
if callback is not None:
__reg(event, cb, None)
return status
##
_rv.tibrvEvent_GetType.argtypes = [_c_tibrvEvent, _ctypes.POINTER(_c_tibrvEventType)]
_rv.tibrvEvent_GetType.restype = _c_tibrv_status
def tibrvEvent_GetType(event: tibrvEvent) -> (tibrv_status, tibrvEventType):
if event is None or event == 0:
return TIBRV_INVALID_EVENT, None
try:
ev = _c_tibrvEvent(event)
except:
return TIBRV_INVALID_EVENT, None
ty = _c_tibrvEventType(0)
status = _rv.tibrvEvent_GetType(ev, _ctypes.byref(ty))
return status, ty.value
##
_rv.tibrvEvent_GetQueue.argtypes = [_c_tibrvEvent, _ctypes.POINTER(_c_tibrvQueue)]
_rv.tibrvEvent_GetQueue.restype = _c_tibrv_status
def tibrvEvent_GetQueue(event: tibrvEvent) -> (tibrv_status, tibrvQueue):
if event is None or event == 0:
return TIBRV_INVALID_EVENT, None
try:
ev = _c_tibrvEvent(event)
except:
return TIBRV_INVALID_EVENT, None
que = _c_tibrvQueue(0)
status = _rv.tibrvEvent_GetQueue(ev, _ctypes.byref(que))
return status, que.value
##
_rv.tibrvEvent_GetListenerSubject.argtypes = [_c_tibrvEvent, _ctypes.POINTER(_c_tibrv_str)]
_rv.tibrvEvent_GetListenerSubject.restype = _c_tibrv_status
def tibrvEvent_GetListenerSubject(event: tibrvEvent) -> (tibrv_status, str):
if event is None or event == 0:
return TIBRV_INVALID_EVENT, None
try:
ev = _c_tibrvEvent(event)
except:
return TIBRV_INVALID_EVENT, None
sz = _c_tibrv_str()
status = _rv.tibrvEvent_GetListenerSubject(ev, _ctypes.byref(sz))
return status, _pystr(sz)
##
_rv.tibrvEvent_GetListenerTransport.argtypes = [_c_tibrvEvent, _ctypes.POINTER(_c_tibrvTransport)]
_rv.tibrvEvent_GetListenerTransport.restype = _c_tibrv_status
def tibrvEvent_GetListenerTransport(event: tibrvEvent) -> (tibrv_status, tibrvTransport):
if event is None or event == 0:
return TIBRV_INVALID_EVENT, None
try:
ev = _c_tibrvEvent(event)
except:
return TIBRV_INVALID_EVENT, None
tx = _c_tibrvTransport(0)
status = _rv.tibrvEvent_GetListenerTransport(ev, _ctypes.byref(tx))
return status, tx.value
##
_rv.tibrvEvent_GetTimerInterval.argtypes = [_c_tibrvEvent, _ctypes.POINTER(_c_tibrv_f64)]
_rv.tibrvEvent_GetTimerInterval.restype = _c_tibrv_status
def tibrvEvent_GetTimerInterval(event: tibrvEvent) -> (tibrv_status, float):
if event is None or event == 0:
return TIBRV_INVALID_EVENT, None
try:
ev = _c_tibrvEvent(event)
except:
return TIBRV_INVALID_EVENT, None
n = _c_tibrv_f64(0)
status = _rv.tibrvEvent_GetTimerInterval(ev, _ctypes.byref(n))
return status, n.value
##
_rv.tibrvEvent_ResetTimerInterval.argtypes = [_c_tibrvEvent, _c_tibrv_f64]
_rv.tibrvEvent_ResetTimerInterval.restype = _c_tibrv_status
def tibrvEvent_ResetTimerInterval(event: tibrvEvent, newInterval: float) -> tibrv_status:
if event is None or event == 0:
return TIBRV_INVALID_EVENT
if newInterval is None:
return TIBRV_INVALID_ARG
try:
ev = _c_tibrvEvent(event)
except:
return TIBRV_INVALID_EVENT
try:
n = _c_tibrv_f64(newInterval)
except:
return TIBRV_INVALID_ARG
status = _rv.tibrvEvent_ResetTimerInterval(ev, n)
return status
|
|
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.views.generic import DetailView
from django_filters.rest_framework import DjangoFilterBackend
from guardian.mixins import (
LoginRequiredMixin,
PermissionListMixin,
PermissionRequiredMixin as ObjectPermissionRequiredMixin,
)
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.mixins import (
CreateModelMixin,
ListModelMixin,
RetrieveModelMixin,
)
from rest_framework.permissions import DjangoObjectPermissions
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.viewsets import GenericViewSet, ReadOnlyModelViewSet
from rest_framework_guardian.filters import ObjectPermissionsFilter
from grandchallenge.algorithms.tasks import create_algorithm_jobs_for_session
from grandchallenge.archives.tasks import add_images_to_archive
from grandchallenge.cases.filters import ImageFilterSet
from grandchallenge.cases.models import (
Image,
ImageFile,
RawImageFile,
RawImageUploadSession,
)
from grandchallenge.cases.serializers import (
HyperlinkedImageSerializer,
RawImageFileSerializer,
RawImageUploadSessionPatchSerializer,
RawImageUploadSessionSerializer,
)
from grandchallenge.core.renderers import PaginatedCSVRenderer
from grandchallenge.datatables.views import Column, PaginatedTableListView
from grandchallenge.jqfileupload.models import StagedFile
from grandchallenge.reader_studies.tasks import (
add_image_to_answer,
add_images_to_reader_study,
)
from grandchallenge.subdomains.utils import reverse_lazy
class RawImageUploadSessionList(
LoginRequiredMixin, PermissionListMixin, PaginatedTableListView,
):
model = RawImageUploadSession
permission_required = f"{RawImageUploadSession._meta.app_label}.view_{RawImageUploadSession._meta.model_name}"
login_url = reverse_lazy("account_login")
row_template = "cases/rawimageuploadsession_row.html"
search_fields = [
"pk",
]
columns = [
Column(title="ID", sort_field="pk"),
Column(title="Created", sort_field="created"),
Column(title="Status", sort_field="status"),
]
default_sort_column = 1
class RawImageUploadSessionDetail(
LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
):
model = RawImageUploadSession
permission_required = f"{RawImageUploadSession._meta.app_label}.view_{RawImageUploadSession._meta.model_name}"
raise_exception = True
login_url = reverse_lazy("account_login")
class OSDImageDetail(
LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
):
model = Image
permission_required = (
f"{Image._meta.app_label}.view_{Image._meta.model_name}"
)
raise_exception = True
login_url = reverse_lazy("account_login")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
try:
dzi = self.object.files.get(image_type=ImageFile.IMAGE_TYPE_DZI)
except ObjectDoesNotExist:
raise Http404
context.update({"dzi_url": dzi.file.url})
return context
class ImageViewSet(ReadOnlyModelViewSet):
serializer_class = HyperlinkedImageSerializer
queryset = Image.objects.all().prefetch_related("files")
permission_classes = (DjangoObjectPermissions,)
filter_backends = (
DjangoFilterBackend,
ObjectPermissionsFilter,
)
filterset_class = ImageFilterSet
renderer_classes = (
*api_settings.DEFAULT_RENDERER_CLASSES,
PaginatedCSVRenderer,
)
class RawImageUploadSessionViewSet(
CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet
):
queryset = RawImageUploadSession.objects.prefetch_related(
"rawimagefile_set"
).all()
permission_classes = [DjangoObjectPermissions]
filter_backends = [ObjectPermissionsFilter]
def perform_create(self, serializer):
serializer.save(creator=self.request.user)
def get_serializer_class(self):
if self.request.method == "PATCH":
return RawImageUploadSessionPatchSerializer
else:
return RawImageUploadSessionSerializer
def validate_staged_files(self, *, staged_files):
file_ids = [f.staged_file_id for f in staged_files]
if any(f_id is None for f_id in file_ids):
raise ValidationError("File has not been staged")
chunks = StagedFile.objects.filter(file_id__in=file_ids)
if len({c.client_filename for c in chunks}) != len(staged_files):
raise ValidationError("Filenames must be unique")
if (
sum([f.end_byte - f.start_byte for f in chunks])
> settings.UPLOAD_SESSION_MAX_BYTES
):
raise ValidationError(
"Total size of all files exceeds the upload limit"
)
@action(detail=True, methods=["patch"])
def process_images(self, request, pk=None):
upload_session: RawImageUploadSession = self.get_object()
serializer = self.get_serializer(
upload_session, data=request.data, partial=True
)
if serializer.is_valid():
try:
self.validate_staged_files(
staged_files=upload_session.rawimagefile_set.all()
)
except ValidationError as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
if (
upload_session.status == upload_session.PENDING
and not upload_session.rawimagefile_set.filter(
consumed=True
).exists()
):
upload_session.process_images(
linked_task=self.get_linked_task(
validated_data=serializer.validated_data
)
)
return Response(
"Image processing job queued.", status=status.HTTP_200_OK
)
else:
return Response(
"Image processing job could not be queued.",
status=status.HTTP_400_BAD_REQUEST,
)
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
def get_linked_task(self, *, validated_data):
if "algorithm" in validated_data:
return create_algorithm_jobs_for_session.signature(
kwargs={
"algorithm_image_pk": validated_data[
"algorithm"
].latest_ready_image.pk
},
immutable=True,
)
elif "archive" in validated_data:
return add_images_to_archive.signature(
kwargs={"archive_pk": validated_data["archive"].pk},
immutable=True,
)
elif "reader_study" in validated_data:
return add_images_to_reader_study.signature(
kwargs={"reader_study_pk": validated_data["reader_study"].pk},
immutable=True,
)
elif "answer" in validated_data:
return add_image_to_answer.signature(
kwargs={"answer_pk": validated_data["answer"].pk},
immutable=True,
)
else:
raise RuntimeError(
"Algorithm image, archive, reader study or answer must be set"
)
class RawImageFileViewSet(
CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet
):
serializer_class = RawImageFileSerializer
queryset = RawImageFile.objects.all()
permission_classes = [DjangoObjectPermissions]
filter_backends = [ObjectPermissionsFilter]
|
|
"""Support for the Netatmo cameras."""
from __future__ import annotations
import logging
from typing import Any, cast
import aiohttp
import pyatmo
import voluptuous as vol
from homeassistant.components.camera import SUPPORT_STREAM, Camera
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
ATTR_CAMERA_LIGHT_MODE,
ATTR_PERSON,
ATTR_PERSONS,
ATTR_PSEUDO,
CAMERA_LIGHT_MODES,
DATA_CAMERAS,
DATA_EVENTS,
DATA_HANDLER,
DATA_PERSONS,
DOMAIN,
EVENT_TYPE_LIGHT_MODE,
EVENT_TYPE_OFF,
EVENT_TYPE_ON,
MANUFACTURER,
MODELS,
SERVICE_SET_CAMERA_LIGHT,
SERVICE_SET_PERSON_AWAY,
SERVICE_SET_PERSONS_HOME,
SIGNAL_NAME,
WEBHOOK_LIGHT_MODE,
WEBHOOK_NACAMERA_CONNECTION,
WEBHOOK_PUSH_TYPE,
)
from .data_handler import CAMERA_DATA_CLASS_NAME, NetatmoDataHandler
from .netatmo_entity_base import NetatmoBase
_LOGGER = logging.getLogger(__name__)
DEFAULT_QUALITY = "high"
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Netatmo camera platform."""
if "access_camera" not in entry.data["token"]["scope"]:
_LOGGER.info(
"Cameras are currently not supported with this authentication method"
)
return
data_handler = hass.data[DOMAIN][entry.entry_id][DATA_HANDLER]
await data_handler.register_data_class(
CAMERA_DATA_CLASS_NAME, CAMERA_DATA_CLASS_NAME, None
)
data_class = data_handler.data.get(CAMERA_DATA_CLASS_NAME)
if not data_class or not data_class.raw_data:
raise PlatformNotReady
all_cameras = []
for home in data_class.cameras.values():
for camera in home.values():
all_cameras.append(camera)
entities = [
NetatmoCamera(
data_handler,
camera["id"],
camera["type"],
camera["home_id"],
DEFAULT_QUALITY,
)
for camera in all_cameras
]
for person_id, person_data in data_handler.data[
CAMERA_DATA_CLASS_NAME
].persons.items():
hass.data[DOMAIN][DATA_PERSONS][person_id] = person_data.get(ATTR_PSEUDO)
_LOGGER.debug("Adding cameras %s", entities)
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_PERSONS_HOME,
{vol.Required(ATTR_PERSONS): vol.All(cv.ensure_list, [cv.string])},
"_service_set_persons_home",
)
platform.async_register_entity_service(
SERVICE_SET_PERSON_AWAY,
{vol.Optional(ATTR_PERSON): cv.string},
"_service_set_person_away",
)
platform.async_register_entity_service(
SERVICE_SET_CAMERA_LIGHT,
{vol.Required(ATTR_CAMERA_LIGHT_MODE): vol.In(CAMERA_LIGHT_MODES)},
"_service_set_camera_light",
)
class NetatmoCamera(NetatmoBase, Camera):
"""Representation of a Netatmo camera."""
def __init__(
self,
data_handler: NetatmoDataHandler,
camera_id: str,
camera_type: str,
home_id: str,
quality: str,
) -> None:
"""Set up for access to the Netatmo camera images."""
Camera.__init__(self)
super().__init__(data_handler)
self._data_classes.append(
{"name": CAMERA_DATA_CLASS_NAME, SIGNAL_NAME: CAMERA_DATA_CLASS_NAME}
)
self._id = camera_id
self._home_id = home_id
self._device_name = self._data.get_camera(camera_id=camera_id)["name"]
self._attr_name = f"{MANUFACTURER} {self._device_name}"
self._model = camera_type
self._attr_unique_id = f"{self._id}-{self._model}"
self._quality = quality
self._vpnurl: str | None = None
self._localurl: str | None = None
self._status: str | None = None
self._sd_status: str | None = None
self._alim_status: str | None = None
self._is_local: str | None = None
self._light_state = None
async def async_added_to_hass(self) -> None:
"""Entity created."""
await super().async_added_to_hass()
for event_type in (EVENT_TYPE_LIGHT_MODE, EVENT_TYPE_OFF, EVENT_TYPE_ON):
self._listeners.append(
async_dispatcher_connect(
self.hass,
f"signal-{DOMAIN}-webhook-{event_type}",
self.handle_event,
)
)
self.hass.data[DOMAIN][DATA_CAMERAS][self._id] = self._device_name
@callback
def handle_event(self, event: dict) -> None:
"""Handle webhook events."""
data = event["data"]
if not data.get("camera_id"):
return
if data["home_id"] == self._home_id and data["camera_id"] == self._id:
if data[WEBHOOK_PUSH_TYPE] in ("NACamera-off", "NACamera-disconnection"):
self.is_streaming = False
self._status = "off"
elif data[WEBHOOK_PUSH_TYPE] in (
"NACamera-on",
WEBHOOK_NACAMERA_CONNECTION,
):
self.is_streaming = True
self._status = "on"
elif data[WEBHOOK_PUSH_TYPE] == WEBHOOK_LIGHT_MODE:
self._light_state = data["sub_type"]
self._attr_extra_state_attributes.update(
{"light_state": self._light_state}
)
self.async_write_ha_state()
return
@property
def _data(self) -> pyatmo.AsyncCameraData:
"""Return data for this entity."""
return cast(
pyatmo.AsyncCameraData,
self.data_handler.data[self._data_classes[0]["name"]],
)
async def async_camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return a still image response from the camera."""
try:
return cast(
bytes, await self._data.async_get_live_snapshot(camera_id=self._id)
)
except (
aiohttp.ClientPayloadError,
aiohttp.ContentTypeError,
aiohttp.ServerDisconnectedError,
aiohttp.ClientConnectorError,
pyatmo.exceptions.ApiError,
) as err:
_LOGGER.debug("Could not fetch live camera image (%s)", err)
return None
@property
def available(self) -> bool:
"""Return True if entity is available."""
return bool(self._alim_status == "on" or self._status == "disconnected")
@property
def supported_features(self) -> int:
"""Return supported features."""
return SUPPORT_STREAM
@property
def brand(self) -> str:
"""Return the camera brand."""
return MANUFACTURER
@property
def motion_detection_enabled(self) -> bool:
"""Return the camera motion detection status."""
return bool(self._status == "on")
@property
def is_on(self) -> bool:
"""Return true if on."""
return self.is_streaming
async def async_turn_off(self) -> None:
"""Turn off camera."""
await self._data.async_set_state(
home_id=self._home_id, camera_id=self._id, monitoring="off"
)
async def async_turn_on(self) -> None:
"""Turn on camera."""
await self._data.async_set_state(
home_id=self._home_id, camera_id=self._id, monitoring="on"
)
async def stream_source(self) -> str:
"""Return the stream source."""
url = "{0}/live/files/{1}/index.m3u8"
if self._localurl:
return url.format(self._localurl, self._quality)
return url.format(self._vpnurl, self._quality)
@property
def model(self) -> str:
"""Return the camera model."""
return MODELS[self._model]
@callback
def async_update_callback(self) -> None:
"""Update the entity's state."""
camera = self._data.get_camera(self._id)
self._vpnurl, self._localurl = self._data.camera_urls(self._id)
self._status = camera.get("status")
self._sd_status = camera.get("sd_status")
self._alim_status = camera.get("alim_status")
self._is_local = camera.get("is_local")
self.is_streaming = bool(self._status == "on")
if self._model == "NACamera": # Smart Indoor Camera
self.hass.data[DOMAIN][DATA_EVENTS][self._id] = self.process_events(
self._data.events.get(self._id, {})
)
elif self._model == "NOC": # Smart Outdoor Camera
self.hass.data[DOMAIN][DATA_EVENTS][self._id] = self.process_events(
self._data.outdoor_events.get(self._id, {})
)
self._attr_extra_state_attributes.update(
{
"id": self._id,
"status": self._status,
"sd_status": self._sd_status,
"alim_status": self._alim_status,
"is_local": self._is_local,
"vpn_url": self._vpnurl,
"local_url": self._localurl,
"light_state": self._light_state,
}
)
def process_events(self, events: dict) -> dict:
"""Add meta data to events."""
for event in events.values():
if "video_id" not in event:
continue
if self._is_local:
event[
"media_url"
] = f"{self._localurl}/vod/{event['video_id']}/files/{self._quality}/index.m3u8"
else:
event[
"media_url"
] = f"{self._vpnurl}/vod/{event['video_id']}/files/{self._quality}/index.m3u8"
return events
async def _service_set_persons_home(self, **kwargs: Any) -> None:
"""Service to change current home schedule."""
persons = kwargs.get(ATTR_PERSONS, {})
person_ids = []
for person in persons:
for pid, data in self._data.persons.items():
if data.get("pseudo") == person:
person_ids.append(pid)
await self._data.async_set_persons_home(
person_ids=person_ids, home_id=self._home_id
)
_LOGGER.debug("Set %s as at home", persons)
async def _service_set_person_away(self, **kwargs: Any) -> None:
"""Service to mark a person as away or set the home as empty."""
person = kwargs.get(ATTR_PERSON)
person_id = None
if person:
for pid, data in self._data.persons.items():
if data.get("pseudo") == person:
person_id = pid
if person_id:
await self._data.async_set_persons_away(
person_id=person_id,
home_id=self._home_id,
)
_LOGGER.debug("Set %s as away", person)
else:
await self._data.async_set_persons_away(
person_id=person_id,
home_id=self._home_id,
)
_LOGGER.debug("Set home as empty")
async def _service_set_camera_light(self, **kwargs: Any) -> None:
"""Service to set light mode."""
mode = str(kwargs.get(ATTR_CAMERA_LIGHT_MODE))
_LOGGER.debug("Turn %s camera light for '%s'", mode, self._attr_name)
await self._data.async_set_state(
home_id=self._home_id,
camera_id=self._id,
floodlight=mode,
)
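# Usage sketch (illustrative only, not part of this integration): once the
# entity services above are registered, they can be invoked through Home
# Assistant's service API, e.g. from an automation. The entity_id below is a
# hypothetical example.
#
#     await hass.services.async_call(
#         DOMAIN,
#         SERVICE_SET_CAMERA_LIGHT,
#         {ATTR_CAMERA_LIGHT_MODE: "auto"},
#         target={"entity_id": "camera.netatmo_front_door"},
#         blocking=True,
#     )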
|
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase
from test import support
from test.support import HOST
threading = support.import_module('threading')
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
super(DummyDTPHandler, self).push(what.encode('ascii'))
def handle_error(self):
raise
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.rest = None
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.socket() as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.socket(socket.AF_INET6) as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(RETR_DATA[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
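# Added note: DummyFTPHandler above implements just enough of the FTP protocol
# to exercise ftplib's client side -- each cmd_* method replies with a canned
# positive status line, while PORT/PASV/EPRT/EPSV attach a DummyDTPHandler to
# serve the data channel.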
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
self.del_channel()
socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except socket.error as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
super(SSLConnection, self).close()
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
        def cmd_prot(self, line):
            """Set up a cleartext or secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=10)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.assertEqual(''.join(received), RETR_DATA[rest:],
msg='rest test case %d %d %d' % (rest,
len(''.join(received)),
len(RETR_DATA[rest:])))
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (socket.error, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=10) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=10) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=10) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=10)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=10)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv3
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=10)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
self.port = support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
self.sock.close()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send(b"1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
if ssl is not None:
tests.extend([TestTLS_FTPClassMixin, TestTLS_FTPClass])
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
|
import functools
import sys
from toolz.functoolz import curry, is_valid_args, is_partial_args
from toolz._signatures import has_unknown_args
from toolz.compatibility import PY3
from toolz.utils import raises
def make_func(param_string, raise_if_called=True):
if not param_string.startswith('('):
param_string = '(%s)' % param_string
if raise_if_called:
body = 'raise ValueError("function should not be called")'
else:
body = 'return True'
d = {}
exec('def func%s:\n %s' % (param_string, body), globals(), d)
return d['func']
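# For example (added note), make_func('x, y=1') wraps the parameters in
# parentheses and exec's the source
#
#     def func(x, y=1):
#         raise ValueError("function should not be called")
#
# returning the freshly built `func` object; pass raise_if_called=False to get
# a body of `return True` instead.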
def test_make_func():
f = make_func('')
assert raises(ValueError, lambda: f())
assert raises(TypeError, lambda: f(1))
f = make_func('', raise_if_called=False)
assert f()
assert raises(TypeError, lambda: f(1))
f = make_func('x, y=1', raise_if_called=False)
assert f(1)
assert f(x=1)
assert f(1, 2)
assert f(x=1, y=2)
assert raises(TypeError, lambda: f(1, 2, 3))
f = make_func('(x, y=1)', raise_if_called=False)
assert f(1)
assert f(x=1)
assert f(1, 2)
assert f(x=1, y=2)
assert raises(TypeError, lambda: f(1, 2, 3))
def test_is_valid(check_valid=is_valid_args, incomplete=False):
orig_check_valid = check_valid
check_valid = lambda func, *args, **kwargs: orig_check_valid(func, args, kwargs)
f = make_func('')
assert check_valid(f)
assert check_valid(f, 1) is False
assert check_valid(f, x=1) is False
f = make_func('x')
assert check_valid(f) is incomplete
assert check_valid(f, 1)
assert check_valid(f, x=1)
assert check_valid(f, 1, x=2) is False
assert check_valid(f, 1, y=2) is False
assert check_valid(f, 1, 2) is False
assert check_valid(f, x=1, y=2) is False
f = make_func('x=1')
assert check_valid(f)
assert check_valid(f, 1)
assert check_valid(f, x=1)
assert check_valid(f, 1, x=2) is False
assert check_valid(f, 1, y=2) is False
assert check_valid(f, 1, 2) is False
assert check_valid(f, x=1, y=2) is False
f = make_func('*args')
assert check_valid(f)
assert check_valid(f, 1)
assert check_valid(f, 1, 2)
assert check_valid(f, x=1) is False
f = make_func('**kwargs')
assert check_valid(f)
assert check_valid(f, x=1)
assert check_valid(f, x=1, y=2)
assert check_valid(f, 1) is False
f = make_func('x, *args')
assert check_valid(f) is incomplete
assert check_valid(f, 1)
assert check_valid(f, 1, 2)
assert check_valid(f, x=1)
assert check_valid(f, 1, x=1) is False
assert check_valid(f, 1, y=1) is False
f = make_func('x, y=1, **kwargs')
assert check_valid(f) is incomplete
assert check_valid(f, 1)
assert check_valid(f, x=1)
assert check_valid(f, 1, 2)
assert check_valid(f, x=1, y=2, z=3)
assert check_valid(f, 1, 2, y=3) is False
f = make_func('a, b, c=3, d=4')
assert check_valid(f) is incomplete
assert check_valid(f, 1) is incomplete
assert check_valid(f, 1, 2)
assert check_valid(f, 1, c=3) is incomplete
assert check_valid(f, 1, e=3) is False
assert check_valid(f, 1, 2, e=3) is False
assert check_valid(f, 1, 2, b=3) is False
assert check_valid(1) is False
def test_is_valid_py3(check_valid=is_valid_args, incomplete=False):
if not PY3:
return
orig_check_valid = check_valid
check_valid = lambda func, *args, **kwargs: orig_check_valid(func, args, kwargs)
f = make_func('x, *, y=1')
assert check_valid(f) is incomplete
assert check_valid(f, 1)
assert check_valid(f, x=1)
assert check_valid(f, 1, y=2)
assert check_valid(f, 1, 2) is False
assert check_valid(f, 1, z=2) is False
f = make_func('x, *args, y=1')
assert check_valid(f) is incomplete
assert check_valid(f, 1)
assert check_valid(f, x=1)
assert check_valid(f, 1, y=2)
assert check_valid(f, 1, 2, y=2)
assert check_valid(f, 1, 2)
assert check_valid(f, 1, z=2) is False
f = make_func('*, y=1')
assert check_valid(f)
assert check_valid(f, 1) is False
assert check_valid(f, y=1)
assert check_valid(f, z=1) is False
f = make_func('x, *, y')
assert check_valid(f) is incomplete
assert check_valid(f, 1) is incomplete
assert check_valid(f, x=1) is incomplete
assert check_valid(f, 1, y=2)
assert check_valid(f, x=1, y=2)
assert check_valid(f, 1, 2) is False
assert check_valid(f, 1, z=2) is False
assert check_valid(f, 1, y=1, z=2) is False
f = make_func('x=1, *, y, z=3')
assert check_valid(f) is incomplete
assert check_valid(f, 1, z=3) is incomplete
assert check_valid(f, y=2)
assert check_valid(f, 1, y=2)
assert check_valid(f, x=1, y=2)
assert check_valid(f, x=1, y=2, z=3)
assert check_valid(f, 1, x=1, y=2) is False
assert check_valid(f, 1, 3, y=2) is False
f = make_func('w, x=2, *args, y, z=4')
assert check_valid(f) is incomplete
assert check_valid(f, 1) is incomplete
assert check_valid(f, 1, y=3)
f = make_func('a, b, c=3, d=4, *args, e=5, f=6, g, h')
assert check_valid(f) is incomplete
assert check_valid(f, 1) is incomplete
assert check_valid(f, 1, 2) is incomplete
assert check_valid(f, 1, 2, g=7) is incomplete
assert check_valid(f, 1, 2, g=7, h=8)
assert check_valid(f, 1, 2, 3, 4, 5, 6, 7, 8, 9) is incomplete
f = make_func('a: int, b: float')
assert check_valid(f) is incomplete
assert check_valid(f, 1) is incomplete
assert check_valid(f, b=1) is incomplete
assert check_valid(f, 1, 2)
f = make_func('(a: int, b: float) -> float')
assert check_valid(f) is incomplete
assert check_valid(f, 1) is incomplete
assert check_valid(f, b=1) is incomplete
assert check_valid(f, 1, 2)
f.__signature__ = 34
assert check_valid(f) is False
class RaisesValueError(object):
def __call__(self):
pass
@property
def __signature__(self):
raise ValueError('Testing Python 3.4')
f = RaisesValueError()
assert check_valid(f) is None
def test_is_partial():
test_is_valid(check_valid=is_partial_args, incomplete=True)
test_is_valid_py3(check_valid=is_partial_args, incomplete=True)
def test_is_valid_curry():
def check_curry(func, args, kwargs, incomplete=True):
try:
curry(func)(*args, **kwargs)
curry(func, *args)(**kwargs)
curry(func, **kwargs)(*args)
curry(func, *args, **kwargs)()
if not isinstance(func, type(lambda: None)):
return None
return incomplete
except ValueError:
return True
except TypeError:
return False
check_valid = functools.partial(check_curry, incomplete=True)
test_is_valid(check_valid=check_valid, incomplete=True)
test_is_valid_py3(check_valid=check_valid, incomplete=True)
check_valid = functools.partial(check_curry, incomplete=False)
test_is_valid(check_valid=check_valid, incomplete=False)
test_is_valid_py3(check_valid=check_valid, incomplete=False)
def test_func_keyword():
def f(func=None):
pass
assert is_valid_args(f, (), {})
assert is_valid_args(f, (None,), {})
assert is_valid_args(f, (), {'func': None})
assert is_valid_args(f, (None,), {'func': None}) is False
assert is_partial_args(f, (), {})
assert is_partial_args(f, (None,), {})
assert is_partial_args(f, (), {'func': None})
assert is_partial_args(f, (None,), {'func': None}) is False
def test_has_unknown_args():
assert has_unknown_args(1) is False
assert has_unknown_args(map) is False
assert has_unknown_args(make_func('')) is False
assert has_unknown_args(make_func('x, y, z')) is False
assert has_unknown_args(make_func('*args'))
assert has_unknown_args(make_func('**kwargs')) is False
assert has_unknown_args(make_func('x, y, *args, **kwargs'))
assert has_unknown_args(make_func('x, y, z=1')) is False
assert has_unknown_args(make_func('x, y, z=1, **kwargs')) is False
if PY3:
f = make_func('*args')
f.__signature__ = 34
assert has_unknown_args(f) is False
class RaisesValueError(object):
def __call__(self):
pass
@property
def __signature__(self):
raise ValueError('Testing Python 3.4')
f = RaisesValueError()
assert has_unknown_args(f)
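# A minimal sketch (not part of the original test module) spelling out the
# semantics the `incomplete` flag toggles above: is_valid_args asks whether
# func(*args, **kwargs) could be called as-is, while is_partial_args asks
# whether further arguments could still complete the call.
def _example_valid_vs_partial():
    def f(a, b=1):
        return a + b
    assert is_valid_args(f, (0,), {})             # f(0) is callable right now
    assert is_valid_args(f, (), {}) is False      # 'a' is still missing
    assert is_partial_args(f, (), {})             # curry(f)() could be completed later
    assert is_partial_args(f, (0,), {'c': 2}) is False  # 'c' can never be accepted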
|
|
# vim:ts=4:sw=4:et:
# Copyright 2012-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import tempfile
import json
import os
import subprocess
import signal
try:
import pwd
except ImportError:
# Windows
pass
import pywatchman
import time
import threading
import uuid
import traceback
import sys
import TempDir
tls = threading.local()
def setSharedInstance(inst):
global tls
tls.instance = inst
def getSharedInstance():
global tls
if hasattr(tls, 'instance'):
return tls.instance
if os.environ.get('TESTING_VIA_BUCK', '0') == '1':
# Ensure that the temporary dir is configured
TempDir.get_temp_dir().get_dir()
tls.instance = Instance()
tls.instance.start()
return tls.instance
def hasSharedInstance():
global tls
return hasattr(tls, 'instance')
class InitWithFilesMixin(object):
def _init_state(self):
self.base_dir = tempfile.mkdtemp(prefix='inst')
# no separate user directory here -- that's only in InitWithDirMixin
self.user_dir = None
self.cfg_file = os.path.join(self.base_dir, "config.json")
self.log_file_name = os.path.join(self.base_dir, "log")
self.cli_log_file_name = os.path.join(self.base_dir, 'cli-log')
self.pid_file = os.path.join(self.base_dir, "pid")
if os.name == 'nt':
self.sock_file = '\\\\.\\pipe\\watchman-test-%s' % uuid.uuid4().hex
else:
self.sock_file = os.path.join(self.base_dir, "sock")
self.state_file = os.path.join(self.base_dir, "state")
def get_state_args(self):
return [
'--sockname={0}'.format(self.sock_file),
'--logfile={0}'.format(self.log_file_name),
'--statefile={0}'.format(self.state_file),
'--pidfile={0}'.format(self.pid_file),
]
class InitWithDirMixin(object):
'''A mixin to allow setting up a state dir rather than a state file. This is
only meant to test state dir creation and permissions -- most operations are
unlikely to work.
'''
def _init_state(self):
self.base_dir = tempfile.mkdtemp(prefix='inst')
self.cfg_file = os.path.join(self.base_dir, 'config.json')
# This needs to be separate from the log_file_name because the
# log_file_name won't exist in the beginning, but the cli_log_file_name
# will.
self.cli_log_file_name = os.path.join(self.base_dir, 'cli-log')
# This doesn't work on Windows, but we don't expect to be hitting this
# codepath on Windows anyway
username = pwd.getpwuid(os.getuid())[0]
self.user_dir = os.path.join(self.base_dir, '%s-state' % username)
self.log_file_name = os.path.join(self.user_dir, 'log')
self.sock_file = os.path.join(self.user_dir, 'sock')
self.state_file = os.path.join(self.user_dir, 'state')
def get_state_args(self):
return ['--test-state-dir={0}'.format(self.base_dir)]
class _Instance(object):
# Tracks a running watchman instance. It is created with an
# overridden global configuration file; you may pass that
# in to the constructor
def __init__(self, config={}, start_timeout=5.0, debug_watchman=False):
self.start_timeout = start_timeout
self.base_dir = tempfile.mkdtemp(prefix='inst')
self._init_state()
self.proc = None
self.pid = None
self.debug_watchman = debug_watchman
with open(self.cfg_file, "w") as f:
f.write(json.dumps(config))
        # The server log file doesn't exist yet (watchman hasn't started), so
        # only the CLI log file is opened here.
self.cli_log_file = open(self.cli_log_file_name, 'w+')
def __del__(self):
self.stop()
def getSockPath(self):
return self.sock_file
def getCLILogContents(self):
with open(self.cli_log_file_name, 'r') as f:
return f.read()
def getServerLogContents(self):
with open(self.log_file_name, 'r') as f:
return f.read()
def stop(self):
if self.proc:
self.proc.kill()
self.proc.wait()
self.proc = None
self.cli_log_file.close()
def watchmanBinary(self):
return os.environ.get('WATCHMAN_BINARY', 'watchman')
def commandViaCLI(self, cmd):
'''a very bare bones helper to test the site spawner functionality'''
args = [
self.watchmanBinary(),
'--log-level=2',
]
args.extend(self.get_state_args())
args.extend(cmd)
env = os.environ.copy()
env["WATCHMAN_CONFIG_FILE"] = self.cfg_file
proc = subprocess.Popen(args,
env=env,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return proc.communicate()
def start(self):
args = [
self.watchmanBinary(),
'--foreground',
'--log-level=2',
]
args.extend(self.get_state_args())
env = os.environ.copy()
env["WATCHMAN_CONFIG_FILE"] = self.cfg_file
self.proc = subprocess.Popen(args,
env=env,
stdin=None,
stdout=self.cli_log_file,
stderr=self.cli_log_file)
if self.debug_watchman:
print('Watchman instance PID: ' + str(self.proc.pid))
if pywatchman.compat.PYTHON3:
user_input = input
else:
user_input = raw_input
user_input('Press Enter to continue...')
# wait for it to come up
deadline = time.time() + self.start_timeout
while time.time() < deadline:
try:
client = pywatchman.client(sockpath=self.sock_file)
self.pid = client.query('get-pid')['pid']
break
except Exception as e:
t, val, tb = sys.exc_info()
time.sleep(0.1)
finally:
client.close()
if self.pid is None:
# self.proc didn't come up: wait for it to die
self.stop()
pywatchman.compat.reraise(t, val, tb)
def _waitForSuspend(self, suspended, timeout):
if os.name == 'nt':
# There's no 'ps' equivalent we can use
return True
# Check the information in the 'ps' output
deadline = time.time() + timeout
        state = 's' if sys.platform.startswith('sunos') else 'state'
while time.time() < deadline:
out, err = subprocess.Popen(['ps', '-o', state, '-p',
str(self.pid)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
status = out.splitlines()[-1]
is_suspended = 'T' in status.decode('utf-8', 'surrogateescape')
if is_suspended == suspended:
return True
time.sleep(0.03)
return False
def suspend(self):
if self.proc.poll() or self.pid <= 1:
raise Exception("watchman process isn't running")
if os.name == 'nt':
subprocess.check_call(['susres.exe', 'suspend', str(self.pid)])
else:
os.kill(self.pid, signal.SIGSTOP)
if not self._waitForSuspend(True, 5):
raise Exception("watchman process didn't stop in 5 seconds")
def resume(self):
if self.proc.poll() or self.pid <= 1:
raise Exception("watchman process isn't running")
if os.name == 'nt':
subprocess.check_call(['susres.exe', 'resume', str(self.pid)])
else:
os.kill(self.pid, signal.SIGCONT)
if not self._waitForSuspend(False, 5):
raise Exception("watchman process didn't resume in 5 seconds")
class Instance(_Instance, InitWithFilesMixin):
pass
class InstanceWithStateDir(_Instance, InitWithDirMixin):
pass
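# A hedged usage sketch (assumes a `watchman` binary is available on PATH, or
# pointed to by WATCHMAN_BINARY); this is illustrative and not part of the
# original test harness.
if __name__ == '__main__':
    inst = Instance(config={})
    inst.start()
    try:
        client = pywatchman.client(sockpath=inst.getSockPath())
        print(client.query('version'))
        client.close()
    finally:
        inst.stop()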
|
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from mock import MagicMock
import testtools
from testtools import matchers
import swiftclient.client
from trove.tests.fakes.swift import SwiftClientStub
from trove.common.context import TroveContext
from trove.common import remote
from trove.common import exception
from trove.common import cfg
class TestRemote(testtools.TestCase):
def setUp(self):
super(TestRemote, self).setUp()
def tearDown(self):
super(TestRemote, self).tearDown()
def test_creation(self):
swiftclient.client.Connection.get_auth = MagicMock(return_value=None)
conn = swiftclient.client.Connection()
self.assertIsNone(conn.get_auth())
def test_create_swift_client(self):
mock_resp = MagicMock()
swiftclient.client.Connection.get_container = MagicMock(
return_value=["text", mock_resp])
service_catalog = [{'endpoints': [{'publicURL': 'example.com'}],
'type': 'object-store'}]
client = remote.create_swift_client(TroveContext(
tenant='123',
service_catalog=service_catalog))
headers, container = client.get_container('bob')
self.assertIs(headers, "text")
self.assertIs(container, mock_resp)
def test_empty_account(self):
"""
        Test an account that has no containers and no objects.
"""
# setup expectation
swift_stub = SwiftClientStub()
swift_stub.with_account('123223')
# interact
conn = swiftclient.client.Connection()
account_info = conn.get_account()
self.assertThat(account_info, matchers.Not(matchers.Is(None)))
self.assertThat(len(account_info), matchers.Is(2))
self.assertThat(account_info, matchers.IsInstance(tuple))
self.assertThat(account_info[0], matchers.IsInstance(dict))
self.assertThat(account_info[0],
matchers.KeysEqual('content-length', 'accept-ranges',
'x-timestamp', 'x-trans-id', 'date',
'x-account-bytes-used',
'x-account-container-count',
'content-type',
'x-account-object-count'))
self.assertThat(account_info[1], matchers.IsInstance(list))
self.assertThat(len(account_info[1]), matchers.Is(0))
def test_one_container(self):
"""
        Ensure behavior is normal with a single container.
"""
# setup expectation
swift_stub = SwiftClientStub()
swift_stub.with_account('123223')
cont_name = 'a-container-name'
swift_stub.with_container(cont_name)
# interact
conn = swiftclient.client.Connection()
conn.get_auth()
conn.put_container(cont_name)
# get headers plus container metadata
self.assertThat(len(conn.get_account()), matchers.Is(2))
# verify container details
account_containers = conn.get_account()[1]
self.assertThat(len(account_containers), matchers.Is(1))
self.assertThat(account_containers[0],
matchers.KeysEqual('count', 'bytes', 'name'))
self.assertThat(account_containers[0]['name'], matchers.Is(cont_name))
# get container details
cont_info = conn.get_container(cont_name)
self.assertIsNotNone(cont_info)
self.assertThat(cont_info[0], matchers.KeysEqual('content-length',
'x-container-object-count', 'accept-ranges',
'x-container-bytes-used', 'x-timestamp',
'x-trans-id', 'date', 'content-type'))
self.assertThat(len(cont_info[1]), matchers.Equals(0))
# remove container
swift_stub.without_container(cont_name)
with testtools.ExpectedException(swiftclient.ClientException):
conn.get_container(cont_name)
# ensure there are no more containers in account
self.assertThat(len(conn.get_account()[1]), matchers.Is(0))
def test_one_object(self):
swift_stub = SwiftClientStub()
swift_stub.with_account('123223')
swift_stub.with_container('bob')
swift_stub.with_object('bob', 'test', 'test_contents')
# create connection
conn = swiftclient.client.Connection()
# test container lightly
cont_info = conn.get_container('bob')
self.assertIsNotNone(cont_info)
self.assertThat(cont_info[0],
matchers.KeysEqual('content-length',
'x-container-object-count',
'accept-ranges',
'x-container-bytes-used',
'x-timestamp', 'x-trans-id', 'date',
'content-type'))
cont_objects = cont_info[1]
self.assertThat(len(cont_objects), matchers.Equals(1))
obj_1 = cont_objects[0]
self.assertThat(obj_1, matchers.Equals(
{'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950',
'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test',
'content_type': 'application/octet-stream',
'contents': 'test_contents'}))
# test object api - not much to do here
self.assertThat(conn.get_object('bob', 'test')[1],
matchers.Is('test_contents'))
# test remove object
swift_stub.without_object('bob', 'test')
# interact
with testtools.ExpectedException(swiftclient.ClientException):
conn.delete_object('bob', 'test')
self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(0))
def test_two_objects(self):
swift_stub = SwiftClientStub()
swift_stub.with_account('123223')
swift_stub.with_container('bob')
swift_stub.with_container('bob2')
swift_stub.with_object('bob', 'test', 'test_contents')
swift_stub.with_object('bob', 'test2', 'test_contents2')
conn = swiftclient.client.Connection()
self.assertIs(len(conn.get_account()), 2)
cont_info = conn.get_container('bob')
self.assertIsNotNone(cont_info)
self.assertThat(cont_info[0],
matchers.KeysEqual('content-length',
'x-container-object-count',
'accept-ranges',
'x-container-bytes-used',
'x-timestamp', 'x-trans-id', 'date',
'content-type'))
self.assertThat(len(cont_info[1]), matchers.Equals(2))
self.assertThat(cont_info[1][0], matchers.Equals(
{'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950',
'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test',
'content_type': 'application/octet-stream',
'contents': 'test_contents'}))
self.assertThat(conn.get_object('bob', 'test')[1],
matchers.Is('test_contents'))
self.assertThat(conn.get_object('bob', 'test2')[1],
matchers.Is('test_contents2'))
swift_stub.without_object('bob', 'test')
with testtools.ExpectedException(swiftclient.ClientException):
conn.delete_object('bob', 'test')
self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(1))
swift_stub.without_container('bob')
with testtools.ExpectedException(swiftclient.ClientException):
conn.get_container('bob')
self.assertThat(len(conn.get_account()), matchers.Is(2))
def test_nonexisting_container(self):
"""
        When a container that does not exist is accessed, a 404 error is returned.
        """
swift_stub = SwiftClientStub()
swift_stub.with_account('123223')
swift_stub.with_container('existing')
conn = swiftclient.client.Connection()
with testtools.ExpectedException(swiftclient.ClientException):
conn.get_container('nonexisting')
def test_replace_object(self):
"""
        Ensure that updating an object leaves the container's object count
        unchanged and updates the object's contents.
"""
swift_stub = SwiftClientStub()
swift_stub.with_account('1223df2')
swift_stub.with_container('new-container')
swift_stub.with_object('new-container', 'new-object',
'new-object-contents')
conn = swiftclient.client.Connection()
conn.put_object('new-container', 'new-object', 'new-object-contents')
obj_resp = conn.get_object('new-container', 'new-object')
self.assertThat(obj_resp, matchers.Not(matchers.Is(None)))
self.assertThat(len(obj_resp), matchers.Is(2))
self.assertThat(obj_resp[1], matchers.Is('new-object-contents'))
        # Set the expected behavior. This is trivial here because it mirrors the
        # intended behavior, but keep in mind it exists only to support testing
        # of trove components.
swift_stub.with_object('new-container', 'new-object',
'updated-object-contents')
conn.put_object('new-container', 'new-object',
'updated-object-contents')
obj_resp = conn.get_object('new-container', 'new-object')
self.assertThat(obj_resp, matchers.Not(matchers.Is(None)))
self.assertThat(len(obj_resp), matchers.Is(2))
self.assertThat(obj_resp[1], matchers.Is('updated-object-contents'))
# ensure object count has not increased
self.assertThat(len(conn.get_container('new-container')[1]),
matchers.Is(1))
class TestCreateCinderClient(testtools.TestCase):
def setUp(self):
super(TestCreateCinderClient, self).setUp()
self.volumev2_public_url = 'http://publicURL/v2'
self.volume_public_url_region_two = 'http://publicURL-r2/v1'
self.service_catalog = [
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': self.volumev2_public_url,
}
],
'type': 'volumev2'
},
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': 'http://publicURL-r1/v1',
},
{
'region': 'RegionTwo',
'publicURL': self.volume_public_url_region_two,
}
],
'type': 'volume'
}
]
def tearDown(self):
super(TestCreateCinderClient, self).tearDown()
cfg.CONF.clear_override('cinder_url')
cfg.CONF.clear_override('cinder_service_type')
cfg.CONF.clear_override('os_region_name')
def test_create_with_no_conf_no_catalog(self):
self.assertRaises(exception.EmptyCatalog,
remote.create_cinder_client,
TroveContext())
def test_create_with_conf_override(self):
cinder_url_from_conf = 'http://example.com'
tenant_from_ctx = 'abc'
cfg.CONF.set_override('cinder_url', cinder_url_from_conf)
client = remote.create_cinder_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s/%s' % (cinder_url_from_conf, tenant_from_ctx),
client.client.management_url)
def test_create_with_conf_override_trailing_slash(self):
cinder_url_from_conf = 'http://example.com/'
tenant_from_ctx = 'abc'
cfg.CONF.set_override('cinder_url', cinder_url_from_conf)
client = remote.create_cinder_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s%s' % (cinder_url_from_conf, tenant_from_ctx),
client.client.management_url)
def test_create_with_catalog_and_default_service_type(self):
client = remote.create_cinder_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.volumev2_public_url,
client.client.management_url)
def test_create_with_catalog_all_opts(self):
cfg.CONF.set_override('cinder_service_type', 'volume')
cfg.CONF.set_override('os_region_name', 'RegionTwo')
client = remote.create_cinder_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.volume_public_url_region_two,
client.client.management_url)
class TestCreateNovaClient(testtools.TestCase):
def setUp(self):
super(TestCreateNovaClient, self).setUp()
self.compute_public_url = 'http://publicURL/v2'
self.computev3_public_url_region_two = 'http://publicURL-r2/v3'
self.service_catalog = [
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': self.compute_public_url,
}
],
'type': 'compute'
},
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': 'http://publicURL-r1/v1',
},
{
'region': 'RegionTwo',
'publicURL': self.computev3_public_url_region_two,
}
],
'type': 'computev3'
}
]
def tearDown(self):
super(TestCreateNovaClient, self).tearDown()
cfg.CONF.clear_override('nova_compute_url')
cfg.CONF.clear_override('nova_compute_service_type')
cfg.CONF.clear_override('os_region_name')
def test_create_with_no_conf_no_catalog(self):
self.assertRaises(exception.EmptyCatalog,
remote.create_nova_client,
TroveContext())
def test_create_with_conf_override(self):
nova_url_from_conf = 'http://example.com'
tenant_from_ctx = 'abc'
cfg.CONF.set_override('nova_compute_url', nova_url_from_conf)
client = remote.create_nova_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s/%s' % (nova_url_from_conf, tenant_from_ctx),
client.client.management_url)
def test_create_with_conf_override_trailing_slash(self):
nova_url_from_conf = 'http://example.com/'
tenant_from_ctx = 'abc'
cfg.CONF.set_override('nova_compute_url', nova_url_from_conf)
client = remote.create_nova_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s%s' % (nova_url_from_conf, tenant_from_ctx),
client.client.management_url)
def test_create_with_catalog_and_default_service_type(self):
client = remote.create_nova_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.compute_public_url,
client.client.management_url)
def test_create_with_catalog_all_opts(self):
cfg.CONF.set_override('nova_compute_service_type', 'computev3')
cfg.CONF.set_override('os_region_name', 'RegionTwo')
client = remote.create_nova_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.computev3_public_url_region_two,
client.client.management_url)
class TestCreateHeatClient(testtools.TestCase):
def setUp(self):
super(TestCreateHeatClient, self).setUp()
self.heat_public_url = 'http://publicURL/v2'
self.heatv3_public_url_region_two = 'http://publicURL-r2/v3'
self.service_catalog = [
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': self.heat_public_url,
}
],
'type': 'orchestration'
},
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': 'http://publicURL-r1/v1',
},
{
'region': 'RegionTwo',
'publicURL': self.heatv3_public_url_region_two,
}
],
'type': 'orchestrationv3'
}
]
def tearDown(self):
super(TestCreateHeatClient, self).tearDown()
cfg.CONF.clear_override('heat_url')
cfg.CONF.clear_override('heat_service_type')
cfg.CONF.clear_override('os_region_name')
def test_create_with_no_conf_no_catalog(self):
self.assertRaises(exception.EmptyCatalog,
remote.create_heat_client,
TroveContext())
def test_create_with_conf_override(self):
heat_url_from_conf = 'http://example.com'
tenant_from_ctx = 'abc'
cfg.CONF.set_override('heat_url', heat_url_from_conf)
client = remote.create_heat_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s/%s' % (heat_url_from_conf, tenant_from_ctx),
client.http_client.endpoint)
def test_create_with_conf_override_trailing_slash(self):
heat_url_from_conf = 'http://example.com/'
tenant_from_ctx = 'abc'
cfg.CONF.set_override('heat_url', heat_url_from_conf)
client = remote.create_heat_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s%s' % (heat_url_from_conf, tenant_from_ctx),
client.http_client.endpoint)
def test_create_with_catalog_and_default_service_type(self):
client = remote.create_heat_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.heat_public_url,
client.http_client.endpoint)
def test_create_with_catalog_all_opts(self):
cfg.CONF.set_override('heat_service_type', 'orchestrationv3')
cfg.CONF.set_override('os_region_name', 'RegionTwo')
client = remote.create_heat_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.heatv3_public_url_region_two,
client.http_client.endpoint)
class TestCreateSwiftClient(testtools.TestCase):
def setUp(self):
super(TestCreateSwiftClient, self).setUp()
self.swift_public_url = 'http://publicURL/v2'
self.swiftv3_public_url_region_two = 'http://publicURL-r2/v3'
self.service_catalog = [
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': self.swift_public_url,
}
],
'type': 'object-store'
},
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': 'http://publicURL-r1/v1',
},
{
'region': 'RegionTwo',
'publicURL': self.swiftv3_public_url_region_two,
}
],
'type': 'object-storev3'
}
]
def tearDown(self):
super(TestCreateSwiftClient, self).tearDown()
cfg.CONF.clear_override('swift_url')
cfg.CONF.clear_override('swift_service_type')
cfg.CONF.clear_override('os_region_name')
def test_create_with_no_conf_no_catalog(self):
self.assertRaises(exception.EmptyCatalog,
remote.create_swift_client,
TroveContext())
def test_create_with_conf_override(self):
swift_url_from_conf = 'http://example.com/AUTH_'
tenant_from_ctx = 'abc'
cfg.CONF.set_override('swift_url', swift_url_from_conf)
client = remote.create_swift_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s%s' % (swift_url_from_conf, tenant_from_ctx),
client.url)
def test_create_with_catalog_and_default_service_type(self):
client = remote.create_swift_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.swift_public_url,
client.url)
def test_create_with_catalog_all_opts(self):
cfg.CONF.set_override('swift_service_type', 'object-storev3')
cfg.CONF.set_override('os_region_name', 'RegionTwo')
client = remote.create_swift_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.swiftv3_public_url_region_two,
client.url)
class TestEndpoints(testtools.TestCase):
"""
Copied from glance/tests/unit/test_auth.py.
"""
def setUp(self):
super(TestEndpoints, self).setUp()
self.service_catalog = [
{
'endpoint_links': [],
'endpoints': [
{
'adminURL': 'http://localhost:8080/',
'region': 'RegionOne',
'internalURL': 'http://internalURL/',
'publicURL': 'http://publicURL/',
},
{
'adminURL': 'http://localhost:8081/',
'region': 'RegionTwo',
'internalURL': 'http://internalURL2/',
'publicURL': 'http://publicURL2/',
},
],
'type': 'object-store',
'name': 'Object Storage Service',
}
]
def test_get_endpoint_empty_catalog(self):
self.assertRaises(exception.EmptyCatalog,
remote.get_endpoint,
None)
def test_get_endpoint_with_custom_server_type(self):
endpoint = remote.get_endpoint(self.service_catalog,
service_type='object-store',
endpoint_region='RegionOne')
self.assertEqual('http://publicURL/', endpoint)
def test_get_endpoint_with_custom_endpoint_type(self):
endpoint = remote.get_endpoint(self.service_catalog,
service_type='object-store',
endpoint_type='internalURL',
endpoint_region='RegionOne')
self.assertEqual('http://internalURL/', endpoint)
def test_get_endpoint_raises_with_ambiguous_endpoint_region(self):
self.assertRaises(exception.RegionAmbiguity,
remote.get_endpoint,
self.service_catalog,
service_type='object-store')
def test_get_endpoint_raises_with_invalid_service_type(self):
self.assertRaises(exception.NoServiceEndpoint,
remote.get_endpoint,
self.service_catalog,
service_type='foo')
def test_get_endpoint_raises_with_invalid_endpoint_type(self):
self.assertRaises(exception.NoServiceEndpoint,
remote.get_endpoint,
self.service_catalog,
service_type='object-store',
endpoint_type='foo',
endpoint_region='RegionOne')
def test_get_endpoint_raises_with_invalid_endpoint_region(self):
self.assertRaises(exception.NoServiceEndpoint,
remote.get_endpoint,
self.service_catalog,
service_type='object-store',
endpoint_region='foo',
endpoint_type='internalURL')
def test_get_endpoint_ignores_missing_type(self):
service_catalog = [
{
'name': 'Other Service',
},
{
'endpoint_links': [],
'endpoints': [
{
'adminURL': 'http://localhost:8080/',
'region': 'RegionOne',
'internalURL': 'http://internalURL/',
'publicURL': 'http://publicURL/',
},
{
'adminURL': 'http://localhost:8081/',
'region': 'RegionTwo',
'internalURL': 'http://internalURL2/',
'publicURL': 'http://publicURL2/',
},
],
'type': 'object-store',
'name': 'Object Storage Service',
}
]
endpoint = remote.get_endpoint(service_catalog,
service_type='object-store',
endpoint_region='RegionOne')
self.assertEqual('http://publicURL/', endpoint)
|
|
import json
import math
import os
import shutil
import sys
import time
import unittest
from multiprocessing import Process
import requests
from app import configs
from app import generator
from common import posts
ec = configs.EnjoliverConfig(importer=__file__)
ec.api_uri = "http://127.0.0.1:5000"
class TestAPIGunicorn(unittest.TestCase):
p_matchbox = Process
p_api = Process
inte_path = "%s" % os.path.dirname(__file__)
tests_path = "%s" % os.path.dirname(inte_path)
app_path = os.path.dirname(tests_path)
project_path = os.path.dirname(app_path)
assets_path = "%s/matchbox/assets" % project_path
test_matchbox_path = "%s/test_matchbox" % tests_path
@staticmethod
def process_target_matchbox():
os.environ["ENJOLIVER_MATCHBOX_PATH"] = TestAPIGunicorn.test_matchbox_path
os.environ["ENJOLIVER_MATCHBOX_ASSETS"] = TestAPIGunicorn.assets_path
cmd = [
"%s" % sys.executable,
"%s/manage.py" % TestAPIGunicorn.project_path,
"matchbox"
]
print("PID -> %s\n"
"exec -> %s\n" % (
os.getpid(), " ".join(cmd)))
sys.stdout.flush()
os.execve(cmd[0], cmd, os.environ)
@staticmethod
def process_target_api():
os.environ["ENJOLIVER_API_URI"] = ec.api_uri
cmd = [
"%s" % sys.executable,
"%s/manage.py" % TestAPIGunicorn.project_path,
"gunicorn",
]
os.execve(cmd[0], cmd, os.environ)
@classmethod
def setUpClass(cls):
time.sleep(0.2)
cls.clean_sandbox()
try:
os.remove(ec.db_path)
except OSError:
pass
shutil.rmtree(ec.ignition_journal_dir, ignore_errors=True)
cls.p_matchbox = Process(target=TestAPIGunicorn.process_target_matchbox)
cls.p_api = Process(target=TestAPIGunicorn.process_target_api)
print("PPID -> %s\n" % os.getpid())
cls.p_matchbox.start()
assert cls.p_matchbox.is_alive() is True
cls.p_api.start()
assert cls.p_api.is_alive() is True
cls.matchbox_running(ec.matchbox_uri, cls.p_matchbox)
cls.api_running(ec.api_uri, cls.p_api)
@classmethod
def tearDownClass(cls):
print("TERM -> %d\n" % cls.p_matchbox.pid)
sys.stdout.flush()
cls.p_matchbox.terminate()
cls.p_matchbox.join(timeout=5)
cls.p_api.terminate()
cls.p_api.join(timeout=5)
time.sleep(0.2)
@staticmethod
def matchbox_running(matchbox_endpoint, p_matchbox):
response_body = ""
response_code = 404
for i in range(10):
assert p_matchbox.is_alive() is True
try:
request = requests.get(matchbox_endpoint)
response_body = request.content
response_code = request.status_code
request.close()
break
except requests.exceptions.ConnectionError:
pass
time.sleep(0.2)
assert b"matchbox\n" == response_body
assert 200 == response_code
@staticmethod
def api_running(api_endpoint, p_api):
response_code = 404
for i in range(10):
assert p_api.is_alive() is True
try:
request = requests.get(api_endpoint)
response_code = request.status_code
request.close()
break
except requests.exceptions.ConnectionError:
pass
time.sleep(0.2)
assert 200 == response_code
@staticmethod
def clean_sandbox():
dirs = ["%s/%s" % (TestAPIGunicorn.test_matchbox_path, k) for k in (
"profiles", "groups")]
for d in dirs:
for f in os.listdir(d):
if ".json" in f:
os.remove("%s/%s" % (d, f))
def setUp(self):
self.assertTrue(self.p_matchbox.is_alive())
self.assertTrue(self.p_api.is_alive())
self.clean_sandbox()
def test_00_healthz(self):
expect = {
u'flask': True,
u'global': False,
u'db': True,
u'discovery': {
'ignition': False, 'ipxe': False
},
u'matchbox': {
u'/': True,
u'/boot.ipxe': True,
u'/boot.ipxe.0': True,
u'/assets': True,
u"/metadata": True
}}
request = requests.get("%s/healthz" % ec.api_uri)
response_body = request.content
response_code = request.status_code
request.close()
self.assertEqual(json.loads(response_body.decode()), expect)
self.assertEqual(503, response_code)
def test_01_boot_ipxe(self):
expect = \
b"#!ipxe\n" \
b"echo start /boot.ipxe\n" \
b":retry_dhcp\n" \
b"dhcp || goto retry_dhcp\n" \
b"chain http://127.0.0.1:5000/ipxe?uuid=${uuid}&mac=${net0/mac:hexhyp}&domain=${domain}&hostname=${hostname}&serial=${serial}\n"
request = requests.get("%s/boot.ipxe" % ec.api_uri)
response_body = request.content
response_code = request.status_code
request.close()
self.assertEqual(response_code, 200)
self.assertEqual(response_body, expect)
def test_01_boot_ipxe_zero(self):
expect = \
b"#!ipxe\n" \
b"echo start /boot.ipxe\n" \
b":retry_dhcp\n" \
b"dhcp || goto retry_dhcp\n" \
b"chain http://127.0.0.1:5000/ipxe?uuid=${uuid}&mac=${net0/mac:hexhyp}&domain=${domain}&hostname=${hostname}&serial=${serial}\n"
request = requests.get("%s/boot.ipxe" % ec.api_uri)
response_body = request.content
response_code = request.status_code
request.close()
self.assertEqual(response_code, 200)
self.assertEqual(response_body, expect)
def test_02_root(self):
request = requests.get("%s/" % ec.api_uri)
response_code = request.status_code
request.close()
self.assertEqual(response_code, 200)
def test_03_ipxe_404(self):
r = requests.get("%s/404" % ec.api_uri)
self.assertEqual(404, r.status_code)
def test_04_ipxe(self):
marker = "%s-%s" % (TestAPIGunicorn.__name__.lower(), self.test_04_ipxe.__name__)
ignition_file = "inte-%s.yaml" % marker
gen = generator.Generator(
api_uri=ec.api_uri,
profile_id="id-%s" % marker,
name="name-%s" % marker,
ignition_id=ignition_file,
matchbox_path=self.test_matchbox_path
)
gen.dumps()
request = requests.get("%s/ipxe" % ec.api_uri)
response_body = request.content.decode()
response_code = request.status_code
request.close()
expect = "#!ipxe\n" \
"kernel " \
"%s/assets/coreos/serve/coreos_production_pxe.vmlinuz " \
"console=ttyS0 console=ttyS1 " \
"coreos.config.url=%s/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp} " \
"coreos.first_boot " \
"coreos.oem.id=pxe\n" \
"initrd %s/assets/coreos/serve/coreos_production_pxe_image.cpio.gz \n" \
"boot\n" % (gen.profile.api_uri, gen.profile.api_uri, gen.profile.api_uri)
self.assertEqual(200, response_code)
self.maxDiff = None
self.assertEqual(expect, response_body)
def test_05_ipxe_selector(self):
mac = "00:00:00:00:00:00"
marker = "%s-%s" % (TestAPIGunicorn.__name__.lower(), self.test_05_ipxe_selector.__name__)
ignition_file = "inte-%s.yaml" % marker
gen = generator.Generator(
api_uri=ec.api_uri,
profile_id="id-%s" % marker,
name="name-%s" % marker,
ignition_id=ignition_file,
selector={"mac": mac},
matchbox_path=self.test_matchbox_path
)
gen.dumps()
r = requests.get("%s/ipxe" % ec.api_uri)
self.assertEqual(200, r.status_code)
request = requests.get("%s/ipxe?mac=%s" % (ec.api_uri, mac))
response_body = request.content.decode()
response_code = request.status_code
request.close()
expect = "#!ipxe\n" \
"kernel %s/assets/coreos/serve/coreos_production_pxe.vmlinuz " \
"console=ttyS0 console=ttyS1 " \
"coreos.config.url=%s/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp} " \
"coreos.first_boot coreos.oem.id=pxe\n" \
"initrd %s/assets/coreos/serve/coreos_production_pxe_image.cpio.gz \n" \
"boot\n" % (gen.profile.api_uri, gen.profile.api_uri, gen.profile.api_uri)
self.maxDiff = None
self.assertEqual(expect, response_body)
self.assertEqual(200, response_code)
def test_06_discovery_00(self):
req = requests.post("%s/discovery" % ec.api_uri, data=json.dumps(posts.M01))
self.assertEqual(200, req.status_code)
response = req.content.decode()
req.close()
self.assertEqual(json.loads(response), {'new-discovery': True})
req = requests.get("%s/discovery" % ec.api_uri)
self.assertEqual(200, req.status_code)
response = json.loads(req.content.decode())
expect = {
u'boot-info': {
u'mac': u'52:54:00:e8:32:5b',
u'uuid': u'b7f5f93a-b029-475f-b3a4-479ba198cb8a'
},
u'interfaces': [
{
u'name': u'eth0',
u'as_boot': True,
u'netmask': 21,
u'mac': u'52:54:00:e8:32:5b',
u'ipv4': u'172.20.0.65',
u'cidrv4': u'172.20.0.65/21',
"gateway": "172.20.0.1",
u'fqdn': None,
}
]
}
self.assertEqual(1, len(response))
first = response[0]
self.assertEqual(first["boot-info"]["uuid"], expect["boot-info"]["uuid"])
self.assertEqual(first["boot-info"]["mac"], expect["boot-info"]["mac"])
self.assertEqual(first["interfaces"][0]["mac"], expect["interfaces"][0]["mac"])
self.assertEqual(first["interfaces"][0]["as_boot"], expect["interfaces"][0]["as_boot"])
def test_06_discovery_01(self):
req = requests.post("%s/discovery" % ec.api_uri, json.dumps(posts.M02))
self.assertEqual(200, req.status_code)
response = req.content.decode()
req.close()
r = json.loads(response)
self.assertEqual({'new-discovery': True}, r)
def test_06_discovery_02(self):
req = requests.post("%s/discovery" % ec.api_uri, json.dumps(posts.M03))
self.assertEqual(200, req.status_code)
response = req.content.decode()
req.close()
self.assertEqual(json.loads(response), {'new-discovery': True})
all_machines = requests.get("%s/discovery" % ec.api_uri)
content = json.loads(all_machines.content.decode())
all_machines.close()
req = requests.post("%s/discovery" % ec.api_uri, json.dumps(posts.M01))
self.assertEqual(200, req.status_code)
response = req.content
req.close()
self.assertEqual(json.loads(response.decode()), {'new-discovery': False})
self.assertEqual(posts.M01["boot-info"]["uuid"], content[0]["boot-info"]["uuid"])
def test_06_discovery_03(self):
for p in posts.ALL:
req = requests.post("%s/discovery" % ec.api_uri, json.dumps(p))
self.assertEqual(200, req.status_code)
req.close()
req = requests.post("%s/discovery" % ec.api_uri, json.dumps(p))
self.assertEqual(200, req.status_code)
response = req.content.decode()
self.assertEqual({'new-discovery': False}, json.loads(response))
req.close()
r = requests.get("%s/discovery" % ec.api_uri)
nb_elt = len(json.loads(r.content.decode()))
r.close()
self.assertEqual(len(posts.ALL), nb_elt)
def test_07_get(self):
"""
Cache non regression
"""
r = requests.get("%s/discovery" % ec.api_uri)
l = len(json.loads(r.content.decode()))
r.close()
self.assertEqual(l, len(posts.ALL))
now = time.time()
nb = 100
for i in range(nb):
r = requests.get("%s/discovery" % ec.api_uri)
r.close()
self.assertTrue(now + (nb // 100) > time.time())
r = requests.get("%s/discovery" % ec.api_uri)
l = len(json.loads(r.content.decode()))
r.close()
self.assertEqual(l, len(posts.ALL))
def test_08_backup(self):
n = int(math.floor(time.time()))
r = requests.post("%s/backup/db" % ec.api_uri)
data = r.content.decode()
s = json.loads(data)
r.close()
self.assertTrue(s["copy"])
self.assertTrue(os.path.isfile(s["dest_fs"]))
os.remove(s["dest_fs"])
self.assertTrue(n < s["ts"])
|
|
import docker
import json
import requests
import subprocess
import synapseclient
from synapseclient import File, Folder
import synapseutils as synu
import zipfile
import os
import base64
import time
#Synapse Id of Challenge
CHALLENGE_SYN_ID = "syn1235"
#Synapse Id of directory that you want the log files to go into
CHALLENGE_LOG_FOLDER = "syn12345"
CHALLENGE_PREDICTION_FOLDER = "syn1235"
## Name of your challenge, defaults to the name of the challenge's project
CHALLENGE_NAME = "Example Synapse Challenge"
## Synapse user IDs of the challenge admins who will be notified by email
## about errors in the scoring script
ADMIN_USER_IDS = ['123234']
leaderboard_tables = {}
config_evaluations = [
#Sub-Challenge 1 (12345)
#Sub-Challenge 2 (23456)
{
'id':12345,
'score_sh':'/score_sc1.sh'
},
{
'id':23456,
'score_sh':'/score_sc2.sh'
}
]
config_evaluations_map = {ev['id']:ev for ev in config_evaluations}
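#For illustration only (using the placeholder ids above): the per-evaluation
#config is looked up by evaluation id, e.g.
#    config_evaluations_map[12345]['score_sh']  ->  '/score_sc1.sh'
#validate_docker() and run_docker() below rely on this lookup.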
def getBearerTokenURL(dockerRequestURL, user, password):
initialReq = requests.get(dockerRequestURL)
auth_headers = initialReq.headers['Www-Authenticate'].replace('"','').split(",")
for head in auth_headers:
if head.startswith("Bearer realm="):
bearerRealm = head.split('Bearer realm=')[1]
elif head.startswith('service='):
service = head.split('service=')[1]
elif head.startswith('scope='):
scope = head.split('scope=')[1]
return("{0}?service={1}&scope={2}".format(bearerRealm,service,scope))
def getAuthToken(dockerRequestURL, user, password):
bearerTokenURL = getBearerTokenURL(dockerRequestURL, user, password)
auth = base64.b64encode(user + ":" + password)
bearerTokenRequest = requests.get(bearerTokenURL,
headers={'Authorization': 'Basic %s' % auth})
return(bearerTokenRequest.json()['token'])
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file),os.path.join(root, file).replace(path+"/",""))
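#Minimal usage sketch for zipdir (hypothetical paths): archive an output
#directory with member names stored relative to that directory, e.g.
#    with zipfile.ZipFile('predictions.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
#        zipdir('/path/to/output', zf)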
def dockerValidate(submission, syn, user, password):
submissionJson = json.loads(submission['entityBundleJSON'])
assert submissionJson['entity'].get('repositoryName') is not None, "Must submit a docker container"
dockerRepo = submissionJson['entity']['repositoryName'].replace("docker.synapse.org/","")
#assert dockerRepo.startswith("docker.synapse.org")
assert submission.get('dockerDigest') is not None, "Must submit a docker container with a docker sha digest"
dockerDigest = submission['dockerDigest']
index_endpoint = 'https://docker.synapse.org'
#dockerImage = dockerRepo + "@" + dockerDigest
#Check if docker is able to be pulled
dockerRequestURL = '{0}/v2/{1}/manifests/{2}'.format(index_endpoint, dockerRepo, dockerDigest)
token = getAuthToken(dockerRequestURL, user, password)
resp = requests.get(dockerRequestURL,
headers={'Authorization': 'Bearer %s' % token})
assert resp.status_code == 200, "Docker image + sha digest must exist"
    #Check the docker image size reported by the Synapse docker registry
dockerSize = sum([layer['size'] for layer in resp.json()['layers']])
    assert dockerSize/1000000000.0 < 1000, "Docker image must be less than a terabyte"
    #Send an email to the challenge admins if the hard drive is full
    #This should be stateless, in case the docker agent code needs changes
preds = synu.walk(syn, CHALLENGE_PREDICTION_FOLDER)
predFolders = preds.next()[1]
predSynId = [synId for name, synId in predFolders if str(submission.id) == name]
logs = synu.walk(syn, CHALLENGE_LOG_FOLDER)
logsFolders = logs.next()[1]
logsSynId = [synId for name, synId in logsFolders if str(submission.id) == name]
if len(predSynId) == 0:
predFolder = syn.store(Folder(submission.id, parent = CHALLENGE_PREDICTION_FOLDER))
predFolder = predFolder.id
else:
predFolder = predSynId[0]
if len(logsSynId) == 0:
logFolder = syn.store(Folder(submission.id, parent = CHALLENGE_LOG_FOLDER))
logFolder = logFolder.id
for participant in submission.contributors:
if participant['principalId'] in ADMIN_USER_IDS:
access = ['CREATE', 'READ', 'DOWNLOAD', 'UPDATE', 'DELETE', 'CHANGE_PERMISSIONS', 'MODERATE', 'CHANGE_SETTINGS']
else:
access = ['READ','DOWNLOAD']
            #Uncomment the setPermissions call below if you want participants to see the prediction files
#syn.setPermissions(predFolder, principalId = participant['principalId'], accessType = access)
syn.setPermissions(logFolder, principalId = participant['principalId'], accessType = access)
else:
logFolder = logsSynId[0]
    #Extend the message below if you want to return the prediction files
    return(True, "Your submission has been validated! While your submission is running, please go here: https://www.synapse.org/#!Synapse:%s to check on your log file." % logFolder)
def dockerRun(submission, scoring_sh, syn, client):
#These are the volumes that you want to mount onto your docker container
OUTPUT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),submission.id)
TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),'test-data')
#These are the locations on the docker that you want your mounted volumes to be + permissions in docker (ro, rw)
#It has to be in this format '/output:rw'
MOUNTED_VOLUMES = {OUTPUT_DIR:'/output:rw',
TESTDATA_DIR:'/test-data:ro'}
#All mounted volumes here in a list
ALL_VOLUMES = [OUTPUT_DIR,TESTDATA_DIR]
    #Look up this submission's log and prediction folders under the challenge folders
allLogs = synu.walk(syn, CHALLENGE_LOG_FOLDER)
logFolder = allLogs.next()
logFolderId = [synId for name, synId in logFolder[1] if name == submission.id][0]
allPreds = synu.walk(syn, CHALLENGE_PREDICTION_FOLDER)
predFolder = allPreds.next()
predFolderId = [synId for name, synId in predFolder[1] if name == submission.id][0]
dockerDigest = submission.get('dockerDigest')
submissionJson = json.loads(submission['entityBundleJSON'])
dockerRepo = submissionJson['entity']['repositoryName']
dockerImage = dockerRepo + "@" + dockerDigest
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
#Mount volumes
volumes = {}
for vol in ALL_VOLUMES:
volumes[vol] = {'bind': MOUNTED_VOLUMES[vol].split(":")[0], 'mode': MOUNTED_VOLUMES[vol].split(":")[1]}
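    #e.g. MOUNTED_VOLUMES[OUTPUT_DIR] == '/output:rw' becomes
    #volumes[OUTPUT_DIR] == {'bind': '/output', 'mode': 'rw'}, the dict form
    #expected by docker-py's containers.run(volumes=...)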
# Run docker image
errors = None
try:
container = client.containers.run(dockerRepo, scoring_sh, detach=True, volumes = volumes, name=submission.id + "_t" + str(int(time.time())), network_disabled=True)
except docker.errors.APIError as e:
container = None
errors = str(e) + "\n"
#Create log file
logFileName = submission.id + "_log.txt"
logSynId = None
    #Create an empty logfile
    open(logFileName, 'w').close()
    #Poll while the docker container is still running (the docker python client
    #does not refresh the container status), sleeping between iterations
if container is not None:
while subprocess.Popen(['docker','inspect','-f','{{.State.Running}}',container.name],stdout = subprocess.PIPE).communicate()[0] == "true\n":
logFileText = container.logs()
with open(logFileName,'w') as logFile:
logFile.write(logFileText)
statinfo = os.stat(logFileName)
            #Only store the log file if it is non-empty and at most 50 KB
if statinfo.st_size > 0 and statinfo.st_size/1000.0 <= 50:
ent = File(logFileName, parent = logFolderId)
try:
logs = syn.store(ent)
logSynId = logs.id
except synapseclient.exceptions.SynapseHTTPError as e:
pass
time.sleep(60)
#Must run again to make sure all the logs are captured
logFileText = container.logs()
with open(logFileName,'w') as logFile:
logFile.write(logFileText)
statinfo = os.stat(logFileName)
        #Only store the log file if it is non-empty and at most 50 KB
if statinfo.st_size > 0 and statinfo.st_size/1000.0 <= 50:
ent = File(logFileName, parent = logFolderId)
try:
logs = syn.store(ent)
logSynId = logs.id
except synapseclient.exceptions.SynapseHTTPError as e:
pass
container.remove()
try:
client.images.remove(dockerImage)
        except Exception:
print("Unable to remove image")
statinfo = os.stat(logFileName)
if statinfo.st_size == 0:
with open(logFileName,'w') as logFile:
if errors is not None:
logFile.write(errors)
else:
logFile.write("No Logs, or logs exceed size limit")
logFile.flush()
ent = File(logFileName, parent = logFolderId)
try:
logs = syn.store(ent)
logSynId = logs.id
except synapseclient.exceptions.SynapseHTTPError as e:
pass
if logSynId is None:
logFile = synu.walk(syn, logFolderId)
logFiles = logFile.next()
logSynId = logFiles[2][0][1]
    #Zip up the predictions and store them in CHALLENGE_PREDICTION_FOLDER
if len(os.listdir(OUTPUT_DIR)) > 0:
zipf = zipfile.ZipFile(submission.id + '_predictions.zip', 'w', zipfile.ZIP_DEFLATED)
zipdir(OUTPUT_DIR, zipf)
zipf.close()
ent = File(submission.id + '_predictions.zip', parent = predFolderId)
predictions = syn.store(ent)
prediction_synId = predictions.id
os.system("rm -rf %s" % OUTPUT_DIR)
os.remove(submission.id + '_predictions.zip')
else:
prediction_synId = None
    #Remove the local log file (the prediction zip was already removed above)
os.remove(logFileName)
return(prediction_synId, logSynId)
def validate_docker(evaluation, submission, syn, user, password):
"""
Find the right validation function and validate the submission.
:returns: (True, message) if validated, (False, message) if
validation fails or throws exception
"""
config = config_evaluations_map[int(evaluation.id)]
results = dockerValidate(submission, syn, user, password)
return(results)
def run_docker(evaluation, submission, syn, client):
"""
Find the right scoring function and score the submission
:returns: (score, message) where score is a dict of stats and message
is text for display to user
"""
config = config_evaluations_map[int(evaluation.id)]
prediction_synId, log_synId = dockerRun(submission,config['score_sh'], syn, client)
if prediction_synId is not None:
        #Uncomment the line below if you want to return the synId of the prediction file
#message = "You can find your prediction file here: https://www.synapse.org/#!Synapse:%s" % prediction_synId
message = "Your prediction file has been stored, but you will not have access to it."
else:
message = "No prediction file generated, please check your log file: https://www.synapse.org/#!Synapse:%s" % log_synId
return (dict(PREDICTION_FILE=prediction_synId, LOG_FILE = log_synId), message)
|
|
import pytest
import pytz
import numpy as np
import pandas as pd
from pandas import (merge_asof, read_csv,
to_datetime, Timedelta)
from pandas.core.reshape.merge import MergeError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge(object):
def read_data(self, datapath, name, dedupe=False):
path = datapath('reshape', 'merge', 'data', name)
x = read_csv(path)
if dedupe:
x = (x.drop_duplicates(['time', 'ticker'], keep='last')
.reset_index(drop=True)
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, 'trades.csv')
self.quotes = self.read_data(datapath, 'quotes.csv', dedupe=True)
self.asof = self.read_data(datapath, 'asof.csv')
self.tolerance = self.read_data(datapath, 'tolerance.csv')
self.allow_exact_matches = self.read_data(datapath,
'allow_exact_matches.csv')
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, 'allow_exact_matches_and_tolerance.csv')
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 3, 7]})
result = pd.merge_asof(left, right, on='a')
assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.048',
'20160525 13:30:00.049',
'20160525 13:30:00.072',
'20160525 13:30:00.075']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL', 'GOOG',
'MSFT'],
'bid': [720.50, 51.95, 51.97, 51.99,
720.50, 97.99, 720.50, 52.01],
'ask': [720.93, 51.96, 51.98, 52.00,
720.93, 98.01, 720.88, 52.03]},
columns=['time', 'ticker', 'bid', 'ask'])
pd.merge_asof(trades, quotes,
on='time',
by='ticker')
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('2ms'))
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT', 'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100],
'bid': [np.nan, 51.97, np.nan,
np.nan, np.nan],
'ask': [np.nan, 51.98, np.nan,
np.nan, np.nan]},
columns=['time', 'ticker', 'price', 'quantity',
'bid', 'ask'])
result = pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('10ms'),
allow_exact_matches=False)
assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 6, np.nan]})
result = pd.merge_asof(left, right, on='a', direction='forward')
assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 6, 7]})
result = pd.merge_asof(left, right, on='a', direction='nearest')
assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype('category')
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype('category')
expected.ticker = expected.ticker.astype('category')
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index('time')
quotes = self.quotes
result = merge_asof(trades, quotes,
left_index=True,
right_on='time',
by='ticker')
# left-only index uses right's index, oddly
expected.index = result.index
# time column appears after left's columns
expected = expected[result.columns]
assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index('time')
result = merge_asof(trades, quotes,
left_on='time',
right_index=True,
by='ticker')
assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index('time')
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
result = merge_asof(trades, quotes,
left_index=True,
right_index=True,
by='ticker')
assert_frame_equal(result, expected)
def test_multi_index(self):
# MultiIndex is prohibited
trades = self.trades.set_index(['time', 'price'])
quotes = self.quotes.set_index('time')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_index=True,
right_index=True)
trades = self.trades.set_index('time')
quotes = self.quotes.set_index(['time', 'bid'])
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_index=True,
right_index=True)
def test_on_and_index(self):
# 'on' parameter and index together is prohibited
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_on='price',
left_index=True,
right_index=True)
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
right_on='bid',
left_index=True,
right_index=True)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
left_by='ticker',
right_by='ticker')
assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != 'MSFT']
result = merge_asof(trades, q,
on='time',
by='ticker')
expected.loc[expected.ticker == 'MSFT', ['bid', 'ask']] = np.nan
assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'exch',
'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.045',
'20160525 13:30:00.049']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL'],
'exch': ['BATS', 'NSDQ', 'ARCA', 'ARCA',
'NSDQ', 'ARCA'],
'bid': [720.51, 51.95, 51.97, 51.99,
720.50, 97.99],
'ask': [720.92, 51.96, 51.98, 52.00,
720.93, 98.01]},
columns=['time', 'ticker', 'exch', 'bid', 'ask'])
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100],
'bid': [np.nan, 51.95, 720.50, 720.51, np.nan],
'ask': [np.nan, 51.96, 720.93, 720.92, np.nan]},
columns=['time', 'ticker', 'exch',
'price', 'quantity', 'bid', 'ask'])
result = pd.merge_asof(trades, quotes, on='time',
by=['ticker', 'exch'])
assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': [0, 0, 1, 1, 2],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'exch',
'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.045',
'20160525 13:30:00.049']),
'ticker': [1, 0, 0, 0, 1, 2],
'exch': ['BATS', 'NSDQ', 'ARCA', 'ARCA',
'NSDQ', 'ARCA'],
'bid': [720.51, 51.95, 51.97, 51.99,
720.50, 97.99],
'ask': [720.92, 51.96, 51.98, 52.00,
720.93, 98.01]},
columns=['time', 'ticker', 'exch', 'bid', 'ask'])
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': [0, 0, 1, 1, 2],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100],
'bid': [np.nan, 51.95, 720.50, 720.51, np.nan],
'ask': [np.nan, 51.96, 720.93, 720.92, np.nan]},
columns=['time', 'ticker', 'exch',
'price', 'quantity', 'bid', 'ask'])
result = pd.merge_asof(trades, quotes, on='time',
by=['ticker', 'exch'])
assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame([
[pd.to_datetime('20160602'), 1, 'a'],
[pd.to_datetime('20160602'), 2, 'a'],
[pd.to_datetime('20160603'), 1, 'b'],
[pd.to_datetime('20160603'), 2, 'b']],
columns=['time', 'k1', 'k2']).set_index('time')
right = pd.DataFrame([
[pd.to_datetime('20160502'), 1, 'a', 1.0],
[pd.to_datetime('20160502'), 2, 'a', 2.0],
[pd.to_datetime('20160503'), 1, 'b', 3.0],
[pd.to_datetime('20160503'), 2, 'b', 4.0]],
columns=['time', 'k1', 'k2', 'value']).set_index('time')
expected = pd.DataFrame([
[pd.to_datetime('20160602'), 1, 'a', 1.0],
[pd.to_datetime('20160602'), 2, 'a', 2.0],
[pd.to_datetime('20160603'), 1, 'b', 3.0],
[pd.to_datetime('20160603'), 2, 'b', 4.0]],
columns=['time', 'k1', 'k2', 'value']).set_index('time')
result = pd.merge_asof(left,
right,
left_index=True,
right_index=True,
by=['k1', 'k2'])
assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(left, right, left_index=True, right_index=True,
left_by=['k1', 'k2'], right_by=['k1'])
def test_basic2(self, datapath):
expected = self.read_data(datapath, 'asof2.csv')
trades = self.read_data(datapath, 'trades2.csv')
quotes = self.read_data(datapath, 'quotes2.csv', dedupe=True)
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = lambda x: x[x.ticker == 'MSFT'].drop('ticker', axis=1) \
.reset_index(drop=True)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes,
on='time')
assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_on='time',
right_on='bid',
by='ticker')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on=['time', 'ticker'],
by='ticker')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
by='ticker')
def test_with_duplicates(self, datapath):
q = pd.concat([self.quotes, self.quotes]).sort_values(
['time', 'ticker']).reset_index(drop=True)
result = merge_asof(self.trades, q,
on='time',
by='ticker')
expected = self.read_data(datapath, 'asof.csv')
assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3]})
df2 = pd.DataFrame({'key': [1, 2, 2],
'right_val': [1, 2, 3]})
result = merge_asof(df1, df2, on='key')
expected = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3],
'right_val': [1, 1, 3]})
assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
allow_exact_matches='foo')
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1s'))
# integer
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1.0)
# invalid negative
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=-Timedelta('1s'))
with pytest.raises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=-1)
def test_non_sorted(self):
trades = self.trades.sort_values('time', ascending=False)
quotes = self.quotes.sort_values('time', ascending=False)
        # we require that both trades & quotes are already sorted on time
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
trades = self.trades.sort_values('time')
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
quotes = self.quotes.sort_values('time')
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes,
on='time',
by='ticker')
def test_tolerance(self):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1day'))
expected = self.tolerance
assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, np.nan, 11]})
result = pd.merge_asof(left, right, on='a', direction='forward',
tolerance=1)
assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, np.nan, 11]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
tolerance=1)
assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{'date': pd.DatetimeIndex(start=pd.to_datetime('2016-01-02'),
freq='D', periods=5,
tz=pytz.timezone('UTC')),
'value1': np.arange(5)})
right = pd.DataFrame(
{'date': pd.DatetimeIndex(start=pd.to_datetime('2016-01-01'),
freq='D', periods=5,
tz=pytz.timezone('UTC')),
'value2': list("ABCDE")})
result = pd.merge_asof(left, right, on='date',
tolerance=pd.Timedelta('1 day'))
expected = pd.DataFrame(
{'date': pd.DatetimeIndex(start=pd.to_datetime('2016-01-02'),
freq='D', periods=5,
tz=pytz.timezone('UTC')),
'value1': np.arange(5),
'value2': list("BCDEE")})
assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index('time')
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
result = pd.merge_asof(trades, quotes,
left_index=True,
right_index=True,
by='ticker',
tolerance=pd.Timedelta('1day'))
assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
allow_exact_matches=False)
expected = self.allow_exact_matches
assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [2, 7, 11]})
result = pd.merge_asof(left, right, on='a', direction='forward',
allow_exact_matches=False)
assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [2, 3, 11]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
allow_exact_matches=False)
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
tolerance=Timedelta('100ms'),
allow_exact_matches=False)
expected = self.allow_exact_matches_and_tolerance
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob']})
df2 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.000',
'2016-07-15 13:30:00.030']),
'version': [1, 2]})
result = pd.merge_asof(df1, df2, on='time')
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [2]})
assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on='time', allow_exact_matches=False)
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [1]})
assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on='time', allow_exact_matches=False,
tolerance=pd.Timedelta('10ms'))
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [np.nan]})
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030',
'2016-07-15 13:30:00.030']),
'username': ['bob', 'charlie']})
df2 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.000',
'2016-07-15 13:30:00.030']),
'version': [1, 2]})
result = pd.merge_asof(df1, df2, on='time', allow_exact_matches=False,
tolerance=pd.Timedelta('10ms'))
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030',
'2016-07-15 13:30:00.030']),
'username': ['bob', 'charlie'],
'version': [np.nan, np.nan]})
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 3, 4, 6, 11],
'right_val': [1, 3, 4, 6, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [np.nan, 6, 11]})
result = pd.merge_asof(left, right, on='a', direction='forward',
allow_exact_matches=False, tolerance=1)
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 3, 4, 6, 11],
'right_val': [1, 3, 4, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [np.nan, 4, 11]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
allow_exact_matches=False, tolerance=1)
assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Y', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e']})
right = pd.DataFrame({'a': [1, 6, 11, 15, 16],
'b': ['X', 'Z', 'Y', 'Z', 'Y'],
'right_val': [1, 6, 11, 15, 16]})
expected = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Y', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e'],
'right_val': [1, np.nan, 11, 15, 16]})
result = pd.merge_asof(left, right, on='a', by='b',
direction='forward')
assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Z', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e']})
right = pd.DataFrame({'a': [1, 6, 11, 15, 16],
'b': ['X', 'Z', 'Z', 'Z', 'Y'],
'right_val': [1, 6, 11, 15, 16]})
expected = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Z', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e'],
'right_val': [1, 1, 11, 11, 16]})
result = pd.merge_asof(left, right, on='a', by='b',
direction='nearest')
assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.020',
'20160525 13:30:00.030',
'20160525 13:30:00.040',
'20160525 13:30:00.050',
'20160525 13:30:00.060']),
'key': [1, 2, 1, 3, 2],
'value1': [1.1, 1.2, 1.3, 1.4, 1.5]},
columns=['time', 'key', 'value1'])
df2 = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.015',
'20160525 13:30:00.020',
'20160525 13:30:00.025',
'20160525 13:30:00.035',
'20160525 13:30:00.040',
'20160525 13:30:00.055',
'20160525 13:30:00.060',
'20160525 13:30:00.065']),
'key': [2, 1, 1, 3, 2, 1, 2, 3],
'value2': [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8]},
columns=['time', 'key', 'value2'])
result = pd.merge_asof(df1, df2, on='time', by='key')
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.020',
'20160525 13:30:00.030',
'20160525 13:30:00.040',
'20160525 13:30:00.050',
'20160525 13:30:00.060']),
'key': [1, 2, 1, 3, 2],
'value1': [1.1, 1.2, 1.3, 1.4, 1.5],
'value2': [2.2, 2.1, 2.3, 2.4, 2.7]},
columns=['time', 'key', 'value1', 'value2'])
assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame({
'price': [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
'symbol': list("ABCDEFG")},
columns=['symbol', 'price'])
df2 = pd.DataFrame({
'price': [0.0, 1.0, 100.0],
'mpv': [0.0001, 0.01, 0.05]},
columns=['price', 'mpv'])
df1 = df1.sort_values('price').reset_index(drop=True)
result = pd.merge_asof(df1, df2, on='price')
expected = pd.DataFrame({
'symbol': list("BGACEDF"),
'price': [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
'mpv': [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05]},
columns=['symbol', 'price', 'mpv'])
assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame({
"value": [5, 2, 25, 100, 78, 120, 79],
"symbol": list("ABCDEFG")},
columns=["symbol", "value"])
df1.value = dtype(df1.value)
df2 = pd.DataFrame({
"value": [0, 80, 120, 125],
"result": list("xyzw")},
columns=["value", "result"])
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz")
}, columns=["symbol", "value", "result"])
expected.value = dtype(expected.value)
assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame({
"value": [5, 2, 25, 100, 78, 120, 79],
"key": [1, 2, 3, 2, 3, 1, 2],
"symbol": list("ABCDEFG")},
columns=["symbol", "key", "value"])
df1.value = dtype(df1.value)
df2 = pd.DataFrame({
"value": [0, 80, 120, 125],
"key": [1, 2, 2, 3],
"result": list("xyzw")},
columns=["value", "key", "result"])
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value", by="key")
expected = pd.DataFrame({
"symbol": list("BACEGDF"),
"key": [2, 1, 3, 3, 2, 2, 1],
"value": [2, 5, 25, 78, 79, 100, 120],
"result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"]},
columns=["symbol", "key", "value", "result"])
expected.value = dtype(expected.value)
assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame({
'symbol': list("AAABBBCCC"),
'exch': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'price': [3.26, 3.2599, 3.2598, 12.58, 12.59,
12.5, 378.15, 378.2, 378.25]},
columns=['symbol', 'exch', 'price'])
df2 = pd.DataFrame({
'exch': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'price': [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
'mpv': [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0]},
columns=['exch', 'price', 'mpv'])
df1 = df1.sort_values('price').reset_index(drop=True)
df2 = df2.sort_values('price').reset_index(drop=True)
result = pd.merge_asof(df1, df2, on='price', by='exch')
expected = pd.DataFrame({
'symbol': list("AAABBBCCC"),
'exch': [3, 2, 1, 3, 1, 2, 1, 2, 3],
'price': [3.2598, 3.2599, 3.26, 12.5, 12.58,
12.59, 378.15, 378.2, 378.25],
'mpv': [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25]},
columns=['symbol', 'exch', 'price', 'mpv'])
assert_frame_equal(result, expected)
def test_merge_datatype_error(self):
""" Tests merge datatype mismatch error """
msg = r'merge keys \[0\] object and int64, must be the same type'
left = pd.DataFrame({'left_val': [1, 5, 10],
'a': ['a', 'b', 'c']})
right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7],
'a': [1, 2, 3, 6, 7]})
with tm.assert_raises_regex(MergeError, msg):
merge_asof(left, right, on='a')
|
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Daniel Strohmeier <daniel.strohmeier@gmail.com>
#
# License: Simplified BSD
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_array_less)
from mne.inverse_sparse.mxne_optim import (mixed_norm_solver,
tf_mixed_norm_solver,
iterative_mixed_norm_solver,
iterative_tf_mixed_norm_solver,
norm_epsilon_inf, norm_epsilon,
_Phi, _PhiT, dgap_l21l1)
from mne.time_frequency._stft import stft_norm2
def _generate_tf_data():
n, p, t = 30, 40, 64
rng = np.random.RandomState(0)
G = rng.randn(n, p)
G /= np.std(G, axis=0)[None, :]
X = np.zeros((p, t))
active_set = [0, 4]
times = np.linspace(0, 2 * np.pi, t)
X[0] = np.sin(times)
X[4] = -2 * np.sin(4 * times)
X[4, times <= np.pi / 2] = 0
X[4, times >= np.pi] = 0
M = np.dot(G, X)
M += 1 * rng.randn(*M.shape)
return M, G, active_set
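# Note: in the simulated data above the ground-truth active set is sources
# [0, 4]; the TF-MxNE tests below check that exactly these indices are
# recovered.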
def test_l21_mxne():
"""Test convergence of MxNE solver."""
n, p, t, alpha = 30, 40, 20, 1.
rng = np.random.RandomState(0)
G = rng.randn(n, p)
G /= np.std(G, axis=0)[None, :]
X = np.zeros((p, t))
X[0] = 3
X[4] = -2
M = np.dot(G, X)
args = (M, G, alpha, 1000, 1e-8)
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = mixed_norm_solver(
*args, active_set_size=None,
debias=True, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_cd, active_set, _, gap_cd = mixed_norm_solver(
*args, active_set_size=None,
debias=True, solver='cd', return_gap=True)
assert_array_less(gap_cd, 1e-8)
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_bcd, active_set, E, gap_bcd = mixed_norm_solver(
M, G, alpha, maxit=1000, tol=1e-8, active_set_size=None,
debias=True, solver='bcd', return_gap=True)
assert_array_less(gap_bcd, 9.6e-9)
assert_array_equal(np.where(active_set)[0], [0, 4])
assert_allclose(X_hat_prox, X_hat_cd, rtol=1e-2)
assert_allclose(X_hat_prox, X_hat_bcd, rtol=1e-2)
assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_cd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, solver='cd')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, solver='bcd')
assert_array_equal(np.where(active_set)[0], [0, 4])
assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=2, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=2, solver='bcd')
assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
# suppress a coordinate-descent warning here
with pytest.warns(RuntimeWarning, match='descent'):
X_hat_cd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=2, solver='cd')
assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=5, solver='bcd')
assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=5, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
with pytest.warns(RuntimeWarning, match='descent'):
X_hat_cd, active_set, _ = mixed_norm_solver(
*args, active_set_size=2, debias=True, n_orient=5, solver='cd')
assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
assert_array_equal(X_hat_bcd, X_hat_cd)
assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
def test_tf_mxne():
"""Test convergence of TF-MxNE solver."""
alpha_space = 10.
alpha_time = 5.
M, G, active_set = _generate_tf_data()
with pytest.warns(None): # CD
X_hat_tf, active_set_hat_tf, E, gap_tfmxne = tf_mixed_norm_solver(
M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, verbose=True,
n_orient=1, tstep=4, wsize=32, return_gap=True)
assert_array_less(gap_tfmxne, 1e-8)
assert_array_equal(np.where(active_set_hat_tf)[0], active_set)
def test_norm_epsilon():
"""Test computation of espilon norm on TF coefficients."""
tstep = np.array([2])
wsize = np.array([4])
n_times = 10
n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
n_freqs = wsize // 2 + 1
n_coefs = n_steps * n_freqs
phi = _Phi(wsize, tstep, n_coefs)
Y = np.zeros(n_steps * n_freqs)
l1_ratio = 0.03
assert_allclose(norm_epsilon(Y, l1_ratio, phi), 0.)
Y[0] = 2.
assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
l1_ratio = 1.
assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
    # deterministic dummy values (no randomness):
Y = np.arange(n_steps * n_freqs).reshape(-1, )
l1_ratio = 0.0
assert_allclose(norm_epsilon(Y, l1_ratio, phi) ** 2,
stft_norm2(Y.reshape(-1, n_freqs[0], n_steps[0])))
l1_ratio = 0.03
    # check that the unweighted epsilon norm matches the weighted one when all
    # weights are equal to 1
w_time = np.ones(n_coefs[0])
Y = np.abs(np.random.randn(n_coefs[0]))
assert_allclose(norm_epsilon(Y, l1_ratio, phi),
norm_epsilon(Y, l1_ratio, phi, w_time=w_time))
# scaling w_time and w_space by the same amount should divide
# epsilon norm by the same amount
Y = np.arange(n_coefs) + 1
mult = 2.
assert_allclose(
norm_epsilon(Y, l1_ratio, phi, w_space=1,
w_time=np.ones(n_coefs)) / mult,
norm_epsilon(Y, l1_ratio, phi, w_space=mult,
w_time=mult * np.ones(n_coefs)))
@pytest.mark.slowtest # slow-ish on Travis OSX
@pytest.mark.timeout(60) # ~30 sec on Travis OSX and Linux OpenBLAS
def test_dgapl21l1():
"""Test duality gap for L21 + L1 regularization."""
n_orient = 2
M, G, active_set = _generate_tf_data()
n_times = M.shape[1]
n_sources = G.shape[1]
tstep, wsize = np.array([4, 2]), np.array([64, 16])
n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
n_freqs = wsize // 2 + 1
n_coefs = n_steps * n_freqs
phi = _Phi(wsize, tstep, n_coefs)
phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
for l1_ratio in [0.05, 0.1]:
alpha_max = norm_epsilon_inf(G, M, phi, l1_ratio, n_orient)
alpha_space = (1. - l1_ratio) * alpha_max
alpha_time = l1_ratio * alpha_max
Z = np.zeros([n_sources, phi.n_coefs.sum()])
# for alpha = alpha_max, Z = 0 is the solution so the dgap is 0
gap = dgap_l21l1(M, G, Z, np.ones(n_sources, dtype=bool),
alpha_space, alpha_time, phi, phiT,
n_orient, -np.inf)[0]
assert_allclose(0., gap)
# check that solution for alpha smaller than alpha_max is non 0:
X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
M, G, alpha_space / 1.01, alpha_time / 1.01, maxit=200, tol=1e-8,
verbose=True, debias=False, n_orient=n_orient, tstep=tstep,
wsize=wsize, return_gap=True)
# allow possible small numerical errors (negative gap)
assert_array_less(-1e-10, gap)
assert_array_less(gap, 1e-8)
assert_array_less(1, len(active_set_hat_tf))
X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
M, G, alpha_space / 5., alpha_time / 5., maxit=200, tol=1e-8,
verbose=True, debias=False, n_orient=n_orient, tstep=tstep,
wsize=wsize, return_gap=True)
assert_array_less(-1e-10, gap)
assert_array_less(gap, 1e-8)
assert_array_less(1, len(active_set_hat_tf))
def test_tf_mxne_vs_mxne():
"""Test equivalence of TF-MxNE (with alpha_time=0) and MxNE."""
alpha_space = 60.
alpha_time = 0.
M, G, active_set = _generate_tf_data()
X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver(
M, G, alpha_space, alpha_time, maxit=200, tol=1e-8,
verbose=True, debias=False, n_orient=1, tstep=4, wsize=32)
# Also run L21 and check that we get the same
X_hat_l21, _, _ = mixed_norm_solver(
M, G, alpha_space, maxit=200, tol=1e-8, verbose=False, n_orient=1,
active_set_size=None, debias=False)
assert_allclose(X_hat_tf, X_hat_l21, rtol=1e-1)
@pytest.mark.slowtest # slow-ish on Travis OSX
def test_iterative_reweighted_mxne():
"""Test convergence of irMxNE solver."""
n, p, t, alpha = 30, 40, 20, 1
rng = np.random.RandomState(0)
G = rng.randn(n, p)
G /= np.std(G, axis=0)[None, :]
X = np.zeros((p, t))
X[0] = 3
X[4] = -2
M = np.dot(G, X)
with pytest.warns(None): # CD
X_hat_l21, _, _ = mixed_norm_solver(
M, G, alpha, maxit=1000, tol=1e-8, verbose=False, n_orient=1,
active_set_size=None, debias=False, solver='bcd')
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
debias=False, solver='bcd')
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
debias=False, solver='prox')
assert_allclose(X_hat_bcd, X_hat_l21, rtol=1e-3)
assert_allclose(X_hat_prox, X_hat_l21, rtol=1e-3)
with pytest.warns(None): # CD
X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
debias=True, solver='prox')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
debias=True, solver='bcd')
assert_array_equal(np.where(active_set)[0], [0, 4])
with pytest.warns(None): # CD
X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
debias=True, solver='cd')
assert_array_equal(np.where(active_set)[0], [0, 4])
assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
with pytest.warns(None): # CD
X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
debias=True, n_orient=2, solver='bcd')
assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    # the CD solver is expected to emit (and have captured) a coordinate-descent warning here
with pytest.warns(RuntimeWarning, match='descent'):
X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
debias=True, n_orient=2, solver='cd')
assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, debias=True,
n_orient=5)
assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
with pytest.warns(RuntimeWarning, match='descent'):
X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
debias=True, n_orient=5, solver='cd')
assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
@pytest.mark.slowtest
def test_iterative_reweighted_tfmxne():
"""Test convergence of irTF-MxNE solver."""
M, G, true_active_set = _generate_tf_data()
alpha_space = 38.
alpha_time = 0.5
tstep, wsize = [4, 2], [64, 16]
X_hat_tf, _, _ = tf_mixed_norm_solver(
M, G, alpha_space, alpha_time, maxit=1000, tol=1e-4, wsize=wsize,
tstep=tstep, verbose=False, n_orient=1, debias=False)
X_hat_bcd, active_set, _ = iterative_tf_mixed_norm_solver(
M, G, alpha_space, alpha_time, 1, wsize=wsize, tstep=tstep,
maxit=1000, tol=1e-4, debias=False, verbose=False)
assert_allclose(X_hat_tf, X_hat_bcd, rtol=1e-3)
assert_array_equal(np.where(active_set)[0], true_active_set)
alpha_space = 50.
X_hat_bcd, active_set, _ = iterative_tf_mixed_norm_solver(
M, G, alpha_space, alpha_time, 3, wsize=wsize, tstep=tstep,
n_orient=5, maxit=1000, tol=1e-4, debias=False, verbose=False)
assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
alpha_space = 40.
X_hat_bcd, active_set, _ = iterative_tf_mixed_norm_solver(
M, G, alpha_space, alpha_time, 2, wsize=wsize, tstep=tstep,
n_orient=2, maxit=1000, tol=1e-4, debias=False, verbose=False)
assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
|
|
from contextlib import contextmanager
import numpy as np
from matplotlib.colors import ColorConverter
from vispy import scene
from vispy.scene.visuals import Arrow
class MultiColorScatter(scene.visuals.Markers):
"""
This is a helper class to make it easier to show multiple markers at
specific positions and control exactly which marker should be on top of
which.
"""
def __init__(self, *args, **kwargs):
self.layers = {}
self._combined_data = None
self._skip_update = False
self._error_vector_widget = None
super(MultiColorScatter, self).__init__(*args, **kwargs)
@contextmanager
def delay_update(self):
self._skip_update = True
yield
self._skip_update = False
def allocate(self, label):
if label in self.layers:
raise ValueError("Layer {0} already exists".format(label))
else:
self.layers[label] = {'data': None,
'mask': None,
'errors': None,
'vectors': None,
'draw_arrows': False,
'color': np.asarray((1., 1., 1.)),
'alpha': 1.,
'zorder': lambda: 0,
'size': 10,
'visible': True}
def deallocate(self, label):
self.layers.pop(label)
self._update()
def set_data_values(self, label, x, y, z):
"""
Set the position of the datapoints
"""
# TODO: avoid re-allocating an array every time
self.layers[label]['data'] = np.array([x, y, z]).transpose()
self._update()
def set_visible(self, label, visible):
self.layers[label]['visible'] = visible
self._update()
def set_mask(self, label, mask):
self.layers[label]['mask'] = mask
self._update()
def set_errors(self, label, error_lines):
self.layers[label]['errors'] = error_lines
self._update()
def set_vectors(self, label, vectors):
self.layers[label]['vectors'] = vectors
self._update()
def set_draw_arrows(self, label, draw_arrows):
self.layers[label]['draw_arrows'] = draw_arrows
self._update()
def set_size(self, label, size):
if not np.isscalar(size) and size.ndim > 1:
raise Exception("size should be a 1-d array")
self.layers[label]['size'] = size
self._update()
def set_color(self, label, rgb):
if isinstance(rgb, str):
rgb = ColorConverter().to_rgb(rgb)
self.layers[label]['color'] = np.asarray(rgb)
self._update()
def set_alpha(self, label, alpha):
self.layers[label]['alpha'] = alpha
self._update()
def set_zorder(self, label, zorder):
self.layers[label]['zorder'] = zorder
self._update()
def update_line_width(self, width):
if self._error_vector_widget:
self._error_vector_widget.set_data(width=width)
def _update(self):
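        # Rebuild the combined marker, error-line and arrow arrays from all
        # visible layers, visiting layers in ascending zorder so higher-zorder
        # layers end up later in the combined arrays.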
if self._skip_update:
return
data = []
colors = []
sizes = []
lines = []
line_colors = []
arrows = []
arrow_colors = []
for label in sorted(self.layers, key=lambda x: self.layers[x]['zorder']()):
layer = self.layers[label]
if not layer['visible'] or layer['data'] is None:
continue
input_points = layer['data'].shape[0]
if layer['mask'] is None:
n_points = input_points
else:
n_points = np.sum(layer['mask'])
if input_points > 0 and n_points > 0:
# Data
if layer['mask'] is None:
data.append(layer['data'])
else:
data.append(layer['data'][layer['mask'], :])
# Colors
if layer['color'].ndim == 1:
rgba = np.hstack([layer['color'], 1])
rgba = np.repeat(rgba, n_points).reshape(4, -1).transpose()
else:
if layer['mask'] is None:
rgba = layer['color'].copy()
else:
rgba = layer['color'][layer['mask']]
rgba[:, 3] *= layer['alpha']
colors.append(rgba)
# Sizes
if np.isscalar(layer['size']):
size = np.repeat(layer['size'], n_points)
else:
if layer['mask'] is None:
size = layer['size']
else:
size = layer['size'][layer['mask']]
sizes.append(size)
# Error bar and colors
if layer['errors'] is not None:
for error_set in layer['errors']:
if layer['mask'] is None:
out = error_set
else:
out = error_set[layer['mask']]
out = out.reshape((-1, 3))
lines.append(out)
line_colors.append(np.repeat(rgba, 2, axis=0))
if layer['vectors'] is not None:
if layer['mask'] is None:
out = layer['vectors']
else:
out = layer['vectors'][layer['mask']]
lines.append(out.reshape((-1, 3)))
line_colors.append(np.repeat(rgba, 2, axis=0))
if layer['draw_arrows']:
arrows.append(out)
arrow_colors.append(rgba)
if len(data) == 0:
self.visible = False
return
else:
self.visible = True
data = np.vstack(data)
colors = np.vstack(colors)
sizes = np.hstack(sizes)
self.set_data(data, edge_color=colors, face_color=colors, size=sizes)
if len(lines) == 0:
if self._error_vector_widget is not None:
self._error_vector_widget.visible = False
return
else:
if self._error_vector_widget is None:
widget = Arrow(parent=self, connect="segments")
widget.set_gl_state(depth_test=False, blend=True,
blend_func=('src_alpha', 'one_minus_src_alpha'))
self._error_vector_widget = widget
self._error_vector_widget.visible = True
lines = np.vstack(lines)
line_colors = np.vstack(line_colors)
self._error_vector_widget.set_data(pos=lines, color=line_colors)
arrows = np.vstack(arrows) if len(arrows) > 0 else np.array([])
arrow_colors = np.vstack(arrow_colors) if len(arrow_colors) else np.array([])
self._error_vector_widget.set_data(arrows=arrows)
self._error_vector_widget.arrow_color = arrow_colors
def draw(self, *args, **kwargs):
if len(self.layers) == 0:
return
else:
try:
super(MultiColorScatter, self).draw(*args, **kwargs)
except Exception:
pass
if __name__ == "__main__": # pragma: nocover
from vispy import app, scene
canvas = scene.SceneCanvas(keys='interactive')
view = canvas.central_widget.add_view()
view.camera = scene.TurntableCamera(up='z', fov=60)
x = np.random.random(20)
y = np.random.random(20)
z = np.random.random(20)
multi_scat = MultiColorScatter(parent=view.scene)
multi_scat.allocate('data')
multi_scat.set_zorder('data', lambda: 0)
multi_scat.set_data_values('data', x, y, z)
multi_scat.allocate('subset1')
multi_scat.set_mask('subset1', np.random.random(20) > 0.5)
multi_scat.set_color('subset1', 'red')
multi_scat.set_zorder('subset1', lambda: 1)
multi_scat.allocate('subset2')
multi_scat.set_mask('subset2', np.random.random(20) > 0.5)
multi_scat.set_color('subset2', 'green')
multi_scat.set_zorder('subset2', lambda: 2)
multi_scat.set_alpha('subset2', 0.5)
multi_scat.set_size('subset2', 20)
axis = scene.visuals.XYZAxis(parent=view.scene)
canvas.show()
app.run()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Multinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
_multinomial_prob_note = """
For each batch of counts `[n_1,...,n_k]`, `P[counts]` is the probability
that after sampling `n` draws from this Multinomial distribution, the
number of draws falling in class `j` is `n_j`. Note that different
sequences of draws can result in the same counts, thus the probability
includes a combinatorial coefficient.
Note that input "counts" must be a non-negative tensor with dtype `dtype`
and whose shape can be broadcast with `self.p` and `self.n`. For fixed
leading dimensions, the last dimension represents counts for the
corresponding Multinomial distribution in `self.p`. `counts` is only legal
if it sums up to `n` and its components are equal to integer values.
"""
class Multinomial(distribution.Distribution):
"""Multinomial distribution.
  This distribution is parameterized by a vector `p` of probability
  parameters for `k` classes and `n`, the total number of draws (trials).
#### Mathematical details
The Multinomial is a distribution over k-class count data, meaning
for each k-tuple of non-negative integer `counts = [n_1,...,n_k]`, we have a
probability of these draws being made from the distribution. The distribution
has hyperparameters `p = (p_1,...,p_k)`, and probability mass
function (pmf):
```pmf(counts) = n! / (n_1!...n_k!) * (p_1)^n_1*(p_2)^n_2*...(p_k)^n_k```
  where `n = sum_j n_j` and `n!` is `n` factorial.
#### Examples
  Create a 3-class distribution, with the 3rd class being the most likely to
  be drawn, using logits.
```python
logits = [-50., -43, 0]
dist = Multinomial(n=4., logits=logits)
```
  Create a 3-class distribution, with the 3rd class being the most likely to
  be drawn.
```python
p = [.2, .3, .5]
dist = Multinomial(n=4., p=p)
```
The distribution functions can be evaluated on counts.
```python
# counts same shape as p.
counts = [1., 0, 3]
dist.prob(counts) # Shape []
# p will be broadcast to [[.2, .3, .5], [.2, .3, .5]] to match counts.
counts = [[1., 2, 1], [2, 2, 0]]
dist.prob(counts) # Shape [2]
# p will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7]
```
Create a 2-batch of 3-class distributions.
```python
p = [[.1, .2, .7], [.3, .3, .4]] # Shape [2, 3]
dist = Multinomial(n=[4., 5], p=p)
counts = [[2., 1, 1], [3, 1, 1]]
dist.prob(counts) # Shape [2]
```
"""
def __init__(self,
n,
logits=None,
p=None,
validate_args=False,
allow_nan_stats=True,
name="Multinomial"):
"""Initialize a batch of Multinomial distributions.
Args:
n: Non-negative floating point tensor with shape broadcastable to
`[N1,..., Nm]` with `m >= 0`. Defines this as a batch of
`N1 x ... x Nm` different Multinomial distributions. Its components
should be equal to integer values.
logits: Floating point tensor representing the log-odds of a
positive event with shape broadcastable to `[N1,..., Nm, k], m >= 0`,
and the same dtype as `n`. Defines this as a batch of `N1 x ... x Nm`
different `k` class Multinomial distributions. Only one of `logits` or
`p` should be passed in.
p: Positive floating point tensor with shape broadcastable to
`[N1,..., Nm, k]` `m >= 0` and same dtype as `n`. Defines this as
a batch of `N1 x ... x Nm` different `k` class Multinomial
distributions. `p`'s components in the last portion of its shape should
sum up to 1. Only one of `logits` or `p` should be passed in.
validate_args: `Boolean`, default `False`. Whether to assert valid
values for parameters `n` and `p`, and `x` in `prob` and `log_prob`.
If `False`, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to prefix Ops created by this distribution class.
Examples:
```python
# Define 1-batch of 2-class multinomial distribution,
# also known as a Binomial distribution.
dist = Multinomial(n=2., p=[.1, .9])
# Define a 2-batch of 3-class distributions.
dist = Multinomial(n=[4., 5], p=[[.1, .3, .6], [.4, .05, .55]])
```
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[n, p]) as ns:
with ops.control_dependencies([
check_ops.assert_non_negative(
n, message="n has negative components."),
distribution_util.assert_integer_form(
n, message="n has non-integer components.")
] if validate_args else []):
self._logits, self._p = distribution_util.get_logits_and_prob(
name=name, logits=logits, p=p, validate_args=validate_args,
multidimensional=True)
self._n = array_ops.identity(n, name="convert_n")
self._mean_val = array_ops.expand_dims(n, -1) * self._p
self._broadcast_shape = math_ops.reduce_sum(
self._mean_val, reduction_indices=[-1], keep_dims=False)
super(Multinomial, self).__init__(
dtype=self._p.dtype,
is_continuous=False,
is_reparameterized=False,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._p, self._n, self._mean_val,
self._logits, self._broadcast_shape],
name=ns)
@property
def n(self):
"""Number of trials."""
return self._n
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def p(self):
"""Vector of probabilities summing to one.
Each element is the probability of drawing that coordinate."""
return self._p
def _batch_shape(self):
return array_ops.shape(self._broadcast_shape)
def _get_batch_shape(self):
return self._broadcast_shape.get_shape()
def _event_shape(self):
return array_ops.gather(array_ops.shape(self._mean_val),
[array_ops.rank(self._mean_val) - 1])
def _get_event_shape(self):
return self._mean_val.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
if self.n.get_shape().ndims is not None:
if self.n.get_shape().ndims != 0:
raise NotImplementedError(
"Sample only supported for scalar number of draws.")
elif self.validate_args:
is_scalar = check_ops.assert_rank(
n_draws, 0,
message="Sample only supported for scalar number of draws.")
n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
k = self.event_shape()[0]
# Flatten batch dims so logits has shape [B, k],
# where B = reduce_prod(self.batch_shape()).
logits = array_ops.reshape(self.logits, [-1, k])
draws = random_ops.multinomial(logits=logits,
num_samples=n * n_draws,
seed=seed)
draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
reduction_indices=-2) # shape: [B, n, k]
x = array_ops.transpose(x, perm=[1, 0, 2])
final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
return array_ops.reshape(x, final_shape)
@distribution_util.AppendDocstring(_multinomial_prob_note)
def _log_prob(self, counts):
counts = self._assert_valid_sample(counts)
log_unnormalized_prob = math_ops.reduce_sum(
counts * math_ops.log(self.p),
reduction_indices=[-1])
log_normalizer = -distribution_util.log_combinations(self.n, counts)
return log_unnormalized_prob - log_normalizer
@distribution_util.AppendDocstring(_multinomial_prob_note)
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
def _mean(self):
return array_ops.identity(self._mean_val)
def _variance(self):
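    # Covariance of a Multinomial: off-diagonal entries are
    # Cov[X_i, X_j] = -n * p_i * p_j and diagonal entries are
    # Var[X_i] = n * p_i * (1 - p_i). The code below forms the outer product
    # (n * p) p^T, negates it, and overwrites its diagonal accordingly.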
p = self.p * array_ops.expand_dims(array_ops.ones_like(self.n), -1)
outer_prod = math_ops.matmul(
array_ops.expand_dims(self._mean_val, -1), array_ops.expand_dims(p, -2))
return array_ops.matrix_set_diag(-outer_prod,
self._mean_val - self._mean_val * p)
def _assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args: return counts
return control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
counts, message="counts has negative components."),
check_ops.assert_equal(
self.n, math_ops.reduce_sum(counts, reduction_indices=[-1]),
message="counts do not sum to n."),
distribution_util.assert_integer_form(
counts, message="counts have non-integer components.")
], counts)
|
|
#! /usr/bin/env python
# Copyright (c) 2014, OpenCog Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the OpenCog Foundation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import tf
from collections import deque
import re
import math
class RingBuffer(deque):
# Ring buffer implementation using deque, written by vegaseat
# Sourced from: http://www.daniweb.com/forums/post202523-3.html
"""
inherits deque, pops the oldest data to make room
for the newest data when size is reached
"""
def __init__(self, size):
deque.__init__(self)
self.size = size
def full_append(self, item):
deque.append(self, item)
# full, pop the oldest item, left most item
self.popleft()
def append(self, item):
deque.append(self, item)
# max size reached, append becomes full_append
if len(self) == self.size:
self.append = self.full_append
def get(self):
"""returns a list of size items (newest items)"""
return list(self)
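# Illustrative usage (not part of the original module): a RingBuffer(3) keeps
# only the 3 newest items.
#   buf = RingBuffer(3)
#   for i in range(5):
#       buf.append(i)
#   buf.get()  # -> [2, 3, 4]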
class Smoother():
def __init__(self):
node_name = rospy.get_name()
#print "node_name: " + str(node_name)
rate = rospy.get_param("~rate", 14.429)
self.history_size = rospy.get_param("~history_size", 5)
self.thresh = rospy.get_param("~thresh", 0.05)
try:
self.input_topic = rospy.get_param("~input_topic")
        except KeyError:
            err = 'Please specify an input topic'
            rospy.logerr(err)
raise Exception(err)
try:
self.output_topic = rospy.get_param("~output_topic")
        except KeyError:
err = 'Please specify an output topic'
rospy.logerr(err)
raise Exception(err)
self.tf = tf.TransformListener()
self.br = tf.TransformBroadcaster()
self.rate = rospy.Rate(rate)
self.history = RingBuffer(self.history_size)
self.parent = self.get_parent(self.input_topic)
#print "PARENT: " + str(self.parent)
def run(self):
self.buffer()
self.median_rot = self.get_median_rot()
while not rospy.is_shutdown():
(trans, rot) = self.get_transform(self.parent, self.input_topic)
# print "xr: " + str(rot[0]) + " yr: " + str(rot[1]) + " zr: " + str(rot[2]) + " ?:" + str(rot[3])
self.history.append((trans, rot))
(m_trans, m_rot) = self.mean()
dev_rot = self.dev_from_cur_rot(self.median_rot)
# print "Rot: " + str(self.median_rot)
# print "Dev: " + str(dev_rot)
if dev_rot[0] > self.thresh or dev_rot[1] > self.thresh or dev_rot[2] > self.thresh or dev_rot[3] > self.thresh:
self.median_rot = self.get_median_rot()
self.br.sendTransform(m_trans, self.median_rot, rospy.Time.now(), self.output_topic, self.parent)
# self.br.sendTransform((10.0, 0.0, 0.0), (0.0,0.0,0.0,1.0), rospy.Time.now(), "carrot" , self.new_name)
self.rate.sleep()
def get_frames(self):
while True:
self.rate.sleep()
frames = self.tf.allFramesAsString()
if frames != "":
break
return frames
def dev_from_cur_rot(self, cur_rot):
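        # Root-mean-square deviation of each quaternion component in the
        # history from cur_rot (using an n - 1 denominator); run() uses it to
        # decide when the cached median rotation has drifted past the
        # threshold and must be recomputed.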
rot = [0.0, 0.0, 0.0, 0.0]
for (t1, r1) in self.history:
rot[0] += math.pow(r1[0] - cur_rot[0], 2.0)
rot[1] += math.pow(r1[1] - cur_rot[1], 2.0)
rot[2] += math.pow(r1[2] - cur_rot[2], 2.0)
rot[3] += math.pow(r1[3] - cur_rot[3], 2.0)
length = len(self.history)
rot[0] /= (length - 1)
rot[1] /= (length - 1)
rot[2] /= (length - 1)
rot[3] /= (length - 1)
rot[0] = math.sqrt(rot[0])
rot[1] = math.sqrt(rot[1])
rot[2] = math.sqrt(rot[2])
rot[3] = math.sqrt(rot[3])
return (rot[0], rot[1], rot[2], rot[3])
def buffer(self):
i = 0
while not rospy.is_shutdown():
if i < self.history_size:
(trans, rot) = self.get_transform(self.parent, self.input_topic)
self.history.append((trans, rot))
i += 1
else:
break
self.rate.sleep()
def get_median_rot(self):
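        # Component-wise median of the buffered quaternions. Each component is
        # sorted independently, so the result is an approximation rather than
        # a true quaternion median.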
x = []
y = []
z = []
w = []
middle = int(len(self.history) / 2)
#print "middle: " + str(middle)
for (trans, rot) in self.history:
x.append(rot[0])
y.append(rot[1])
z.append(rot[2])
w.append(rot[3])
x.sort()
y.sort()
z.sort()
w.sort()
return (x[middle], y[middle], z[middle], w[middle])
def mean(self):
av_trans = [0.0, 0.0, 0.0]
av_rot = [0.0, 0.0, 0.0, 0.0]
num_points = float(len(self.history))
# w = 1.0
for (trans, rot) in self.history:
av_trans[0] += trans[0]
av_trans[1] += trans[1]
av_trans[2] += trans[2]
av_rot[0] += rot[0]
av_rot[1] += rot[1]
av_rot[2] += rot[2]
av_rot[3] += rot[3]
av_trans[0] /= num_points
av_trans[1] /= num_points
av_trans[2] /= num_points
av_rot[0] /= num_points
av_rot[1] /= num_points
av_rot[2] /= num_points
av_rot[3] /= num_points
# br.sendTransform((0.0, 0.0, 0.127), (0.0, 0.0, 0.0, 1.0), rospy.Time.now(), "gaze_base", "torso")
return ((av_trans[0], av_trans[1], av_trans[2]), (av_rot[0], av_rot[1], av_rot[2], av_rot[3]))
def get_parent(self, topic):
prefix = "Frame " + topic.replace("/", "") + " exists with parent "
#print "Prefix: " + prefix
while not rospy.is_shutdown():
frames = self.get_frames()
#print "frames: " + frames
hierarchy_info = re.search(prefix + "[A-Za-z_]+", frames)
if hierarchy_info is not None:
regex_result = hierarchy_info.group(0)
break
else:
print "Looking for parent of topic:'" + topic + "'"
parent = regex_result.replace(prefix, "")
print "Parent of " + topic + " is: '" + str(parent) + "'"
return parent
def get_transform(self, origin, target):
try:
try:
self.tf.waitForTransform(origin, target, rospy.Time(), rospy.Duration(5.0))
(trans, rot) = self.tf.lookupTransform(origin, target, rospy.Time())
return (trans, rot)
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
rospy.loginfo("tf Failure")
except tf.Exception:
print "Couldn't transform"
if __name__ == '__main__':
rospy.init_node("ar_tag_smoother")
smoother = Smoother()
smoother.run()
rospy.spin()
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import netaddr
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.i18n import _  # translation marker used throughout this module
LOG = logging.getLogger(__name__)
ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'
# Used by range check to indicate no limit for a bound.
UNLIMITED = None
# TODO(watanabe.isao): A fix like in neutron/db/models_v2.py needs to be
# done in other db modules, to reuse the following constants.
# Common definitions for maximum string field length
NAME_MAX_LEN = 255
TENANT_ID_MAX_LEN = 255
DESCRIPTION_MAX_LEN = 255
DEVICE_ID_MAX_LEN = 255
DEVICE_OWNER_MAX_LEN = 255
def _verify_dict_keys(expected_keys, target_dict, strict=True):
"""Allows to verify keys in a dictionary.
:param expected_keys: A list of keys expected to be present.
:param target_dict: The dictionary which should be verified.
:param strict: Specifies whether additional keys are allowed to be present.
:return: True, if keys in the dictionary correspond to the specification.
"""
if not isinstance(target_dict, dict):
msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
"with keys: %(expected_keys)s") %
{'target_dict': target_dict, 'expected_keys': expected_keys})
LOG.debug(msg)
return msg
expected_keys = set(expected_keys)
provided_keys = set(target_dict.keys())
predicate = expected_keys.__eq__ if strict else expected_keys.issubset
if not predicate(provided_keys):
msg = (_("Validation of dictionary's keys failed. "
"Expected keys: %(expected_keys)s "
"Provided keys: %(provided_keys)s") %
{'expected_keys': expected_keys,
'provided_keys': provided_keys})
LOG.debug(msg)
return msg
def is_attr_set(attribute):
return not (attribute is None or attribute is ATTR_NOT_SPECIFIED)
def _validate_values(data, valid_values=None):
if data not in valid_values:
msg = (_("'%(data)s' is not in %(valid_values)s") %
{'data': data, 'valid_values': valid_values})
LOG.debug(msg)
return msg
def _validate_not_empty_string_or_none(data, max_len=None):
if data is not None:
return _validate_not_empty_string(data, max_len=max_len)
def _validate_not_empty_string(data, max_len=None):
msg = _validate_string(data, max_len=max_len)
if msg:
return msg
if not data.strip():
msg = _("'%s' Blank strings are not permitted") % data
LOG.debug(msg)
return msg
def _validate_string_or_none(data, max_len=None):
if data is not None:
return _validate_string(data, max_len=max_len)
def _validate_string(data, max_len=None):
if not isinstance(data, six.string_types):
msg = _("'%s' is not a valid string") % data
LOG.debug(msg)
return msg
if max_len is not None and len(data) > max_len:
msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
{'data': data, 'max_len': max_len})
LOG.debug(msg)
return msg
def _validate_boolean(data, valid_values=None):
try:
convert_to_boolean(data)
except n_exc.InvalidInput:
msg = _("'%s' is not a valid boolean value") % data
LOG.debug(msg)
return msg
def _validate_range(data, valid_values=None):
"""Check that integer value is within a range provided.
Test is inclusive. Allows either limit to be ignored, to allow
checking ranges where only the lower or upper limit matter.
It is expected that the limits provided are valid integers or
the value None.
"""
min_value = valid_values[0]
max_value = valid_values[1]
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if min_value is not UNLIMITED and data < min_value:
msg = _("'%(data)s' is too small - must be at least "
"'%(limit)d'") % {'data': data, 'limit': min_value}
LOG.debug(msg)
return msg
if max_value is not UNLIMITED and data > max_value:
msg = _("'%(data)s' is too large - must be no larger than "
"'%(limit)d'") % {'data': data, 'limit': max_value}
LOG.debug(msg)
return msg
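# Illustrative examples (not part of the original module):
#   _validate_range(5, [0, 10])         returns None (valid)
#   _validate_range(5, [0, UNLIMITED])  returns None (no upper bound)
#   _validate_range(-1, [0, 10])        returns "'-1' is too small - must be at least '0'"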
def _validate_no_whitespace(data):
"""Validates that input has no whitespace."""
if re.search(r'\s', data):
msg = _("'%s' contains whitespace") % data
LOG.debug(msg)
raise n_exc.InvalidInput(error_message=msg)
return data
def _validate_mac_address(data, valid_values=None):
try:
valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
except Exception:
valid_mac = False
# TODO(arosen): The code in this file should be refactored
# so it catches the correct exceptions. _validate_no_whitespace
# raises AttributeError if data is None.
if not valid_mac:
msg = _("'%s' is not a valid MAC address") % data
LOG.debug(msg)
return msg
def _validate_mac_address_or_none(data, valid_values=None):
if data is not None:
return _validate_mac_address(data, valid_values)
def _validate_ip_address(data, valid_values=None):
try:
netaddr.IPAddress(_validate_no_whitespace(data))
        # The following are quick checks for IPv6 (has ':') and
        # IPv4 (has 3 periods like 'xx.xx.xx.xx').
# NOTE(yamamoto): netaddr uses libraries provided by the underlying
# platform to convert addresses. For example, inet_aton(3).
# Some platforms, including NetBSD and OS X, have inet_aton
# implementation which accepts more varying forms of addresses than
# we want to accept here. The following check is to reject such
# addresses. For Example:
# >>> netaddr.IPAddress('1' * 59)
# IPAddress('199.28.113.199')
# >>> netaddr.IPAddress(str(int('1' * 59) & 0xffffffff))
# IPAddress('199.28.113.199')
# >>>
if ':' not in data and data.count('.') != 3:
raise ValueError()
except Exception:
msg = _("'%s' is not a valid IP address") % data
LOG.debug(msg)
return msg
def _validate_ip_pools(data, valid_values=None):
"""Validate that start and end IP addresses are present.
    In addition to this, the IP addresses will also be validated.
"""
if not isinstance(data, list):
msg = _("Invalid data format for IP pool: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['start', 'end']
for ip_pool in data:
msg = _verify_dict_keys(expected_keys, ip_pool)
if msg:
return msg
for k in expected_keys:
msg = _validate_ip_address(ip_pool[k])
if msg:
return msg
def _validate_fixed_ips(data, valid_values=None):
if not isinstance(data, list):
msg = _("Invalid data format for fixed IP: '%s'") % data
LOG.debug(msg)
return msg
ips = []
for fixed_ip in data:
if not isinstance(fixed_ip, dict):
msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
LOG.debug(msg)
return msg
if 'ip_address' in fixed_ip:
# Ensure that duplicate entries are not set - just checking IP
# suffices. Duplicate subnet_id's are legitimate.
fixed_ip_address = fixed_ip['ip_address']
if fixed_ip_address in ips:
msg = _("Duplicate IP address '%s'") % fixed_ip_address
LOG.debug(msg)
else:
msg = _validate_ip_address(fixed_ip_address)
            if msg:
                return msg
            ips.append(fixed_ip_address)
if 'subnet_id' in fixed_ip:
msg = _validate_uuid(fixed_ip['subnet_id'])
if msg:
return msg
def _validate_nameservers(data, valid_values=None):
if not hasattr(data, '__iter__'):
msg = _("Invalid data format for nameserver: '%s'") % data
LOG.debug(msg)
return msg
hosts = []
for host in data:
# This must be an IP address only
msg = _validate_ip_address(host)
if msg:
msg = _("'%(host)s' is not a valid nameserver. %(msg)s") % {
'host': host, 'msg': msg}
LOG.debug(msg)
return msg
if host in hosts:
msg = _("Duplicate nameserver '%s'") % host
LOG.debug(msg)
return msg
hosts.append(host)
def _validate_hostroutes(data, valid_values=None):
if not isinstance(data, list):
msg = _("Invalid data format for hostroute: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['destination', 'nexthop']
hostroutes = []
for hostroute in data:
msg = _verify_dict_keys(expected_keys, hostroute)
if msg:
return msg
msg = _validate_subnet(hostroute['destination'])
if msg:
return msg
msg = _validate_ip_address(hostroute['nexthop'])
if msg:
return msg
if hostroute in hostroutes:
msg = _("Duplicate hostroute '%s'") % hostroute
LOG.debug(msg)
return msg
hostroutes.append(hostroute)
def _validate_ip_address_or_none(data, valid_values=None):
if data is not None:
return _validate_ip_address(data, valid_values)
def _validate_subnet(data, valid_values=None):
msg = None
try:
net = netaddr.IPNetwork(_validate_no_whitespace(data))
if '/' not in data:
msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
" '%(cidr)s' is recommended") % {"data": data,
"cidr": net.cidr}
else:
return
except Exception:
msg = _("'%s' is not a valid IP subnet") % data
if msg:
LOG.debug(msg)
return msg
def _validate_subnet_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
for item in data:
msg = _validate_subnet(item)
if msg:
return msg
def _validate_subnet_or_none(data, valid_values=None):
if data is not None:
return _validate_subnet(data, valid_values)
def _validate_regex(data, valid_values=None):
try:
if re.match(valid_values, data):
return
except TypeError:
pass
msg = _("'%s' is not a valid input") % data
LOG.debug(msg)
return msg
def _validate_regex_or_none(data, valid_values=None):
if data is not None:
return _validate_regex(data, valid_values)
def _validate_uuid(data, valid_values=None):
if not uuidutils.is_uuid_like(data):
msg = _("'%s' is not a valid UUID") % data
LOG.debug(msg)
return msg
def _validate_uuid_or_none(data, valid_values=None):
if data is not None:
return _validate_uuid(data)
def _validate_uuid_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
for item in data:
msg = _validate_uuid(item)
if msg:
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
def _validate_dict_item(key, key_validator, data):
# Find conversion function, if any, and apply it
conv_func = key_validator.get('convert_to')
if conv_func:
data[key] = conv_func(data.get(key))
# Find validator function
# TODO(salv-orlando): Structure of dict attributes should be improved
# to avoid iterating over items
val_func = val_params = None
for (k, v) in six.iteritems(key_validator):
if k.startswith('type:'):
# ask forgiveness, not permission
try:
val_func = validators[k]
except KeyError:
msg = _("Validator '%s' does not exist.") % k
LOG.debug(msg)
return msg
val_params = v
break
# Process validation
if val_func:
return val_func(data.get(key), val_params)
def _validate_dict(data, key_specs=None):
if not isinstance(data, dict):
msg = _("'%s' is not a dictionary") % data
LOG.debug(msg)
return msg
# Do not perform any further validation, if no constraints are supplied
if not key_specs:
return
# Check whether all required keys are present
required_keys = [key for key, spec in six.iteritems(key_specs)
if spec.get('required')]
if required_keys:
msg = _verify_dict_keys(required_keys, data, False)
if msg:
return msg
# Perform validation and conversion of all values
# according to the specifications.
for key, key_validator in [(k, v) for k, v in six.iteritems(key_specs)
if k in data]:
msg = _validate_dict_item(key, key_validator, data)
if msg:
return msg
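# Illustrative example (not part of the original module): _validate_dict can be
# driven by a key_specs mapping shaped like the 'validate' entries used in
# RESOURCE_ATTRIBUTE_MAP below, e.g.
#   key_specs = {'start': {'type:ip_address': None, 'required': True},
#                'end': {'type:ip_address': None, 'required': True}}
#   _validate_dict({'start': '10.0.0.2', 'end': '10.0.0.254'}, key_specs)
# returns None for valid input, while a missing required key or a malformed
# address makes it return an error message string.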
def _validate_dict_or_none(data, key_specs=None):
if data is not None:
return _validate_dict(data, key_specs)
def _validate_dict_or_empty(data, key_specs=None):
if data != {}:
return _validate_dict(data, key_specs)
def _validate_dict_or_nodata(data, key_specs=None):
if data:
return _validate_dict(data, key_specs)
def _validate_non_negative(data, valid_values=None):
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if data < 0:
msg = _("'%s' should be non-negative") % data
LOG.debug(msg)
return msg
def convert_to_boolean(data):
if isinstance(data, six.string_types):
val = data.lower()
if val == "true" or val == "1":
return True
if val == "false" or val == "0":
return False
elif isinstance(data, bool):
return data
elif isinstance(data, int):
if data == 0:
return False
elif data == 1:
return True
msg = _("'%s' cannot be converted to boolean") % data
raise n_exc.InvalidInput(error_message=msg)
def convert_to_boolean_if_not_none(data):
if data is not None:
return convert_to_boolean(data)
def convert_to_int(data):
try:
return int(data)
except (ValueError, TypeError):
msg = _("'%s' is not a integer") % data
raise n_exc.InvalidInput(error_message=msg)
def convert_to_int_if_not_none(data):
if data is not None:
return convert_to_int(data)
return data
def convert_to_positive_float_or_none(val):
# NOTE(salv-orlando): This conversion function is currently used by
    # a vendor-specific extension only at the moment. It is used for
# port's RXTX factor in neutron.plugins.vmware.extensions.qos.
# It is deemed however generic enough to be in this module as it
# might be used in future for other API attributes.
if val is None:
return
try:
val = float(val)
if val < 0:
raise ValueError()
except (ValueError, TypeError):
msg = _("'%s' must be a non negative decimal.") % val
raise n_exc.InvalidInput(error_message=msg)
return val
def convert_kvp_str_to_list(data):
"""Convert a value of the form 'key=value' to ['key', 'value'].
:raises: n_exc.InvalidInput if any of the strings are malformed
(e.g. do not contain a key).
"""
kvp = [x.strip() for x in data.split('=', 1)]
if len(kvp) == 2 and kvp[0]:
return kvp
msg = _("'%s' is not of the form <key>=[value]") % data
raise n_exc.InvalidInput(error_message=msg)
def convert_kvp_list_to_dict(kvp_list):
"""Convert a list of 'key=value' strings to a dict.
    Values for keys that appear more than once are merged into a single list.
    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key).
"""
if kvp_list == ['True']:
# No values were provided (i.e. '--flag-name')
return {}
kvp_map = {}
for kvp_str in kvp_list:
key, value = convert_kvp_str_to_list(kvp_str)
kvp_map.setdefault(key, set())
kvp_map[key].add(value)
return dict((x, list(y)) for x, y in six.iteritems(kvp_map))
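# Illustrative example (not part of the original module):
#   convert_kvp_list_to_dict(['a=1', 'a=2', 'b=3'])
# yields {'a': ['1', '2'], 'b': ['3']} (value order within each list is not
# guaranteed because values are collected in a set).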
def convert_none_to_empty_list(value):
return [] if value is None else value
def convert_none_to_empty_dict(value):
return {} if value is None else value
def convert_to_list(data):
if data is None:
return []
elif hasattr(data, '__iter__'):
return list(data)
else:
return [data]
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
# Dictionary that maintains a list of validation functions
validators = {'type:dict': _validate_dict,
'type:dict_or_none': _validate_dict_or_none,
'type:dict_or_empty': _validate_dict_or_empty,
'type:dict_or_nodata': _validate_dict_or_nodata,
'type:fixed_ips': _validate_fixed_ips,
'type:hostroutes': _validate_hostroutes,
'type:ip_address': _validate_ip_address,
'type:ip_address_or_none': _validate_ip_address_or_none,
'type:ip_pools': _validate_ip_pools,
'type:mac_address': _validate_mac_address,
'type:mac_address_or_none': _validate_mac_address_or_none,
'type:nameservers': _validate_nameservers,
'type:non_negative': _validate_non_negative,
'type:range': _validate_range,
'type:regex': _validate_regex,
'type:regex_or_none': _validate_regex_or_none,
'type:string': _validate_string,
'type:string_or_none': _validate_string_or_none,
'type:not_empty_string': _validate_not_empty_string,
'type:not_empty_string_or_none':
_validate_not_empty_string_or_none,
'type:subnet': _validate_subnet,
'type:subnet_list': _validate_subnet_list,
'type:subnet_or_none': _validate_subnet_or_none,
'type:uuid': _validate_uuid,
'type:uuid_or_none': _validate_uuid_or_none,
'type:uuid_list': _validate_uuid_list,
'type:values': _validate_values,
'type:boolean': _validate_boolean}
# Define constants for base resource name
NETWORK = 'network'
NETWORKS = '%ss' % NETWORK
PORT = 'port'
PORTS = '%ss' % PORT
SUBNET = 'subnet'
SUBNETS = '%ss' % SUBNET
SUBNETPOOL = 'subnetpool'
SUBNETPOOLS = '%ss' % SUBNETPOOL
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
# default: default value of the attribute (if missing, the attribute
# becomes mandatory).
# allow_post: the attribute can be used on POST requests.
# allow_put: the attribute can be used on PUT requests.
# validate: specifies rules for validating data in the attribute.
# convert_to: transformation to apply to the value before it is returned
# is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and
# should therefore be filled by the API layer even if not present in
# request body.
# enforce_policy: the attribute is actively part of the policy enforcing
# mechanism, ie: there might be rules which refer to this attribute.
RESOURCE_ATTRIBUTE_MAP = {
NETWORKS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': NAME_MAX_LEN},
'default': '', 'is_visible': True},
'subnets': {'allow_post': False, 'allow_put': False,
'default': [],
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
SHARED: {'allow_post': True,
'allow_put': True,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
},
PORTS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': NAME_MAX_LEN},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'mac_address': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:mac_address': None},
'enforce_policy': True,
'is_visible': True},
'fixed_ips': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'convert_list_to': convert_kvp_list_to_dict,
'validate': {'type:fixed_ips': None},
'enforce_policy': True,
'is_visible': True},
'device_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': DEVICE_ID_MAX_LEN},
'default': '',
'is_visible': True},
'device_owner': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': DEVICE_OWNER_MAX_LEN},
'default': '',
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
SUBNETS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': NAME_MAX_LEN},
'is_visible': True},
'ip_version': {'allow_post': True, 'allow_put': False,
'convert_to': convert_to_int,
'validate': {'type:values': [4, 6]},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'subnetpool_id': {'allow_post': True,
'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'required_by_policy': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True},
'prefixlen': {'allow_post': True,
'allow_put': False,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'default': ATTR_NOT_SPECIFIED,
'required_by_policy': False,
'is_visible': False},
'cidr': {'allow_post': True,
'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:subnet_or_none': None},
'required_by_policy': False,
'is_visible': True},
'gateway_ip': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'allocation_pools': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_pools': None},
'is_visible': True},
'dns_nameservers': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:nameservers': None},
'is_visible': True},
'host_routes': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:hostroutes': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'enable_dhcp': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'ipv6_ra_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values': constants.IPV6_MODES},
'is_visible': True},
'ipv6_address_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values':
constants.IPV6_MODES},
'is_visible': True},
SHARED: {'allow_post': False,
'allow_put': False,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': False,
'required_by_policy': True,
'enforce_policy': True},
},
SUBNETPOOLS: {
'id': {'allow_post': False,
'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True,
'allow_put': True,
'validate': {'type:not_empty_string': None},
'is_visible': True},
'tenant_id': {'allow_post': True,
'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'prefixes': {'allow_post': True,
'allow_put': True,
'validate': {'type:subnet_list': None},
'is_visible': True},
'default_quota': {'allow_post': True,
'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'default': ATTR_NOT_SPECIFIED,
'is_visible': True},
'ip_version': {'allow_post': False,
'allow_put': False,
'is_visible': True},
'default_prefixlen': {'allow_post': True,
'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'default': ATTR_NOT_SPECIFIED,
'is_visible': True},
'min_prefixlen': {'allow_post': True,
'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'is_visible': True},
'max_prefixlen': {'allow_post': True,
'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'is_visible': True},
SHARED: {'allow_post': True,
'allow_put': False,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
}
}
# Identify the attribute used by a resource to reference another resource
RESOURCE_FOREIGN_KEYS = {
NETWORKS: 'network_id'
}
PLURALS = {NETWORKS: NETWORK,
PORTS: PORT,
SUBNETS: SUBNET,
SUBNETPOOLS: SUBNETPOOL,
'dns_nameservers': 'dns_nameserver',
'host_routes': 'host_route',
'allocation_pools': 'allocation_pool',
'fixed_ips': 'fixed_ip',
'extensions': 'extension'}
|
|
import sys
from functools import update_wrapper
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.base import ModelBase
from django.views.decorators.cache import never_cache
reload(sys)
sys.setdefaultencoding("utf-8")
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class MergeAdminMetaclass(type):
def __new__(cls, name, bases, attrs):
return type.__new__(cls, str(name), bases, attrs)
class AdminSite(object):
def __init__(self, name='xadmin'):
self.name = name
self.app_name = 'xadmin'
self._registry = {} # model_class class -> admin_class class
self._registry_avs = {} # admin_view_class class -> admin_class class
self._registry_settings = {} # settings name -> admin_class class
self._registry_views = []
# url instance contains (path, admin_view class, name)
self._registry_modelviews = []
# url instance contains (path, admin_view class, name)
self._registry_plugins = {} # view_class class -> plugin_class class
self._admin_view_cache = {}
self.check_dependencies()
self.model_admins_order = 0
def copy_registry(self):
import copy
return {
'models': copy.copy(self._registry),
'avs': copy.copy(self._registry_avs),
'views': copy.copy(self._registry_views),
'settings': copy.copy(self._registry_settings),
'modelviews': copy.copy(self._registry_modelviews),
'plugins': copy.copy(self._registry_plugins),
}
def restore_registry(self, data):
self._registry = data['models']
self._registry_avs = data['avs']
self._registry_views = data['views']
self._registry_settings = data['settings']
self._registry_modelviews = data['modelviews']
self._registry_plugins = data['plugins']
def register_modelview(self, path, admin_view_class, name):
from xadmin.views.base import BaseAdminView
if issubclass(admin_view_class, BaseAdminView):
self._registry_modelviews.append((path, admin_view_class, name))
else:
            raise ImproperlyConfigured(u'The registered view class %s isn\'t a subclass of %s' %
(admin_view_class.__name__, BaseAdminView.__name__))
def register_view(self, path, admin_view_class, name):
self._registry_views.append((path, admin_view_class, name))
def register_plugin(self, plugin_class, admin_view_class):
from xadmin.views.base import BaseAdminPlugin
if issubclass(plugin_class, BaseAdminPlugin):
self._registry_plugins.setdefault(
admin_view_class, []).append(plugin_class)
else:
            raise ImproperlyConfigured(u'The registered plugin class %s isn\'t a subclass of %s' %
(plugin_class.__name__, BaseAdminPlugin.__name__))
def register_settings(self, name, admin_class):
self._registry_settings[name.lower()] = admin_class
def register(self, model_or_iterable, admin_class=object, **options):
from xadmin.views.base import BaseAdminView
if isinstance(model_or_iterable, ModelBase) or issubclass(model_or_iterable, BaseAdminView):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if isinstance(model, ModelBase):
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered(
'The model %s is already registered' % model.__name__)
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type(str("%s%sAdmin" % (model._meta.app_label, model._meta.model_name )), (admin_class,), options or {})
admin_class.model = model
admin_class.order = self.model_admins_order
self.model_admins_order += 1
self._registry[model] = admin_class
else:
if model in self._registry_avs:
raise AlreadyRegistered('The admin_view_class %s is already registered' % model.__name__)
if options:
options['__module__'] = __name__
admin_class = type(str(
"%sAdmin" % model.__name__), (admin_class,), options)
# Instantiate the admin class to save in the registry
self._registry_avs[model] = admin_class
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
from xadmin.views.base import BaseAdminView
if isinstance(model_or_iterable, (ModelBase, BaseAdminView)):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if isinstance(model, ModelBase):
if model not in self._registry:
raise NotRegistered(
'The model %s is not registered' % model.__name__)
del self._registry[model]
else:
if model not in self._registry_avs:
raise NotRegistered('The admin_view_class %s is not registered' % model.__name__)
del self._registry_avs[model]
def set_loginview(self, login_view):
self.login_view = login_view
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that LogEntry, ContentType and the
auth context processor are installed.
"""
return
from django.contrib.contenttypes.models import ContentType
if not ContentType._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in "
"your INSTALLED_APPS setting in order to use the admin application.")
if not ('django.contrib.auth.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS or
'django.core.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS):
raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the admin application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(MyAdminSite, self).get_urls()
urls += patterns('',
url(r'^my_view/$', self.admin_view(some_view))
)
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request) and getattr(view, 'need_site_permission', True):
return self.create_admin_view(self.login_view)(request, *args, **kwargs)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
return update_wrapper(inner, view)
def _get_merge_attrs(self, option_class, plugin_class):
return dict([(name, getattr(option_class, name)) for name in dir(option_class)
if name[0] != '_' and not callable(getattr(option_class, name)) and hasattr(plugin_class, name)])
def _get_settings_class(self, admin_view_class):
name = admin_view_class.__name__.lower()
if name in self._registry_settings:
return self._registry_settings[name]
elif name.endswith('admin') and name[0:-5] in self._registry_settings:
return self._registry_settings[name[0:-5]]
elif name.endswith('adminview') and name[0:-9] in self._registry_settings:
return self._registry_settings[name[0:-9]]
return None
def _create_plugin(self, option_classes):
def merge_class(plugin_class):
if option_classes:
attrs = {}
bases = [plugin_class]
for oc in option_classes:
attrs.update(self._get_merge_attrs(oc, plugin_class))
meta_class = getattr(oc, plugin_class.__name__, getattr(oc, plugin_class.__name__.replace('Plugin', ''), None))
if meta_class:
bases.insert(0, meta_class)
if attrs:
plugin_class = MergeAdminMetaclass(
'%s%s' % (''.join([oc.__name__ for oc in option_classes]), plugin_class.__name__),
tuple(bases), attrs)
return plugin_class
return merge_class
def get_plugins(self, admin_view_class, *option_classes):
from xadmin.views import BaseAdminView
plugins = []
opts = [oc for oc in option_classes if oc]
for klass in admin_view_class.mro():
if klass == BaseAdminView or issubclass(klass, BaseAdminView):
merge_opts = []
reg_class = self._registry_avs.get(klass)
if reg_class:
merge_opts.append(reg_class)
settings_class = self._get_settings_class(klass)
if settings_class:
merge_opts.append(settings_class)
merge_opts.extend(opts)
ps = self._registry_plugins.get(klass, [])
plugins.extend(map(self._create_plugin(
merge_opts), ps) if merge_opts else ps)
return plugins
def get_view_class(self, view_class, option_class=None, **opts):
merges = [option_class] if option_class else []
for klass in view_class.mro():
reg_class = self._registry_avs.get(klass)
if reg_class:
merges.append(reg_class)
settings_class = self._get_settings_class(klass)
if settings_class:
merges.append(settings_class)
merges.append(klass)
new_class_name = ''.join([c.__name__ for c in merges])
if new_class_name not in self._admin_view_cache:
plugins = self.get_plugins(view_class, option_class)
self._admin_view_cache[new_class_name] = MergeAdminMetaclass(
new_class_name, tuple(merges),
dict({'plugin_classes': plugins, 'admin_site': self}, **opts))
return self._admin_view_cache[new_class_name]
def create_admin_view(self, admin_view_class):
return self.get_view_class(admin_view_class).as_view()
def create_model_admin_view(self, admin_view_class, model, option_class):
return self.get_view_class(admin_view_class, option_class).as_view()
def get_urls(self):
from django.conf.urls import patterns, url, include
from xadmin.views.base import BaseAdminView
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = patterns('',
url(r'^jsi18n/$', wrap(self.i18n_javascript,
cacheable=True), name='jsi18n')
)
        # Registered admin views
urlpatterns += patterns('',
*[url(
path, wrap(self.create_admin_view(clz_or_func)) if type(clz_or_func) == type and issubclass(clz_or_func, BaseAdminView) else include(clz_or_func(self)),
name=name) for path, clz_or_func, name in self._registry_views]
)
# Add in each model's views.
for model, admin_class in self._registry.iteritems():
view_urls = [url(
path, wrap(
self.create_model_admin_view(clz, model, admin_class)),
name=name % (model._meta.app_label, model._meta.model_name))
for path, clz, name in self._registry_modelviews]
pattern = getattr(admin_class, 'pattern', r'^%s/%s/')
urlpatterns += patterns('',
url(
pattern % (
model._meta.app_label, model._meta.model_name),
include(patterns('', *view_urls)))
)
return urlpatterns
@property
def urls(self):
return self.get_urls(), self.name, self.app_name
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'xadmin'])
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
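# Illustrative usage sketch (not part of the upstream module): registering a
# Django model with the default ``site`` defined above. ``Article`` and the
# option attributes are hypothetical names used only to demonstrate the API.
#
#   import xadmin
#   from myapp.models import Article
#
#   class ArticleAdmin(object):
#       list_display = ('title', 'created')
#       search_fields = ('title',)
#
#   xadmin.site.register(Article, ArticleAdmin)
#
# Keyword options may be passed instead of an option class; ``register`` then
# builds a subclass of ``admin_class`` on the fly:
#
#   xadmin.site.register(Article, list_display=('title',))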
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "rdtools-"
cfg.versionfile_source = "rdtools/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
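# For example, a describe output of "v1.2.0-3-gabc1234-dirty" with
# tag_prefix "v" is parsed into pieces with closest-tag "1.2.0",
# distance 3, short "abc1234" and dirty True; "long" holds the full
# revision id from 'git rev-parse HEAD'.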
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
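# For example, pieces of {"closest-tag": "1.2.0", "distance": 3,
# "short": "abc1234", "dirty": True} render as "1.2.0+3.gabc1234.dirty",
# and an untagged dirty tree renders as "0+untagged.3.gabc1234.dirty".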
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''This is the main controller module for the Files Plugin.'''
import logging
from six.moves.urllib.parse import unquote
from tg import config, redirect, expose, flash
from tg.decorators import with_trailing_slash, without_trailing_slash
from tg import tmpl_context as c, app_globals as g
from tg import request, response
from jinja2.exceptions import TemplateNotFound
from allura.app import Application
from allura.controllers import BaseController
from allura.lib.decorators import require_post
from allura.lib.widgets.subscriptions import SubscribeForm
from allura.lib.security import require_access
from allura import model as M
from allura.controllers import attachments as att
from allura import version
from allura.model.timeline import TransientActor
from bson import ObjectId
from webob import exc
# local imports ##
from forgefiles.model.files import UploadFolder, UploadFiles, Upload
log = logging.getLogger(__name__)
class FilesApp(Application):
"""Files plugin for the Allura platform"""
__version__ = version.__version__
tool_label = 'Files'
tool_description = """Upload executables for your project.
You may maintain version specific executables as well."""
default_mount_label = 'Files'
default_mount_point = 'files'
uninstallable = True
ordinal = 9
max_instances = 1
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = FilesController()
def install(self, project):
'Set up any default permissions and roles here'
self.config.options['project_name'] = project.name
super().install(project)
role_anon = M.ProjectRole.by_name('*anonymous')._id
self.config.acl = [
M.ACE.allow(role_anon, 'read'),
]
def uninstall(self, project):
"Remove all the tool's artifacts from the database"
app_config_id = {'app_config_id': c.app.config._id}
Upload.query.remove(app_config_id)
UploadFolder.query.remove(app_config_id)
file_objects = UploadFiles.query.find(app_config_id).all()
for file_object in file_objects:
file_object.delete()
super().uninstall(project)
def has_linked_download(self):
return UploadFiles.query.find({
'app_config_id': c.app.config._id, 'linked_to_download': True, 'disabled': False}).count()
def get_parent_folders(linked_file_object=None):
    '''Returns the list of parent folder ids (as strings) for the given file or folder'''
parent_folder = linked_file_object.parent_folder if linked_file_object else None
parent_folders_list = []
while parent_folder:
parent_folders_list.append(str(parent_folder._id))
parent_folder = parent_folder.parent_folder
parent_folders_list = list(set(parent_folders_list))
return parent_folders_list
class FilesController(BaseController):
"""Root controller for the Files Application"""
def _check_security(self):
require_access(c.app, 'read')
@expose('jinja:forgefiles:templates/files.html')
def index(self):
'''Index method for the Root controller'''
require_access(c.app, 'read')
folder_object = None
file_object = None
upload_object = Upload.query.get(app_config_id=c.app.config._id)
self.attachment = AttachmentsController(upload_object)
file_objects = UploadFiles.query.find({'app_config_id': c.app.config._id, 'parent_folder_id': None})
file_objects = file_objects.sort([('created_date', -1)]).all()
folder_objects = UploadFolder.query.find({'app_config_id': c.app.config._id, 'parent_folder_id': None})
folder_objects = folder_objects.sort([('created_date', -1)]).all()
if c.user in c.project.admins():
M.Mailbox.subscribe(type='direct')
c.subscribe_form = SubscribeForm(thing='files')
tool_subscribed = M.Mailbox.subscribed()
if tool_subscribed:
subscribed = M.Mailbox.subscribed()
else:
subscribed = False
file_object = UploadFiles.query.get(app_config_id=c.app.config._id, linked_to_download=True)
parents = get_parent_folders(linked_file_object=file_object)
return dict(file_objects=file_objects,
folder_objects=folder_objects, folder_object=folder_object, file_object=file_object,
subscribed=subscribed, parents=parents)
def get_parent_folder_url(self, parent_folder_id):
        ''' Returns the parent_folder_id, the parent folder object and its url (the app url when there is no parent)'''
if (parent_folder_id == 'None') or (not parent_folder_id):
parent_folder_id = None
parent_folder = None
url = c.app.url
else:
parent_folder = UploadFolder.query.get(_id=ObjectId(parent_folder_id), app_config_id=c.app.config._id)
parent_folder_id = ObjectId(parent_folder._id)
url = parent_folder.url()
return parent_folder_id, parent_folder, url
@require_post()
@expose()
def create_folder(self, parent_folder_id=None, folder_name=None):
'''Controller method for creating a folder. The folder is stored in UploadFolder collection'''
require_access(c.app, 'create')
parent_folder_id, parent_folder, url = self.get_parent_folder_url(parent_folder_id)
if folder_name:
folder_object = UploadFolder.query.find({
'app_config_id': c.app.config._id, 'folder_name': folder_name,
'parent_folder_id': parent_folder_id}).first()
if folder_object:
flash('Folder with the same name already exists!')
else:
folder_object = UploadFolder(folder_name=folder_name)
folder_object.parent_folder_id = parent_folder_id
parent = parent_folder
while parent:
parent.folder_ids.append(str(folder_object._id))
parent = parent.parent_folder
flash('Folder is created successfully')
g.director.create_activity(c.user, 'created', folder_object, related_nodes=[c.project])
else:
flash('Folder is not created successfully')
return redirect(url)
@require_post()
@expose()
def upload_file(self, parent_folder_id=None, file_upload=None, filename=None):
        '''Controller method for uploading a file. The uploaded file is stored in the UploadFiles collection'''
require_access(c.app, 'create')
parent_folder_id, parent_folder, url = self.get_parent_folder_url(parent_folder_id)
if file_upload is not None:
file_object = UploadFiles.query.find({
'app_config_id': c.app.config._id, 'filename': filename,
'parent_folder_id': parent_folder_id}).first()
if file_object:
flash('File with the same name already exists!')
else:
upload_object = Upload(
app_config_id=c.app.config._id, filename=filename, filetype=file_upload.type)
attach_object = upload_object.attach(
filename, file_upload.file, parent_folder_id=parent_folder_id)
if attach_object.parent_folder:
upload_object.file_url = attach_object.parent_folder.url()
else:
upload_object.file_url = c.app.url
parent = parent_folder
while parent:
parent.file_ids.append(str(attach_object._id))
parent = parent.parent_folder
flash('File is uploaded successfully')
g.director.create_activity(c.user, 'uploaded', upload_object, related_nodes=[c.project])
else:
flash('File is not uploaded successfully')
return redirect(url)
@require_post()
@expose()
def delete_file(self, file_id=None):
'''Controller method to delete a file'''
file_object = UploadFiles.query.get(_id=ObjectId(file_id), app_config_id=c.app.config._id)
upload_object = Upload.query.get(_id=file_object.artifact_id, app_config_id=c.app.config._id)
file_name = file_object.filename
transient_actor = TransientActor(activity_name=file_name)
url = c.app.url
if file_id is not None:
require_access(upload_object, 'delete')
self.delete_file_from_db(file_id=file_id)
parent_folder = file_object.parent_folder
if parent_folder:
url = parent_folder.url()
flash('File is successfully deleted')
g.director.create_activity(c.user, 'deleted the file', transient_actor, related_nodes=[c.project])
else:
flash('File is not deleted')
return redirect(url)
def delete_file_from_db(self, file_id=None):
'''Method to delete a file from db'''
file_object = UploadFiles.query.get(_id=ObjectId(file_id), app_config_id=c.app.config._id)
Upload.query.remove({'_id': file_object.artifact_id, 'app_config_id': c.app.config._id})
file_object.delete()
def delete_folder_recursively(self, folder_id):
'''This method is called recursively to delete folder in a hierarchy'''
sub_file_objects = UploadFiles.query.find(dict({
'app_config_id': c.app.config._id, 'parent_folder_id': ObjectId(folder_id)})).all()
for file_object in sub_file_objects:
self.delete_file_from_db(file_id=file_object._id)
sub_folder_objects = UploadFolder.query.find({
'app_config_id': c.app.config._id, 'parent_folder_id': ObjectId(folder_id)}).all()
for folder_object in sub_folder_objects:
self.delete_folder_recursively(folder_object._id)
UploadFolder.query.remove({'_id': ObjectId(folder_id), 'app_config_id': c.app.config._id})
@without_trailing_slash
@require_post()
@expose('jinja:forgefiles:templates/files.html')
def delete_folder(self, folder_id=None):
'''Controller method to delete a folder'''
folder_object = UploadFolder.query.get(_id=ObjectId(folder_id), app_config_id=c.app.config._id)
folder_name = folder_object.folder_name
transient_actor = TransientActor(activity_name=folder_name)
url = c.app.url
if folder_id is not None:
require_access(folder_object, 'delete')
self.delete_folder_recursively(folder_id)
if folder_object.parent_folder:
url = folder_object.parent_folder.url()
flash('Folder is deleted Successfully')
g.director.create_activity(c.user, 'deleted the folder', transient_actor, related_nodes=[c.project])
else:
flash('Folder is not deleted')
return redirect(url)
@without_trailing_slash
@require_post()
@expose()
def link_file(self, file_id=None, status=None):
'''Controller method to link a file to the download button'''
linkable_file_object = UploadFiles.query.get(_id=ObjectId(file_id), app_config_id=c.app.config._id)
upload_object = Upload.query.get(_id=linkable_file_object.artifact_id, app_config_id=c.app.config._id)
require_access(upload_object, 'link')
if status == 'False':
linkable_file_object.linked_to_download = False
else:
file_objects = UploadFiles.query.find({'app_config_id': c.app.config._id}).all()
for file_object in file_objects:
if file_object.linked_to_download:
file_object.linked_to_download = False
linkable_file_object.linked_to_download = True
@expose()
def download_file(self, filename=None):
'''Controller method to download a file'''
if filename:
request_path = request.path.split(c.app.url)[-1].rstrip('/')
request_path = unquote(request_path)
linked_file_object = UploadFiles.query.find({
'app_config_id': c.app.config._id, 'filename': filename, 'path': request_path, 'disabled': False,
}).first()
else:
linked_file_object = UploadFiles.query.find({
'app_config_id': c.app.config._id, 'linked_to_download': True, 'disabled': False,
}).first()
if linked_file_object:
try:
if not c.user.is_anonymous():
M.Mailbox.subscribe(type='direct')
return linked_file_object.serve(embed=False)
except Exception as e:
                log.exception('Error while downloading the file: %s', e)
else:
flash('No artifact available')
return redirect(c.app.url)
@require_post()
@expose()
def edit_folder(self, folder_id=None, folder_name=None):
'''Controller method to edit the folder name'''
url = c.app.url
folder_object = UploadFolder.query.get(_id=ObjectId(folder_id), app_config_id=c.app.config._id)
if folder_object:
require_access(folder_object, 'update')
folder_object.folder_name = folder_name
flash("Folder name edited successfully")
if folder_object.parent_folder:
url = folder_object.parent_folder.url()
else:
flash("Folder name not edited")
redirect(url)
@require_post()
@expose()
def edit_file(self, file_id=None, file_name=None):
'''Controller method to edit the file name'''
url = c.app.url
file_object = UploadFiles.query.get(_id=ObjectId(file_id), app_config_id=c.app.config._id)
upload_object = Upload.query.get(_id=file_object.artifact_id, app_config_id=c.app.config._id)
if file_object:
require_access(upload_object, 'update')
upload_object.filename = file_name
file_object.filename = file_name
flash("File name edited successfully")
if file_object.parent_folder:
url = file_object.parent_folder.url()
else:
flash("File not edited")
return redirect(url)
@require_post()
@expose()
def publish_folder(self, folder_id=None, remarks=None):
        '''Controller which publishes the folder. It sends an update about the publishing of the folder.'''
folder_object = UploadFolder.query.get(_id=ObjectId(folder_id), app_config_id=c.app.config._id)
url = c.app.url
if folder_object:
require_access(folder_object, 'publish')
folder_object.published = True
folder_object.remarks = remarks
mailbox_object = M.Mailbox.query.find({'app_config_id': c.app.config._id}).all()
user_ids = [i.user_id for i in mailbox_object]
admins = [i._id for i in c.project.admins()]
user_ids += admins
user_ids = list(set(user_ids))
from allura.tasks import mail_tasks
from allura.lib import helpers as h
template_name = ''
try:
for i in user_ids:
user_object = M.User.query.get(_id=i)
template_name = 'forgefiles:/templates/mail.html'
text = g.jinja2_env.get_template(template_name).render(dict(
base_url=config.get('base_url'), user_object=user_object, project=c.project,
remarks=remarks, folder_object=folder_object, project_owner=c.user,
domain=config.get('domain')
))
email_addr = user_object.get_pref('email_address')
if email_addr:
mail_tasks.sendsimplemail.post(
fromaddr=g.noreply,
reply_to=g.noreply,
toaddr=email_addr,
subject='{} - {} Release Update'.format(config.get('site_name'), c.project.name),
message_id=h.gen_message_id(),
text=text)
if folder_object.parent_folder:
url = folder_object.parent_folder.url()
flash('Successfully Published')
except TemplateNotFound:
log.exception('%s Template not found' % (template_name))
log.info('Folder %s is not published successfully' % (folder_object.folder_name))
flash('Folder is not published successfully')
return redirect(url)
@require_post()
@expose()
def disable_folder(self, folder_id=None, status=None):
'''Controller method to disable the folder.'''
folder_object = UploadFolder.query.get(_id=ObjectId(folder_id), app_config_id=c.app.config._id)
if status == 'True':
disable_status = True
text = 'disabled'
else:
disable_status = False
text = 'enabled'
if folder_object:
require_access(folder_object, 'disable')
folder_object.disabled = disable_status
            # Disable the child folders and files of the current folder
for child_folder_id in folder_object.folder_ids:
child_folder_object = UploadFolder.query.get(
_id=ObjectId(child_folder_id), app_config_id=c.app.config._id)
if child_folder_object:
child_folder_object.disabled = disable_status
for child_file_id in folder_object.file_ids:
child_file_object = UploadFiles.query.get(_id=ObjectId(child_file_id), app_config_id=c.app.config._id)
if child_file_object:
child_file_object.disabled = disable_status
flash('Folder %s successfully' % (text))
else:
flash('No folder exists')
@require_post()
@expose()
def disable_file(self, file_id=None, status=None):
'''Controller method to disable the file.'''
file_object = UploadFiles.query.get(_id=ObjectId(file_id), app_config_id=c.app.config._id)
upload_object = Upload.query.get(_id=file_object.artifact_id, app_config_id=c.app.config._id)
if status == 'True':
disable_status = True
text = 'disabled'
else:
disable_status = False
text = 'enabled'
if file_object:
require_access(upload_object, 'disable')
file_object.disabled = disable_status
flash('File %s successfully' % (text))
else:
flash('No file exists')
@expose('json:')
@require_post()
def subscribe(self, subscribe=None, unsubscribe=None):
        '''Controller method that subscribes a user to the files plugin.'''
if subscribe:
M.Mailbox.subscribe(type='direct')
elif unsubscribe:
M.Mailbox.unsubscribe()
return {
'status': 'ok',
'subscribed': M.Mailbox.subscribed(),
}
def get_folder_object(self, folder_id=None):
'''Returns the folder object for input folder id'''
folder_object = UploadFolder.query.get(_id=ObjectId(folder_id), app_config_id=c.app.config._id)
return folder_object
@expose('jinja:forgefiles:templates/create_folder.html')
def get_parent_for_create_folder(self, folder_id=None):
'''Returns the parent object of the input folder id'''
folder_object = self.get_folder_object(folder_id)
return dict(folder_object=folder_object)
@expose('jinja:forgefiles:templates/upload_file.html')
def get_parent_for_upload_file(self, folder_id=None):
'''Returns the parent object of the input folder id'''
folder_object = self.get_folder_object(folder_id)
return dict(folder_object=folder_object)
def get_folder_file_object(self, object_id=None):
'''Returns corresponding file or folder object for the input id '''
folder_object = UploadFolder.query.get(_id=ObjectId(object_id), app_config_id=c.app.config._id)
file_object = UploadFiles.query.get(_id=ObjectId(object_id), app_config_id=c.app.config._id)
return dict(folder_object=folder_object, file_object=file_object)
@expose('jinja:forgefiles:templates/edit.html')
def get_editable_object(self, object_id=None):
        '''Returns the folder or file object to be edited'''
object_dict = self.get_folder_file_object(object_id)
return object_dict
@expose('jinja:forgefiles:templates/delete.html')
def get_deletable_object(self, object_id=None):
        '''Returns the folder or file object to be deleted'''
object_dict = self.get_folder_file_object(object_id)
return object_dict
@expose('jinja:forgefiles:templates/publish_folder.html')
def get_publishable_folder(self, folder_id=None):
'''Returns the status and folder object if the folder can be published or not'''
linked_file_object = UploadFiles.query.get(
app_config_id=c.app.config._id, linked_to_download=True, disabled=False)
parent_folders = get_parent_folders(linked_file_object=linked_file_object)
if folder_id:
folder_object = UploadFolder.query.get(_id=ObjectId(folder_id), app_config_id=c.app.config._id)
status = str(folder_object._id) in parent_folders
else:
folder_object = None
status = False
return dict(folder_object=folder_object, status=status)
@expose()
def _lookup(self, name, *remainder):
        ''' Dispatches the request for an individual file or folder to IndividualFilesController'''
if not remainder:
argument = name
else:
argument = remainder[-1]
if argument == 'createFolder':
argument = None
return IndividualFilesController(argument), remainder
def folder_breadcrumbs(folder_object=None):
    ''' Function to create breadcrumbs for folders '''
list_object = folder_object.path.split('/')
second_list = []
length = 0
urls = {}
for i in list_object:
length += len(i)
folder_object = UploadFolder.query.get(folder_name=i)
urls[str(i)] = str(folder_object.url())
if length in range(1, (61-len(list_object[-1])+1)):
second_list.append(i)
second_list.append('...')
second_list.append(list_object[-1])
string = '/'.join(second_list)
if length > 61:
return string, urls
else:
return folder_object.path, urls
# handle requests for individual folder,file objects
class IndividualFilesController(BaseController):
"""Handle requests for a specific folder/file objects"""
def __init__(self, arg):
path = request.path.split(c.app.url)[-1].rstrip('/')
if path == arg:
path = arg
path = unquote(path)
arg = unquote(arg)
self.folder_object = UploadFolder.query.find({
'app_config_id': ObjectId(c.app.config._id), 'folder_name': arg, 'path': path}).first()
self.file_object = UploadFiles.query.find({
'app_config_id': ObjectId(c.app.config._id), 'filename': arg, 'path': path}).first()
methods = ('create_folder', 'upload_file', 'delete_file', 'delete_folder', 'subscribe')
if (not self.folder_object) and (not self.file_object) and (arg not in methods):
log.exception('No Folder/File object found')
raise exc.HTTPNotFound()
else:
pass
def _check_security(self):
require_access(c.app, 'read')
@expose('jinja:forgefiles:templates/files.html')
@with_trailing_slash
def index(self):
''' Index method of individual folder/file objects'''
require_access(c.app, 'read')
folder_objects = None
file_objects = None
folder_path, urls = '', ''
if self.folder_object:
folder_objects = UploadFolder.query.find({
'app_config_id': c.app.config._id, 'parent_folder_id': self.folder_object._id})
folder_objects = folder_objects.sort([('created_date', -1)]).all()
file_objects = UploadFiles.query.find({
'app_config_id': c.app.config._id, 'parent_folder_id': self.folder_object._id})
file_objects = file_objects.sort([('created_date', -1)]).all()
folder_path, urls = folder_breadcrumbs(folder_object=self.folder_object)
elif self.file_object:
return FilesController().download_file(filename=self.file_object.filename)
if c.user in c.project.admins():
M.Mailbox.subscribe(type='direct')
c.subscribe_form = SubscribeForm(thing='files')
tool_subscribed = M.Mailbox.subscribed()
if tool_subscribed:
subscribed = M.Mailbox.subscribed()
else:
subscribed = False
file_object = UploadFiles.query.get(app_config_id=c.app.config._id, linked_to_download=True)
parents = get_parent_folders(linked_file_object=file_object)
return dict(folder_objects=folder_objects,
file_objects=file_objects, folder_object=self.folder_object, file_object=self.file_object,
subscribed=subscribed, parents=parents, folder_path=folder_path, urls=urls)
@require_post()
@expose()
def create_folder(self, parent_folder_id=None, folder_name=None):
return FilesController().create_folder(parent_folder_id=parent_folder_id, folder_name=folder_name)
@require_post()
@expose()
def upload_file(self, parent_folder_id=None, filename=None, file_upload=None):
return FilesController().upload_file(
parent_folder_id=parent_folder_id, filename=filename, file_upload=file_upload)
@require_post()
@expose()
def delete_file(self, file_id=None):
return FilesController().delete_file(file_id=file_id)
@expose('json:')
@require_post()
def subscribe(self, subscribe=None, unsubscribe=None):
if subscribe:
M.Mailbox.subscribe(type='direct')
elif unsubscribe:
M.Mailbox.unsubscribe()
return {
'status': 'ok',
'subscribed': M.Mailbox.subscribed(),
}
@expose()
def _lookup(self, name, *remainder):
if not remainder:
argument = name
else:
argument = remainder[-1]
return IndividualFilesController(argument), remainder
class AttachmentController(att.AttachmentController):
AttachmentClass = UploadFiles
edit_perm = 'update'
class AttachmentsController(att.AttachmentsController):
AttachmentControllerClass = AttachmentController
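# Illustrative packaging note (assumption, not part of this module): Allura
# discovers tools through the ``[allura]`` entry-point group, so a setup.py
# for this plugin would typically declare something like the following. The
# module path ``forgefiles.main`` is hypothetical.
#
#   entry_points="""
#   [allura]
#   Files = forgefiles.main:FilesApp
#   """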
|
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Custom attribute definition module"""
import flask
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import validates
from sqlalchemy.sql.schema import UniqueConstraint
import ggrc.models
from ggrc import db
from ggrc.utils import benchmark
from ggrc.models import mixins
from ggrc.models.reflection import AttributeInfo
from ggrc.models.custom_attribute_value import CustomAttributeValue
from ggrc.models.exceptions import ValidationError
class CustomAttributeDefinition(mixins.Base, mixins.Titled, db.Model):
"""Custom attribute definition model.
Attributes:
multi_choice_mandatory: comma separated values of mandatory bitmaps.
      First lsb is for comment, second bit is for attachment.
"""
__tablename__ = 'custom_attribute_definitions'
definition_type = db.Column(db.String, nullable=False)
definition_id = db.Column(db.Integer)
attribute_type = db.Column(db.String, nullable=False)
multi_choice_options = db.Column(db.String)
multi_choice_mandatory = db.Column(db.String)
mandatory = db.Column(db.Boolean)
helptext = db.Column(db.String)
placeholder = db.Column(db.String)
attribute_values = db.relationship('CustomAttributeValue',
backref='custom_attribute',
cascade='all, delete-orphan')
@property
def definition_attr(self):
return '{0}_definition'.format(self.definition_type)
@property
def definition(self):
return getattr(self, self.definition_attr)
@definition.setter
def definition(self, value):
self.definition_id = getattr(value, 'id', None)
if hasattr(value, '_inflector'):
self.definition_type = value._inflector.table_singular
else:
self.definition_type = ''
return setattr(self, self.definition_attr, value)
_extra_table_args = (
UniqueConstraint('definition_type', 'definition_id', 'title',
name='uq_custom_attribute'),
db.Index('ix_custom_attributes_title', 'title'))
_include_links = _publish_attrs = [
'definition_type',
'definition_id',
'attribute_type',
'multi_choice_options',
'multi_choice_mandatory',
'mandatory',
'helptext',
'placeholder',
]
_sanitize_html = [
"multi_choice_options",
"helptext",
"placeholder",
]
_reserved_names = {}
def _clone(self, target):
"""Clone custom attribute definitions."""
data = {
"title": self.title,
"definition_type": self.definition_type,
"definition_id": target.id,
"attribute_type": self.attribute_type,
"multi_choice_options": self.multi_choice_options,
"multi_choice_mandatory": self.multi_choice_mandatory,
"mandatory": self.mandatory,
"helptext": self.helptext,
"placeholder": self.placeholder,
}
ca_definition = CustomAttributeDefinition(**data)
db.session.add(ca_definition)
db.session.flush()
return ca_definition
class ValidTypes(object):
"""Class representing valid custom attribute definitions.
Basically an enum, therefore no need for public methods.
"""
# pylint: disable=too-few-public-methods
TEXT = "Text"
RICH_TEXT = "Rich Text"
DROPDOWN = "Dropdown"
CHECKBOX = "Checkbox"
DATE = "Date"
MAP = "Map"
class MultiChoiceMandatoryFlags(object):
"""Enum representing flags in multi_choice_mandatory bitmaps."""
# pylint: disable=too-few-public-methods
COMMENT_REQUIRED = 0b01
EVIDENCE_REQUIRED = 0b10
VALID_TYPES = {
"Text": "Text",
"Rich Text": "Rich Text",
"Dropdown": "Dropdown",
"Checkbox": "Checkbox",
"Date": "Date",
"Person": "Map:Person",
}
@validates("attribute_type")
def validate_attribute_type(self, _, value):
"""Check that provided attribute_type is allowed."""
if value not in self.VALID_TYPES.values():
raise ValidationError("Invalid attribute_type: got {v}, "
"expected one of {l}"
.format(v=value,
l=list(self.VALID_TYPES.values())))
return value
@validates("multi_choice_options")
def validate_multi_choice_options(self, _, value):
"""Strip spaces around options in dropdown options."""
# pylint: disable=no-self-use
# TODO: this should be "if value is not None" to disallow value == ""
if value:
value_list = [part.strip() for part in value.split(",")]
value_set = set(value_list)
if len(value_set) != len(value_list):
raise ValidationError("Duplicate dropdown options are not allowed: "
"'{}'".format(value))
if "" in value_set:
raise ValidationError("Empty dropdown options are not allowed: '{}'"
.format(value))
value = ",".join(value_list)
return value
@validates("multi_choice_mandatory")
def validate_multi_choice_mandatory(self, _, value):
"""Strip spaces around bitmas in dropdown options."""
# pylint: disable=no-self-use
if value:
value = ",".join(part.strip() for part in value.split(","))
return value
@classmethod
def _get_reserved_names(cls, definition_type):
"""Get a list of all attribute names in all objects.
On first call this function computes all possible names that can be used by
any model and stores them in a static frozen set. All later calls just get
this set.
Returns:
frozen set containing all reserved attribute names for the current
object.
"""
# pylint: disable=protected-access
# The _inflector is a false positive in our app.
with benchmark("Generate a list of all reserved attribute names"):
if not cls._reserved_names.get(definition_type):
definition_map = {model._inflector.table_singular: model
for model in ggrc.models.all_models.all_models}
definition_model = definition_map.get(definition_type)
if not definition_model:
raise ValueError("Invalid definition type")
aliases = AttributeInfo.gather_aliases(definition_model)
cls._reserved_names[definition_type] = frozenset(
(value["display_name"] if isinstance(
value, dict) else value).lower()
for value in aliases.values() if value
)
return cls._reserved_names[definition_type]
@classmethod
def _get_global_cad_names(cls, definition_type):
"""Get names of global cad for a given object."""
definition_types = [definition_type]
if definition_type == "assessment_template":
definition_types.append("assessment")
if not getattr(flask.g, "global_cad_names", set()):
query = db.session.query(cls.title, cls.id).filter(
cls.definition_type.in_(definition_types),
cls.definition_id.is_(None)
)
flask.g.global_cad_names = {name.lower(): id_ for name, id_ in query}
return flask.g.global_cad_names
def validate_assessment_title(self, name):
"""Check assessment title uniqueness.
Assessment CAD should not match any name from assessment_template.
Currently assessment templates do not have global custom attributes, but
in the future global CAD on assessment templates could be applied to all
generated assessments. That's why we do not differentiate between global
and local CAD here.
Args:
name: Assessment custom attribute definition title.
Raises:
ValueError if name is an invalid CAD title.
"""
if self.definition_id:
# Local Assessment CAD can match local and global Assessment Template
# CAD.
# NOTE: This is not the best way of checking if the current CAD is local,
# since it relies on the fact that if definition_id will be set, it will
# be set along with definition_type. If we manually set definition_type
# then title then definition_id, this check would fail.
return
if not getattr(flask.g, "template_cad_names", set()):
query = db.session.query(self.__class__.title).filter(
self.__class__.definition_type == "assessment_template"
)
flask.g.template_cad_names = {cad.title.lower() for cad in query}
if name in flask.g.template_cad_names:
raise ValueError(u"Local custom attribute '{}' "
u"already exists for this object type."
.format(name))
@validates("title", "definition_type")
def validate_title(self, key, value):
"""Validate CAD title/name uniqueness.
Note: title field is used for storing CAD names.
CAD names need to follow 3 uniqueness rules:
1) names must not match any attribute name on any existing object.
2) Object level CAD names must not match any global CAD name.
3) Object level CAD names can clash, but not for the same Object
instance. This means we can have two CAD with a name "my cad", with
different attributable_id fields.
Third rule is handled by the database with unique key uq_custom_attribute
(`definition_type`,`definition_id`,`title`).
This validator should check for name collisions for 1st and 2nd rule.
This validator works, because definition_type is never changed. It only
    gets set when the cad is created and after that only the title field can
change. This makes validation using both fields possible.
Args:
value: custom attribute definition name
Returns:
value if the name passes all uniqueness checks.
"""
if key == "title" and self.definition_type:
name = value.lower()
definition_type = self.definition_type
elif key == "definition_type" and self.title:
name = self.title.lower()
definition_type = value.lower()
else:
return value
if name in self._get_reserved_names(definition_type):
raise ValueError(u"Attribute '{}' is reserved for this object type."
.format(name))
if (self._get_global_cad_names(definition_type).get(name) is not None and
self._get_global_cad_names(definition_type).get(name) != self.id):
raise ValueError(u"Global custom attribute '{}' "
u"already exists for this object type"
.format(name))
if definition_type == "assessment":
self.validate_assessment_title(name)
return value
class CustomAttributeMapable(object):
# pylint: disable=too-few-public-methods
# because this is a mixin
@declared_attr
def related_custom_attributes(self):
"""CustomAttributeValues that directly map to this object.
Used just to get the backrefs on the CustomAttributeValue object.
Returns:
a sqlalchemy relationship
"""
return db.relationship(
'CustomAttributeValue',
primaryjoin=lambda: (
(CustomAttributeValue.attribute_value == self.__name__) &
(CustomAttributeValue.attribute_object_id == self.id)),
foreign_keys="CustomAttributeValue.attribute_object_id",
backref='attribute_{0}'.format(self.__name__),
viewonly=True)
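# Illustrative usage sketch (assumption, not upstream code): creating a
# global dropdown definition for a hypothetical "control" definition type.
#
#   cad = CustomAttributeDefinition(
#       title="Review frequency",
#       definition_type="control",   # lowercase singular table name
#       definition_id=None,          # None -> global, not object-level
#       attribute_type=CustomAttributeDefinition.ValidTypes.DROPDOWN,
#       multi_choice_options="Weekly, Monthly, Quarterly",
#   )
#   db.session.add(cad)
#   db.session.commit()
#
# The validators above strip spaces around the options, reject duplicate or
# empty entries, and refuse titles that clash with reserved attribute names
# or with existing global definitions of the same type.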
|
|
"""
Autopsy Forensic Browser
Copyright 2019-2020 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import jarray
import inspect
import time
import calendar
from datetime import datetime
from java.lang import System
from java.util.logging import Level
from java.io import File
from java.util import ArrayList
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import Blackboard
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.blackboardutils import GeoArtifactsHelper
from org.sleuthkit.datamodel.blackboardutils.attributes import GeoWaypoints
from org.sleuthkit.datamodel.blackboardutils.attributes.GeoWaypoints import Waypoint
from org.sleuthkit.datamodel.blackboardutils.attributes import GeoTrackPoints
from org.sleuthkit.datamodel.blackboardutils.attributes.GeoTrackPoints import TrackPoint
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import FileIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
# Based on gpxpy module: https://github.com/tkrajina/gpxpy
import gpxpy
import gpxpy.gpx
import gpxpy.parser
# to get a random filename to prevent race conditions
import uuid
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class GPXParserFileIngestModuleFactory(IngestModuleFactoryAdapter):
moduleName = "GPX Parser"
def getModuleDisplayName(self):
return self.moduleName
def getModuleDescription(self):
return "Module that extracts GEO data from GPX files."
def getModuleVersionNumber(self):
return "1.2"
def isFileIngestModuleFactory(self):
return True
def createFileIngestModule(self, ingestOptions):
return GPXParserFileIngestModule()
# File level ingest module.
class GPXParserFileIngestModule(FileIngestModule):
logger = Logger.getLogger(
GPXParserFileIngestModuleFactory.moduleName)
writeDebugMsgs = False
def log(self, level, msg):
self.logger.logp(level, self.__class__.__name__,
inspect.stack()[1][3], msg)
def __init__(self):
self.context = None
self.fileCount = 0
# Get the module name, it will be needed for adding attributes
self.moduleName = GPXParserFileIngestModuleFactory.moduleName
# Get the case database and its blackboard.
self.skCase = Case.getCurrentCase().getSleuthkitCase()
self.blackboard = self.skCase.getBlackboard()
# Check if a folder for this module is present in the case Temp directory.
# If not, create it.
self.dirName = os.path.join(
Case.getCurrentCase().getTempDirectory(), "GPX_Parser_Module")
try:
os.stat(self.dirName)
except:
os.mkdir(self.dirName)
# Where any setup and configuration is done.
def startUp(self, context):
self.context = context
self.fileCount = 0
# Where the file analysis is done.
def process(self, file):
if not file.getName().lower().endswith(".gpx"):
return IngestModule.ProcessResult.OK
# Create a temp file name. It appears that we cannot close and delete
# this file, but we can overwrite it for each file we need to process.
fileName = os.path.join(self.dirName, uuid.uuid4().hex + ".gpx")
# Create a GeoArtifactsHelper for this file.
geoArtifactHelper = GeoArtifactsHelper(
self.skCase, self.moduleName, None, file)
if self.writeDebugMsgs:
self.log(Level.INFO, "Processing " + file.getUniquePath() +
" (objID = " + str(file.getId()) + ")")
# Write the file so that it can be parsed by gpxpy.
localFile = File(fileName)
ContentUtils.writeToFile(file, localFile)
# Send the file to gpxpy for parsing.
gpxfile = open(fileName)
try:
gpx = gpxpy.parse(gpxfile)
if self.writeDebugMsgs:
self.log(Level.INFO, "Parsed " + file.getUniquePath() +
" (objID = " + str(file.getId()) + ")")
except Exception as e:
self.log(Level.WARNING, "Error parsing file " + file.getUniquePath() +
" (objID = " + str(file.getId()) + "):" + str(e))
return IngestModule.ProcessResult.ERROR
if gpx:
if self.writeDebugMsgs:
self.log(Level.INFO, "Processing tracks from " +
file.getUniquePath() + " (objID = " + str(file.getId()) + ")")
for track in gpx.tracks:
for segment in track.segments:
geoPointList = GeoTrackPoints()
for point in segment.points:
elevation = 0
if point.elevation != None:
elevation = point.elevation
timeStamp = 0
try:
if (point.time != None):
timeStamp = long(time.mktime(
point.time.timetuple()))
except Exception as e:
self.log(Level.WARNING, "Error getting track timestamp from " +
file.getUniquePath() + " (objID = " + str(file.getId()) + "):" + str(e))
geoPointList.addPoint(TrackPoint(
point.latitude, point.longitude, elevation, None, 0, 0, 0, timeStamp))
try:
geoArtifactHelper.addTrack("Track", geoPointList, None)
except Blackboard.BlackboardException as e:
self.log(Level.SEVERE, "Error posting GPS track artifact for " +
file.getUniquePath() + " (objID = " + str(file.getId()) + "):" + e.getMessage())
except TskCoreException as e:
self.log(Level.SEVERE, "Error creating GPS track artifact for " +
file.getUniquePath() + " (objID = " + str(file.getId()) + "):" + e.getMessage())
if self.writeDebugMsgs:
self.log(Level.INFO, "Processing waypoints from " +
file.getUniquePath() + " (objID = " + str(file.getId()) + ")")
for waypoint in gpx.waypoints:
try:
art = file.newArtifact(
BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_BOOKMARK)
attributes = ArrayList()
attributes.add(BlackboardAttribute(
BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LATITUDE.getTypeID(), self.moduleName, waypoint.latitude))
attributes.add(BlackboardAttribute(
BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LONGITUDE.getTypeID(), self.moduleName, waypoint.longitude))
attributes.add(BlackboardAttribute(
BlackboardAttribute.ATTRIBUTE_TYPE.TSK_FLAG.getTypeID(), self.moduleName, "Waypoint"))
attributes.add(BlackboardAttribute(
BlackboardAttribute.ATTRIBUTE_TYPE.TSK_NAME.getTypeID(), self.moduleName, waypoint.name))
attributes.add(BlackboardAttribute(
BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PROG_NAME.getTypeID(), self.moduleName, "GPXParser"))
art.addAttributes(attributes)
self.blackboard.postArtifact(art, self.moduleName)
except Blackboard.BlackboardException as e:
self.log(Level.SEVERE, "Error posting GPS bookmark artifact for " +
file.getUniquePath() + " (objID = " + str(file.getId()) + "):" + e.getMessage())
except TskCoreException as e:
self.log(Level.SEVERE, "Error creating GPS bookmark artifact for " +
file.getUniquePath() + " (objID = " + str(file.getId()) + "):" + e.getMessage())
if self.writeDebugMsgs:
self.log(Level.INFO, "Processing routes from " +
file.getUniquePath() + " (objID = " + str(file.getId()) + ")")
for route in gpx.routes:
geoWaypoints = GeoWaypoints()
for point in route.points:
geoWaypoints.addPoint(
Waypoint(point.latitude, point.longitude, point.elevation, point.name))
try:
geoArtifactHelper.addRoute(None, None, geoWaypoints, None)
except Blackboard.BlackboardException as e:
self.log("Error posting GPS route artifact for " + file.getUniquePath() +
" (objID = " + str(file.getId()) + "):" + e.getMessage())
except TskCoreException as e:
self.log(Level.SEVERE, "Error creating GPS route artifact for " +
file.getUniquePath() + " (objID = " + str(file.getId()) + "):" + e.getMessage())
self.fileCount += 1
return IngestModule.ProcessResult.OK
def shutDown(self):
message = IngestMessage.createMessage(
IngestMessage.MessageType.DATA, GPXParserFileIngestModuleFactory.moduleName,
str(self.fileCount) + " files found")
        IngestServices.getInstance().postMessage(message)
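
# Illustrative sketch (plain CPython, outside Autopsy): the gpxpy calls this
# module relies on, shown on their own. Kept as a comment because the Java
# imports above only resolve when the module runs inside Autopsy's Jython.
#
#     import gpxpy
#     with open("sample.gpx") as f:
#         gpx = gpxpy.parse(f)
#     for track in gpx.tracks:
#         for segment in track.segments:
#             for point in segment.points:
#                 print(point.latitude, point.longitude, point.time)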
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
from matplotlib import docstring
import matplotlib.transforms as mtransforms
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.cbook as cbook
import numpy as np
import warnings
rcParams = matplotlib.rcParams
class Spine(mpatches.Patch):
"""an axis spine -- the line noting the data area boundaries
Spines are the lines connecting the axis tick marks and noting the
boundaries of the data area. They can be placed at arbitrary
positions. See function:`~matplotlib.spines.Spine.set_position`
for more information.
The default position is ``('outward',0)``.
Spines are subclasses of class:`~matplotlib.patches.Patch`, and
inherit much of their behavior.
    Spines draw a line or a circle, depending on whether
function:`~matplotlib.spines.Spine.set_patch_line` or
function:`~matplotlib.spines.Spine.set_patch_circle` has been
called. Line-like is the default.
"""
def __str__(self):
return "Spine"
@docstring.dedent_interpd
def __init__(self, axes, spine_type, path, **kwargs):
"""
- *axes* : the Axes instance containing the spine
- *spine_type* : a string specifying the spine type
- *path* : the path instance used to draw the spine
Valid kwargs are:
%(Patch)s
"""
super(Spine, self).__init__(**kwargs)
self.axes = axes
self.set_figure(self.axes.figure)
self.spine_type = spine_type
self.set_facecolor('none')
self.set_edgecolor(rcParams['axes.edgecolor'])
self.set_linewidth(rcParams['axes.linewidth'])
self.set_capstyle('projecting')
self.axis = None
self.set_zorder(2.5)
self.set_transform(self.axes.transData) # default transform
self._bounds = None # default bounds
self._smart_bounds = False
# Defer initial position determination. (Not much support for
# non-rectangular axes is currently implemented, and this lets
# them pass through the spines machinery without errors.)
self._position = None
if not isinstance(path, matplotlib.path.Path):
msg = "'path' must be an instance of 'matplotlib.path.Path'"
raise ValueError(msg)
self._path = path
# To support drawing both linear and circular spines, this
# class implements Patch behavior two ways. If
# self._patch_type == 'line', behave like a mpatches.PathPatch
# instance. If self._patch_type == 'circle', behave like a
# mpatches.Ellipse instance.
self._patch_type = 'line'
# Behavior copied from mpatches.Ellipse:
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = mtransforms.IdentityTransform()
def set_smart_bounds(self, value):
"""set the spine and associated axis to have smart bounds"""
self._smart_bounds = value
# also set the axis if possible
if self.spine_type in ('left', 'right'):
self.axes.yaxis.set_smart_bounds(value)
elif self.spine_type in ('top', 'bottom'):
self.axes.xaxis.set_smart_bounds(value)
self.stale = True
def get_smart_bounds(self):
"""get whether the spine has smart bounds"""
return self._smart_bounds
def set_patch_circle(self, center, radius):
"""set the spine to be circular"""
self._patch_type = 'circle'
self._center = center
self._width = radius * 2
self._height = radius * 2
self._angle = 0
# circle drawn on axes transform
self.set_transform(self.axes.transAxes)
self.stale = True
def set_patch_line(self):
"""set the spine to be linear"""
self._patch_type = 'line'
self.stale = True
# Behavior copied from mpatches.Ellipse:
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
                 makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
assert self._patch_type == 'circle'
center = (self.convert_xunits(self._center[0]),
self.convert_yunits(self._center[1]))
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
self._patch_transform = mtransforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self._angle) \
.translate(*center)
def get_patch_transform(self):
if self._patch_type == 'circle':
self._recompute_transform()
return self._patch_transform
else:
return super(Spine, self).get_patch_transform()
def get_path(self):
return self._path
def _ensure_position_is_set(self):
if self._position is None:
# default position
self._position = ('outward', 0.0) # in points
self.set_position(self._position)
def register_axis(self, axis):
"""register an axis
An axis should be registered with its corresponding spine from
the Axes instance. This allows the spine to clear any axis
properties when needed.
"""
self.axis = axis
if self.axis is not None:
self.axis.cla()
self.stale = True
def cla(self):
"""Clear the current spine"""
self._position = None # clear position
if self.axis is not None:
self.axis.cla()
def is_frame_like(self):
"""return True if directly on axes frame
This is useful for determining if a spine is the edge of an
old style MPL plot. If so, this function will return True.
"""
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
if len(position) != 2:
raise ValueError("position should be 2-tuple")
position_type, amount = position
if position_type == 'outward' and amount == 0:
return True
else:
return False
def _adjust_location(self):
"""automatically set spine bounds to the view interval"""
if self.spine_type == 'circle':
return
if self._bounds is None:
if self.spine_type in ('left', 'right'):
low, high = self.axes.viewLim.intervaly
elif self.spine_type in ('top', 'bottom'):
low, high = self.axes.viewLim.intervalx
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if self._smart_bounds:
# attempt to set bounds in sophisticated way
if low > high:
# handle inverted limits
low, high = high, low
viewlim_low = low
viewlim_high = high
del low, high
if self.spine_type in ('left', 'right'):
datalim_low, datalim_high = self.axes.dataLim.intervaly
ticks = self.axes.get_yticks()
elif self.spine_type in ('top', 'bottom'):
datalim_low, datalim_high = self.axes.dataLim.intervalx
ticks = self.axes.get_xticks()
# handle inverted limits
ticks = list(ticks)
ticks.sort()
ticks = np.array(ticks)
if datalim_low > datalim_high:
datalim_low, datalim_high = datalim_high, datalim_low
if datalim_low < viewlim_low:
# Data extends past view. Clip line to view.
low = viewlim_low
else:
# Data ends before view ends.
cond = (ticks <= datalim_low) & (ticks >= viewlim_low)
tickvals = ticks[cond]
if len(tickvals):
# A tick is less than or equal to lowest data point.
low = tickvals[-1]
else:
# No tick is available
low = datalim_low
low = max(low, viewlim_low)
if datalim_high > viewlim_high:
# Data extends past view. Clip line to view.
high = viewlim_high
else:
# Data ends before view ends.
cond = (ticks >= datalim_high) & (ticks <= viewlim_high)
tickvals = ticks[cond]
if len(tickvals):
# A tick is greater than or equal to highest data
# point.
high = tickvals[0]
else:
# No tick is available
high = datalim_high
high = min(high, viewlim_high)
else:
low, high = self._bounds
v1 = self._path.vertices
assert v1.shape == (2, 2), 'unexpected vertices shape'
if self.spine_type in ['left', 'right']:
v1[0, 1] = low
v1[1, 1] = high
elif self.spine_type in ['bottom', 'top']:
v1[0, 0] = low
v1[1, 0] = high
else:
raise ValueError('unable to set bounds for spine "%s"' %
self.spine_type)
@allow_rasterization
def draw(self, renderer):
self._adjust_location()
ret = super(Spine, self).draw(renderer)
self.stale = False
return ret
def _calc_offset_transform(self):
"""calculate the offset transform performed by the spine"""
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
assert len(position) == 2, "position should be 2-tuple"
position_type, amount = position
assert position_type in ('axes', 'outward', 'data')
if position_type == 'outward':
if amount == 0:
# short circuit commonest case
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif self.spine_type in ['left', 'right', 'top', 'bottom']:
offset_vec = {'left': (-1, 0),
'right': (1, 0),
'bottom': (0, -1),
'top': (0, 1),
}[self.spine_type]
# calculate x and y offset in dots
offset_x = amount * offset_vec[0] / 72.0
offset_y = amount * offset_vec[1] / 72.0
self._spine_transform = ('post',
mtransforms.ScaledTranslation(
offset_x,
offset_y,
self.figure.dpi_scale_trans))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'axes':
if self.spine_type in ('left', 'right'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
# keep y unchanged, fix x at
# amount
0, 0, 0, 1, amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
# keep x unchanged, fix y at
# amount
1, 0, 0, 0, 0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'data':
if self.spine_type in ('right', 'top'):
# The right and top spines have a default position of 1 in
# axes coordinates. When specifying the position in data
# coordinates, we need to calculate the position relative to 0.
amount -= 1
if self.spine_type in ('left', 'right'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
def set_position(self, position):
"""set the position of the spine
Spine position is specified by a 2 tuple of (position type,
amount). The position types are:
* 'outward' : place the spine out from the data area by the
specified number of points. (Negative values specify placing the
spine inward.)
* 'axes' : place the spine at the specified Axes coordinate (from
0.0-1.0).
* 'data' : place the spine at the specified data coordinate.
        Additionally, shorthand notations define special positions:
* 'center' -> ('axes',0.5)
* 'zero' -> ('data', 0.0)
"""
if position in ('center', 'zero'):
# special positions
pass
else:
if len(position) != 2:
raise ValueError("position should be 'center' or 2-tuple")
if position[0] not in ['outward', 'axes', 'data']:
msg = ("position[0] should be in [ 'outward' | 'axes' |"
" 'data' ]")
raise ValueError(msg)
self._position = position
self._calc_offset_transform()
self.set_transform(self.get_spine_transform())
if self.axis is not None:
self.axis.reset_ticks()
self.stale = True
def get_position(self):
"""get the spine position"""
self._ensure_position_is_set()
return self._position
def get_spine_transform(self):
"""get the spine transform"""
self._ensure_position_is_set()
what, how = self._spine_transform
if what == 'data':
# special case data based spine locations
data_xform = self.axes.transScale + \
(how + self.axes.transLimits + self.axes.transAxes)
if self.spine_type in ['left', 'right']:
result = mtransforms.blended_transform_factory(
data_xform, self.axes.transData)
elif self.spine_type in ['top', 'bottom']:
result = mtransforms.blended_transform_factory(
self.axes.transData, data_xform)
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
return result
if self.spine_type in ['left', 'right']:
base_transform = self.axes.get_yaxis_transform(which='grid')
elif self.spine_type in ['top', 'bottom']:
base_transform = self.axes.get_xaxis_transform(which='grid')
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if what == 'identity':
return base_transform
elif what == 'post':
return base_transform + how
elif what == 'pre':
return how + base_transform
else:
raise ValueError("unknown spine_transform type: %s" % what)
def set_bounds(self, low, high):
"""Set the bounds of the spine."""
if self.spine_type == 'circle':
raise ValueError(
'set_bounds() method incompatible with circular spines')
self._bounds = (low, high)
self.stale = True
def get_bounds(self):
"""Get the bounds of the spine."""
return self._bounds
@classmethod
def linear_spine(cls, axes, spine_type, **kwargs):
"""
        (classmethod) Returns a linear :class:`Spine`.
"""
# all values of 13 get replaced upon call to set_bounds()
if spine_type == 'left':
path = mpath.Path([(0.0, 13), (0.0, 13)])
elif spine_type == 'right':
path = mpath.Path([(1.0, 13), (1.0, 13)])
elif spine_type == 'bottom':
path = mpath.Path([(13, 0.0), (13, 0.0)])
elif spine_type == 'top':
path = mpath.Path([(13, 1.0), (13, 1.0)])
else:
raise ValueError('unable to make path for spine "%s"' % spine_type)
result = cls(axes, spine_type, path, **kwargs)
result.set_visible(rcParams['axes.spines.{0}'.format(spine_type)])
return result
@classmethod
def circular_spine(cls, axes, center, radius, **kwargs):
"""
        (classmethod) Returns a circular :class:`Spine`.
"""
path = mpath.Path.unit_circle()
spine_type = 'circle'
result = cls(axes, spine_type, path, **kwargs)
result.set_patch_circle(center, radius)
return result
def set_color(self, c):
"""
Set the edgecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
# The facecolor of a spine is always 'none' by default -- let
# the user change it manually if desired.
self.set_edgecolor(c)
self.stale = True
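
# A minimal illustrative sketch (not part of matplotlib itself): how the Spine
# API above is typically driven from user code. Assumes only that pyplot and a
# default backend are available; guarded so it never runs on import.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([-2, -1, 0, 1, 2], [4, 1, 0, 1, 4])
    # Hide the top/right spines and move the remaining two to the data origin.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_position(('data', 0))
    ax.spines['bottom'].set_position('zero')  # shorthand for ('data', 0.0)
    # Only draw the left spine over the range of the data actually plotted.
    ax.spines['left'].set_bounds(0, 4)
    plt.show()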
|
|
#!/Users/Alex/MHacks9/FoodStickersMessages/bin/python3.5
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
        if hasattr(self.stack[0], 'format'):  # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
        Merge top-of-stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
        self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
        image = self.do_pop()
        image.draft(mode, (xsize, ysize))
        self.push(image)
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
        extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
        from PIL import ImageChops
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        image = self.do_pop()
        self.push(ImageChops.offset(image, xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
        self.push(image.transpose(getattr(Image, transpose)))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
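
# A small illustrative sketch (not part of the original script): the same
# crop-and-show pipeline as the usage example in the module docstring, driven
# programmatically rather than from the command line. Assumes a readable
# "test.png" in the working directory; nothing calls this helper by default.
def _example_crop_and_show(path="test.png"):
    driver = PILDriver()
    driver.execute(["show", "crop", "0", "0", "200", "300", "open", path])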
if __name__ == '__main__':
import sys
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
|
import flask
import pymysql
from donut.modules.courses import blueprint, helpers
YEARS = {1: 'Freshman', 2: 'Sophomore', 3: 'Junior', 4: 'Senior'}
WEEK_DAYS = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
SCHEDULER_START_HOUR = 8 # 8 AM
SCHEDULER_END_HOUR = 23 # 11 PM
SCHEDULER_HOUR_HEIGHT = 50 # px
@blueprint.route('/planner')
def planner():
return flask.render_template(
'planner.html', TERMS=helpers.TERM_NAMES, YEARS=YEARS)
@blueprint.route('/scheduler')
def scheduler():
return flask.render_template(
'scheduler.html',
TERMS=helpers.TERM_NAMES,
WEEK_DAYS=WEEK_DAYS,
START_HOUR=SCHEDULER_START_HOUR,
END_HOUR=SCHEDULER_END_HOUR,
HOUR_HEIGHT=SCHEDULER_HOUR_HEIGHT,
terms=helpers.get_terms())
@blueprint.route('/1/planner/courses')
def planner_courses():
return flask.jsonify(helpers.get_year_courses())
@blueprint.route('/1/planner/course/<int:course_id>/add/<int:year>')
def planner_add_course(course_id, year):
username = flask.session.get('username')
if not username:
return flask.jsonify({
'success': False,
'message': 'Must be logged in to save'
})
try:
helpers.add_planner_course(username, course_id, year)
except Exception as e:
if helpers.is_duplicate_error(e):
flask.current_app.logger.warning(f'Duplicate planner entry: {e}')
return flask.jsonify({
'success':
False,
'message':
'Cannot add a class twice in the same term'
})
else:
raise e
return flask.jsonify({'success': True})
@blueprint.route('/1/planner/course/<int:course_id>/drop/<int:year>')
def planner_drop_course(course_id, year):
username = flask.session.get('username')
if not username:
return flask.jsonify({
'success': False,
'message': 'Must be logged in to save'
})
helpers.drop_planner_course(username, course_id, year)
return flask.jsonify({'success': True})
@blueprint.route('/1/planner/<int:year>/<int:term>/placeholder', \
methods=('POST', ))
def planner_add_placeholder(year, term):
username = flask.session.get('username')
if not username:
return flask.jsonify({
'success': False,
'message': 'Must be logged in to save'
})
form = flask.request.form
course = form.get('course')
units = form.get('units')
if not (course and units):
return flask.jsonify({
'success': False,
'message': 'Missing course or units'
})
try:
units = float(units)
except ValueError:
return flask.jsonify({
'success': False,
'message': 'Invalid number of units'
})
placeholder_id = \
helpers.add_planner_placeholder(username, year, term, course, units)
return flask.jsonify({'success': True, 'id': placeholder_id})
@blueprint.route('/1/planner/placeholder/<int:id>', methods=('DELETE', ))
def planner_drop_placeholder(id):
username = flask.session.get('username')
if not username:
return flask.jsonify({
'success': False,
'message': 'Must be logged in to save'
})
if not helpers.drop_planner_placeholder(username, id):
return flask.jsonify({
'success': False,
'message': 'Invalid placeholder'
})
return flask.jsonify({'success': True})
@blueprint.route('/1/planner/courses/mine')
def planner_mine():
username = flask.session.get('username')
if username:
courses = helpers.get_user_planner_courses(username)
placeholders = helpers.get_user_planner_placeholders(username)
else:
courses = ()
placeholders = ()
return flask.jsonify({'courses': courses, 'placeholders': placeholders})
@blueprint.route('/1/scheduler/courses/<int:year>/<int:term>')
def scheduler_courses(year, term):
return flask.jsonify(helpers.get_scheduler_courses(year, term))
@blueprint.route('/1/scheduler/course/<int:course>/section/<int:section>/add')
def scheduler_add_section(course, section):
username = flask.session.get('username')
if not username:
return flask.jsonify({
'success': False,
'message': 'Must be logged in to save'
})
try:
helpers.add_scheduler_section(username, course, section)
except Exception as e:
if helpers.is_duplicate_error(e):
flask.current_app.logger.warning(f'Duplicate scheduler entry: {e}')
return flask.jsonify({
'success': False,
'message': 'Cannot add a section twice'
})
else:
raise e
return flask.jsonify({'success': True})
@blueprint.route('/1/scheduler/course/<int:course>/section/<int:section>/drop')
def scheduler_drop_section(course, section):
username = flask.session.get('username')
if not username:
return flask.jsonify({
'success': False,
'message': 'Must be logged in to save'
})
helpers.drop_scheduler_section(username, course, section)
return flask.jsonify({'success': True})
@blueprint.route('/1/scheduler/sections/<int:year>/<int:term>/mine')
def scheduler_mine(year, term):
username = flask.session.get('username')
if not username: return flask.jsonify(())
return flask.jsonify(
helpers.get_user_scheduler_sections(username, year, term))
@blueprint.route('/1/scheduler/edit_notes', methods=['POST'])
def edit_notes():
username = flask.session.get('username')
if not username:
return flask.jsonify({
'success': False,
'message': 'Must be logged in to save'
})
data = flask.request.get_json(force=True)
course = data.get('course')
section = data.get('section')
notes = data.get('notes')
if notes:
helpers.edit_notes(username, course, section, notes)
else:
helpers.delete_notes(username, course, section)
return flask.jsonify({'success': True})
@blueprint.route('/1/scheduler/notes/<int:course>/section/<int:section>')
def get_notes(course, section):
username = flask.session.get('username')
if not username: return flask.jsonify(None)
return flask.jsonify(helpers.get_notes(username, course, section))
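
# Illustrative sketch (not part of this module): exercising a few of the JSON
# endpoints above with Flask's test client. Assumes the donut app object is
# importable as ``app``; paths are shown relative to the blueprint's mount
# point and read-only calls need no session.
#
#     client = app.test_client()
#     client.get('/1/planner/courses')           # all offered planner courses
#     client.get('/1/scheduler/courses/2021/1')  # sections for a year/term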
|
|
""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
from _pytest._pluggy import HookspecMarker
hookspec = HookspecMarker("pytest")
# -------------------------------------------------------------------------
# Initialization hooks called for every plugin
# -------------------------------------------------------------------------
@hookspec(historic=True)
def pytest_addhooks(pluginmanager):
"""called at plugin registration time to allow adding new hooks via a call to
pluginmanager.add_hookspecs(module_or_class, prefix)."""
@hookspec(historic=True)
def pytest_namespace():
"""return dict of name->object to be made globally available in
the pytest namespace. This hook is called at plugin registration
time.
"""
@hookspec(historic=True)
def pytest_plugin_registered(plugin, manager):
""" a new pytest plugin got registered. """
@hookspec(historic=True)
def pytest_addoption(parser):
"""register argparse-style options and ini-style config values,
called once at the beginning of a test run.
.. note::
This function should be implemented only in plugins or ``conftest.py``
files situated at the tests root directory due to how pytest
:ref:`discovers plugins during startup <pluginorder>`.
:arg parser: To add command line options, call
:py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
To add ini-file values call :py:func:`parser.addini(...)
<_pytest.config.Parser.addini>`.
Options can later be accessed through the
:py:class:`config <_pytest.config.Config>` object, respectively:
- :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
retrieve the value of a command line option.
- :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
a value read from an ini-style file.
The config object is passed around on many internal objects via the ``.config``
attribute or can be retrieved as the ``pytestconfig`` fixture or accessed
via (deprecated) ``pytest.config``.
"""
@hookspec(historic=True)
def pytest_configure(config):
""" called after command line options have been parsed
and all plugins and initial conftest files been loaded.
This hook is called for every plugin.
"""
# -------------------------------------------------------------------------
# Bootstrapping hooks called for plugins registered early enough:
# internal and 3rd party plugins as well as directly
# discoverable conftest.py local plugins.
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_cmdline_parse(pluginmanager, args):
"""return initialized config object, parsing the specified args. """
def pytest_cmdline_preparse(config, args):
"""(deprecated) modify command line arguments before option parsing. """
@hookspec(firstresult=True)
def pytest_cmdline_main(config):
""" called for performing the main command line action. The default
implementation will invoke the configure hooks and runtest_mainloop. """
def pytest_load_initial_conftests(early_config, parser, args):
""" implements the loading of initial conftest files ahead
of command line option parsing. """
# -------------------------------------------------------------------------
# collection hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_collection(session):
""" perform the collection protocol for the given session. """
def pytest_collection_modifyitems(session, config, items):
""" called after collection has been performed, may filter or re-order
the items in-place."""
def pytest_collection_finish(session):
""" called after collection has been performed and modified. """
@hookspec(firstresult=True)
def pytest_ignore_collect(path, config):
""" return True to prevent considering this path for collection.
This hook is consulted for all files and directories prior to calling
more specific hooks.
"""
@hookspec(firstresult=True)
def pytest_collect_directory(path, parent):
""" called before traversing a directory for collection files. """
def pytest_collect_file(path, parent):
""" return collection Node or None for the given path. Any new node
needs to have the specified ``parent`` as a parent."""
# logging hooks for collection
def pytest_collectstart(collector):
""" collector starts collecting. """
def pytest_itemcollected(item):
""" we just collected a test item. """
def pytest_collectreport(report):
""" collector finished collecting. """
def pytest_deselected(items):
""" called for test items deselected by keyword. """
@hookspec(firstresult=True)
def pytest_make_collect_report(collector):
""" perform ``collector.collect()`` and return a CollectReport. """
# -------------------------------------------------------------------------
# Python test function related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_pycollect_makemodule(path, parent):
""" return a Module collector or None for the given path.
This hook will be called for each matching test module path.
The pytest_collect_file hook needs to be used if you want to
create test modules for files that do not match as a test module.
"""
@hookspec(firstresult=True)
def pytest_pycollect_makeitem(collector, name, obj):
""" return custom item/collector for a python object in a module, or None. """
@hookspec(firstresult=True)
def pytest_pyfunc_call(pyfuncitem):
""" call underlying test function. """
def pytest_generate_tests(metafunc):
""" generate (multiple) parametrized calls to a test function."""
@hookspec(firstresult=True)
def pytest_make_parametrize_id(config, val):
"""Return a user-friendly string representation of the given ``val`` that will be used
by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
"""
# -------------------------------------------------------------------------
# generic runtest related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_runtestloop(session):
""" called for performing the main runtest loop
(after collection finished). """
def pytest_itemstart(item, node):
""" (deprecated, use pytest_runtest_logstart). """
@hookspec(firstresult=True)
def pytest_runtest_protocol(item, nextitem):
""" implements the runtest_setup/call/teardown protocol for
the given test item, including capturing exceptions and calling
reporting hooks.
:arg item: test item for which the runtest protocol is performed.
:arg nextitem: the scheduled-to-be-next test item (or None if this
is the end my friend). This argument is passed on to
:py:func:`pytest_runtest_teardown`.
:return boolean: True if no further hook implementations should be invoked.
"""
def pytest_runtest_logstart(nodeid, location):
""" signal the start of running a single test item. """
def pytest_runtest_setup(item):
""" called before ``pytest_runtest_call(item)``. """
def pytest_runtest_call(item):
""" called to execute the test ``item``. """
def pytest_runtest_teardown(item, nextitem):
""" called after ``pytest_runtest_call``.
:arg nextitem: the scheduled-to-be-next test item (None if no further
test item is scheduled). This argument can be used to
perform exact teardowns, i.e. calling just enough finalizers
so that nextitem only needs to call setup-functions.
"""
@hookspec(firstresult=True)
def pytest_runtest_makereport(item, call):
""" return a :py:class:`_pytest.runner.TestReport` object
for the given :py:class:`pytest.Item` and
:py:class:`_pytest.runner.CallInfo`.
"""
def pytest_runtest_logreport(report):
""" process a test setup/call/teardown report relating to
the respective phase of executing a test. """
# -------------------------------------------------------------------------
# Fixture related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_fixture_setup(fixturedef, request):
""" performs fixture setup execution. """
@hookspec(firstresult=True)
def pytest_fixture_finalize(fixturedef, finalizer):
""" performs fixture finalization. """
def pytest_fixture_post_finalizer(fixturedef):
""" called after fixture teardown, but before the cache is cleared so
the fixture result cache ``fixturedef.cached_result`` can
still be accessed."""
# -------------------------------------------------------------------------
# test session related hooks
# -------------------------------------------------------------------------
def pytest_sessionstart(session):
""" before session.main() is called. """
def pytest_sessionfinish(session, exitstatus):
""" whole test run finishes. """
def pytest_unconfigure(config):
""" called before test process is exited. """
# -------------------------------------------------------------------------
# hooks for customising the assert methods
# -------------------------------------------------------------------------
def pytest_assertrepr_compare(config, op, left, right):
"""return explanation for comparisons in failing assert expressions.
Return None for no custom explanation, otherwise return a list
of strings. The strings will be joined by newlines but any newlines
*in* a string will be escaped. Note that all but the first line will
    be indented slightly; the intention is for the first line to be a summary.
"""
# -------------------------------------------------------------------------
# hooks for influencing reporting (invoked from _pytest_terminal)
# -------------------------------------------------------------------------
def pytest_report_header(config, startdir):
""" return a string to be displayed as header info for terminal reporting."""
@hookspec(firstresult=True)
def pytest_report_teststatus(report):
""" return result-category, shortletter and verbose word for reporting."""
def pytest_terminal_summary(terminalreporter, exitstatus):
""" add additional section in terminal summary reporting. """
@hookspec(historic=True)
def pytest_logwarning(message, code, nodeid, fslocation):
""" process a warning specified by a message, a code string,
a nodeid and fslocation (both of which may be None
    if the warning is not tied to a particular node/location)."""
# -------------------------------------------------------------------------
# doctest hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_doctest_prepare_content(content):
""" return processed content for a given doctest"""
# -------------------------------------------------------------------------
# error handling and internal debugging hooks
# -------------------------------------------------------------------------
def pytest_internalerror(excrepr, excinfo):
""" called for internal errors. """
def pytest_keyboard_interrupt(excinfo):
""" called for keyboard interrupt. """
def pytest_exception_interact(node, call, report):
"""called when an exception was raised which can potentially be
interactively handled.
This hook is only called if an exception was raised
that is not an internal exception like ``skip.Exception``.
"""
def pytest_enter_pdb(config):
""" called upon pdb.set_trace(), can be used by plugins to take special
action just before the python debugger enters in interactive mode.
:arg config: pytest config object
:type config: _pytest.config.Config
"""
|
|
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models.model import restore_original_dimensions, flatten
from ray.rllib.utils.annotations import PublicAPI
@PublicAPI
class ModelV2:
"""Defines a Keras-style abstract network model for use with RLlib.
Custom models should extend either TFModelV2 or TorchModelV2 instead of
this class directly.
Data flow:
obs -> forward() -> model_out
value_function() -> V(s)
Attributes:
obs_space (Space): observation space of the target gym env. This
may have an `original_space` attribute that specifies how to
unflatten the tensor into a ragged tensor.
action_space (Space): action space of the target gym env
num_outputs (int): number of output units of the model
model_config (dict): config for the model, documented in ModelCatalog
name (str): name (scope) for the model
framework (str): either "tf" or "torch"
"""
def __init__(self, obs_space, action_space, num_outputs, model_config,
name, framework):
"""Initialize the model.
This method should create any variables used by the model.
"""
self.obs_space = obs_space
self.action_space = action_space
self.num_outputs = num_outputs
self.model_config = model_config
self.name = name or "default_model"
self.framework = framework
self._last_output = None
def get_initial_state(self):
"""Get the initial recurrent state values for the model.
Returns:
list of np.array objects, if any
"""
return []
def forward(self, input_dict, state, seq_lens):
"""Call the model with the given input tensors and state.
Any complex observations (dicts, tuples, etc.) will be unpacked by
__call__ before being passed to forward(). To access the flattened
observation tensor, refer to input_dict["obs_flat"].
This method can be called any number of times. In eager execution,
each call to forward() will eagerly evaluate the model. In symbolic
execution, each call to forward creates a computation graph that
operates over the variables of this model (i.e., shares weights).
Custom models should override this instead of __call__.
Arguments:
input_dict (dict): dictionary of input tensors, including "obs",
"obs_flat", "prev_action", "prev_reward", "is_training"
state (list): list of state tensors with sizes matching those
returned by get_initial_state + the batch dimension
seq_lens (Tensor): 1d tensor holding input sequence lengths
Returns:
(outputs, state): The model output tensor of size
[BATCH, num_outputs]
"""
raise NotImplementedError
def value_function(self):
"""Return the value function estimate for the most recent forward pass.
Returns:
value estimate tensor of shape [BATCH].
"""
raise NotImplementedError
def custom_loss(self, policy_loss, loss_inputs):
"""Override to customize the loss function used to optimize this model.
This can be used to incorporate self-supervised losses (by defining
a loss over existing input and output tensors of this model), and
supervised losses (by defining losses over a variable-sharing copy of
this model's layers).
        You can find a runnable example in examples/custom_loss.py.
Arguments:
policy_loss (Tensor): scalar policy loss from the policy.
loss_inputs (dict): map of input placeholders for rollout data.
Returns:
Scalar tensor for the customized loss for this model.
"""
return policy_loss
def metrics(self):
"""Override to return custom metrics from your model.
The stats will be reported as part of the learner stats, i.e.,
info:
learner:
model:
key1: metric1
key2: metric2
Returns:
Dict of string keys to scalar tensors.
"""
return {}
def __call__(self, input_dict, state=None, seq_lens=None):
"""Call the model with the given input tensors and state.
This is the method used by RLlib to execute the forward pass. It calls
forward() internally after unpacking nested observation tensors.
Custom models should override forward() instead of __call__.
Arguments:
input_dict (dict): dictionary of input tensors, including "obs",
"prev_action", "prev_reward", "is_training"
state (list): list of state tensors with sizes matching those
returned by get_initial_state + the batch dimension
seq_lens (Tensor): 1d tensor holding input sequence lengths
Returns:
(outputs, state): The model output tensor of size
[BATCH, output_spec.size] or a list of tensors corresponding to
output_spec.shape_list, and a list of state tensors of
[BATCH, state_size_i].
"""
restored = input_dict.copy()
restored["obs"] = restore_original_dimensions(
input_dict["obs"], self.obs_space, self.framework)
if len(input_dict["obs"].shape) > 2:
restored["obs_flat"] = flatten(input_dict["obs"], self.framework)
else:
restored["obs_flat"] = input_dict["obs"]
with self.context():
res = self.forward(restored, state or [], seq_lens)
if ((not isinstance(res, list) and not isinstance(res, tuple))
or len(res) != 2):
raise ValueError(
"forward() must return a tuple of (output, state) tensors, "
"got {}".format(res))
outputs, state = res
try:
shape = outputs.shape
except AttributeError:
raise ValueError("Output is not a tensor: {}".format(outputs))
else:
if len(shape) != 2 or shape[1] != self.num_outputs:
raise ValueError(
"Expected output shape of [None, {}], got {}".format(
self.num_outputs, shape))
if not isinstance(state, list):
raise ValueError("State output is not a list: {}".format(state))
self._last_output = outputs
return outputs, state
def from_batch(self, train_batch, is_training=True):
"""Convenience function that calls this model with a tensor batch.
All this does is unpack the tensor batch to call this model with the
right input dict, state, and seq len arguments.
"""
input_dict = {
"obs": train_batch[SampleBatch.CUR_OBS],
"is_training": is_training,
}
if SampleBatch.PREV_ACTIONS in train_batch:
input_dict["prev_actions"] = train_batch[SampleBatch.PREV_ACTIONS]
if SampleBatch.PREV_REWARDS in train_batch:
input_dict["prev_rewards"] = train_batch[SampleBatch.PREV_REWARDS]
states = []
i = 0
while "state_in_{}".format(i) in train_batch:
states.append(train_batch["state_in_{}".format(i)])
i += 1
return self.__call__(input_dict, states, train_batch.get("seq_lens"))
def last_output(self):
"""Returns the last output returned from calling the model."""
return self._last_output
def context(self):
"""Returns a contextmanager for the current forward pass."""
return NullContextManager()
class NullContextManager:
"""No-op context manager"""
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
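
# Illustrative sketch only (not part of RLlib): a custom Torch model following
# the forward()/value_function() contract documented above. Assumes torch is
# installed and that TorchModelV2 is importable from
# ray.rllib.models.torch.torch_modelv2; guarded so it never runs on import.
if __name__ == "__main__":
    import numpy as np
    import torch.nn as nn
    from ray.rllib.models.torch.torch_modelv2 import TorchModelV2

    class TinyFCModel(TorchModelV2, nn.Module):
        """One-layer policy/value net over the flattened observation."""

        def __init__(self, obs_space, action_space, num_outputs, model_config,
                     name):
            TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                                  model_config, name)
            nn.Module.__init__(self)
            in_size = int(np.prod(obs_space.shape))
            self.policy = nn.Linear(in_size, num_outputs)
            self.value_branch = nn.Linear(in_size, 1)
            self._last_flat_in = None

        def forward(self, input_dict, state, seq_lens):
            # "obs_flat" is supplied by ModelV2.__call__ (see above).
            self._last_flat_in = input_dict["obs_flat"].float()
            return self.policy(self._last_flat_in), state

        def value_function(self):
            return self.value_branch(self._last_flat_in).squeeze(1)

    # Such a model would typically be registered via
    # ModelCatalog.register_custom_model("tiny_fc", TinyFCModel).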
|
|
import datetime
import threading
import time
import cherrypy
from cherrypy.lib import cptools, http
class MemoryCache:
maxobjects = 1000
maxobj_size = 100000
maxsize = 10000000
delay = 600
def __init__(self):
self.clear()
t = threading.Thread(target=self.expire_cache, name='expire_cache')
self.expiration_thread = t
if hasattr(threading.Thread, "daemon"):
# Python 2.6+
t.daemon = True
else:
t.setDaemon(True)
t.start()
def clear(self):
"""Reset the cache to its initial, empty state."""
self.cache = {}
self.expirations = {}
self.tot_puts = 0
self.tot_gets = 0
self.tot_hist = 0
self.tot_expires = 0
self.tot_non_modified = 0
self.cursize = 0
def key(self):
return cherrypy.url(qs=cherrypy.request.query_string)
def expire_cache(self):
# expire_cache runs in a separate thread which the servers are
# not aware of. It's possible that "time" will be set to None
# arbitrarily, so we check "while time" to avoid exceptions.
# See tickets #99 and #180 for more information.
while time:
now = time.time()
for expiration_time, objects in self.expirations.items():
if expiration_time <= now:
for obj_size, obj_key in objects:
try:
del self.cache[obj_key]
self.tot_expires += 1
self.cursize -= obj_size
except KeyError:
# the key may have been deleted elsewhere
pass
del self.expirations[expiration_time]
time.sleep(0.1)
def get(self):
"""Return the object if in the cache, else None."""
self.tot_gets += 1
cache_item = self.cache.get(self.key(), None)
if cache_item:
self.tot_hist += 1
return cache_item
else:
return None
def put(self, obj):
if len(self.cache) < self.maxobjects:
# Size check no longer includes header length
obj_size = len(obj[2])
total_size = self.cursize + obj_size
# checks if there's space for the object
if (obj_size < self.maxobj_size and total_size < self.maxsize):
# add to the expirations list and cache
expiration_time = cherrypy.response.time + self.delay
obj_key = self.key()
bucket = self.expirations.setdefault(expiration_time, [])
bucket.append((obj_size, obj_key))
self.cache[obj_key] = obj
self.tot_puts += 1
self.cursize = total_size
def delete(self):
self.cache.pop(self.key(), None)
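# Minimal sketch (assumed and simplified; no cherrypy request involved) of the
# bucketing scheme used above: put() files each entry under its absolute
# expiration time, and expire_cache() later drops every bucket whose time has
# passed, decrementing cursize by the stored object sizes.
def _example_expiration_buckets(now, delay, entries):
    """entries: iterable of (key, size). Returns {expiration_time: [(size, key), ...]}."""
    buckets = {}
    for key, size in entries:
        buckets.setdefault(now + delay, []).append((size, key))
    return buckets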
def get(invalid_methods=("POST", "PUT", "DELETE"), **kwargs):
"""Try to obtain cached output. If fresh enough, raise HTTPError(304).
If POST, PUT, or DELETE:
* invalidates (deletes) any cached response for this resource
* sets request.cached = False
* sets request.cacheable = False
else if a cached copy exists:
* sets request.cached = True
* sets request.cacheable = False
* sets response.headers to the cached values
* checks the cached Last-Modified response header against the
current If-(Un)Modified-Since request headers; raises 304
if necessary.
* sets response.status and response.body to the cached values
* returns True
otherwise:
* sets request.cached = False
* sets request.cacheable = True
* returns False
"""
request = cherrypy.request
# POST, PUT, DELETE should invalidate (delete) the cached copy.
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
if request.method in invalid_methods:
cherrypy._cache.delete()
request.cached = False
request.cacheable = False
return False
cache_data = cherrypy._cache.get()
request.cached = c = bool(cache_data)
request.cacheable = not c
if c:
response = cherrypy.response
s, h, b, create_time, original_req_headers = cache_data
# Check 'Vary' selecting headers. If any headers mentioned in "Vary"
# differ between the cached and current request, bail out and
# let the rest of CP handle the request. This should properly
# mimic the behavior of isolated caches as RFC 2616 assumes:
# "If the selecting request header fields for the cached entry
# do not match the selecting request header fields of the new
# request, then the cache MUST NOT use a cached entry to satisfy
# the request unless it first relays the new request to the origin
# server in a conditional request and the server responds with
# 304 (Not Modified), including an entity tag or Content-Location
# that indicates the entity to be used.
# TODO: can we store multiple variants based on Vary'd headers?
for header_element in h.elements('Vary'):
key = header_element.value
if original_req_headers[key] != request.headers.get(key, 'missing'):
request.cached = False
request.cacheable = True
return False
# Copy the response headers. See http://www.cherrypy.org/ticket/721.
response.headers = rh = http.HeaderMap()
for k in h:
dict.__setitem__(rh, k, dict.__getitem__(h, k))
# Add the required Age header
response.headers["Age"] = str(int(response.time - create_time))
try:
# Note that validate_since depends on a Last-Modified header;
# this was put into the cached copy, and should have been
# resurrected just above (response.headers = cache_data[1]).
cptools.validate_since()
except cherrypy.HTTPRedirect, x:
if x.status == 304:
cherrypy._cache.tot_non_modified += 1
raise
# serve it & get out from the request
response.status = s
response.body = b
return c
def tee_output():
def tee(body):
"""Tee response.body into a list."""
output = []
for chunk in body:
output.append(chunk)
yield chunk
# Might as well do this here; why cache if the body isn't consumed?
if response.headers.get('Pragma', None) != 'no-cache':
# save the cache data
body = ''.join(output)
vary = [he.value for he in
cherrypy.response.headers.elements('Vary')]
if vary:
sel_headers = dict([(k, v) for k, v
in cherrypy.request.headers.iteritems()
if k in vary])
else:
sel_headers = {}
cherrypy._cache.put((response.status, response.headers or {},
body, response.time, sel_headers))
response = cherrypy.response
response.body = tee(response.body)
def expires(secs=0, force=False):
"""Tool for influencing cache mechanisms using the 'Expires' header.
'secs' must be either an int or a datetime.timedelta, and indicates the
number of seconds between response.time and when the response should
expire. The 'Expires' header will be set to (response.time + secs).
If 'secs' is zero, the 'Expires' header is set one year in the past, and
the following "cache prevention" headers are also set:
'Pragma': 'no-cache'
'Cache-Control': 'no-cache, must-revalidate'
If 'force' is False (the default), the following headers are checked:
'Etag', 'Last-Modified', 'Age', 'Expires'. If any are already present,
none of the above response headers are set.
"""
response = cherrypy.response
headers = response.headers
cacheable = False
if not force:
# some header names that indicate that the response can be cached
for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
if indicator in headers:
cacheable = True
break
if not cacheable:
if isinstance(secs, datetime.timedelta):
secs = (86400 * secs.days) + secs.seconds
if secs == 0:
if force or "Pragma" not in headers:
headers["Pragma"] = "no-cache"
if cherrypy.request.protocol >= (1, 1):
if force or "Cache-Control" not in headers:
headers["Cache-Control"] = "no-cache, must-revalidate"
# Set an explicit Expires date in the past.
expiry = http.HTTPDate(1169942400.0)
else:
expiry = http.HTTPDate(response.time + secs)
if force or "Expires" not in headers:
headers["Expires"] = expiry
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import head as head_lib
from tensorflow.contrib.estimator.python.estimator import multi_head as multi_head_lib
from tensorflow.core.framework import summary_pb2
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _initialize_variables(test_case, scaffold):
scaffold.finalize()
test_case.assertIsNone(scaffold.init_feed_dict)
test_case.assertIsNone(scaffold.init_fn)
scaffold.init_op.run()
scaffold.ready_for_local_init_op.eval()
scaffold.local_init_op.run()
scaffold.ready_op.eval()
test_case.assertIsNotNone(scaffold.saver)
def _assert_simple_summaries(test_case, expected_summaries, summary_str,
tol=1e-6):
"""Assert summary the specified simple values.
Args:
test_case: test case.
expected_summaries: Dict of expected tags and simple values.
summary_str: Serialized `summary_pb2.Summary`.
tol: Relative and absolute tolerance for the comparison.
"""
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
test_case.assertAllClose(expected_summaries, {
v.tag: v.simple_value for v in summary.value
}, rtol=tol, atol=tol)
def _assert_no_hooks(test_case, spec):
test_case.assertAllEqual([], spec.training_chief_hooks)
test_case.assertAllEqual([], spec.training_hooks)
def _sigmoid(logits):
return 1 / (1 + np.exp(-logits))
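# Illustrative helper (not part of the original test file): the large-logit
# approximation of sigmoid cross entropy that the expected-loss comments in the
# tests below rely on, i.e.
#   loss ~= labels * (logits < 0) * (-logits) + (1 - labels) * (logits > 0) * logits.
# For head1 logits [[-10, 10], [-15, 10]] and labels [[1, 0], [1, 1]] this gives
# [[10, 10], [15, 0]]; averaging over classes and summing over examples yields
# the expected loss of 17.5 used in several tests.
def _approx_sigmoid_cross_entropy(labels, logits):
    labels = np.asarray(labels, dtype=np.float32)
    logits = np.asarray(logits, dtype=np.float32)
    return labels * (logits < 0) * (-logits) + (1. - labels) * (logits > 0) * logits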
class MultiHeadTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def test_no_heads(self):
with self.assertRaisesRegexp(
ValueError, r'Must specify heads\. Given: \[\]'):
multi_head_lib.multi_head(heads=[])
def test_head_name_missing(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3)
with self.assertRaisesRegexp(
ValueError, r'All given heads must have name specified\.'):
multi_head_lib.multi_head([head1, head2])
def test_head_weights_wrong_size(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
with self.assertRaisesRegexp(
ValueError,
r'heads and head_weights must have the same size\. '
r'Given len\(heads\): 2. Given len\(head_weights\): 1\.'):
multi_head_lib.multi_head([head1, head2], head_weights=[1.])
def test_name(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
self.assertEqual('head1_head2', multi_head.name)
def test_predict_two_heads_logits_dict(self):
"""Tests predict with logits as dict."""
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
logits = {
'head1': np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32),
'head2': np.array([[2., -2., 2.], [-3., 2., -2.]], dtype=np.float32)
}
expected_probabilities = {
'head1': _sigmoid(logits['head1']),
'head2': _sigmoid(logits['head2']),
}
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
(_DEFAULT_SERVING_KEY, 'head1', 'classification/head1', 'predict/head1',
'head2', 'classification/head2', 'predict/head2'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(
logits['head1'],
predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
logits['head2'],
predictions[('head2', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
expected_probabilities['head1'],
predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head2'],
predictions[('head2', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs['head1'].scores))
self.assertAllClose(
expected_probabilities['head2'],
sess.run(spec.export_outputs['head2'].scores))
def test_predict_two_heads_logits_tensor(self):
"""Tests predict with logits as Tensor."""
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
logits = np.array(
[[-1., 1., 2., -2., 2.], [-1.5, 1., -3., 2., -2.]], dtype=np.float32)
expected_logits1 = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)
expected_logits2 = np.array([[2., -2., 2.], [-3., 2., -2.]],
dtype=np.float32)
expected_probabilities = {
'head1': _sigmoid(expected_logits1),
'head2': _sigmoid(expected_logits2),
}
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
(_DEFAULT_SERVING_KEY, 'head1', 'classification/head1', 'predict/head1',
'head2', 'classification/head2', 'predict/head2'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(
expected_logits1,
predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
expected_logits2,
predictions[('head2', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
expected_probabilities['head1'],
predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head2'],
predictions[('head2', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs['head1'].scores))
self.assertAllClose(
expected_probabilities['head2'],
sess.run(spec.export_outputs['head2'].scores))
def test_predict_two_heads_logits_tensor_multi_dim(self):
"""Tests predict with multi-dimensional logits of shape [2, 2, 5]."""
head1 = head_lib.regression_head(label_dimension=2, name='head1')
head2 = head_lib.regression_head(label_dimension=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
logits = np.array(
[[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
[[-1.5, 1., -3., 2., -2.], [-1.5, 1., -3., 2., -2.]]],
dtype=np.float32)
expected_logits1 = np.array(
[[[-1., 1.], [-1., 1.]],
[[-1.5, 1.], [-1.5, 1.]]],
dtype=np.float32)
expected_logits2 = np.array(
[[[2., -2., 2.], [2., -2., 2.]],
[[-3., 2., -2.], [-3., 2., -2.]]],
dtype=np.float32)
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
(_DEFAULT_SERVING_KEY, 'head1', 'regression/head1', 'predict/head1',
'head2', 'regression/head2', 'predict/head2'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(
expected_logits1,
predictions[('head1', prediction_keys.PredictionKeys.PREDICTIONS)])
self.assertAllClose(
expected_logits2,
predictions[('head2', prediction_keys.PredictionKeys.PREDICTIONS)])
self.assertAllClose(
expected_logits1,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].value))
self.assertAllClose(
expected_logits1,
sess.run(spec.export_outputs['head1'].value))
self.assertAllClose(
expected_logits2,
sess.run(spec.export_outputs['head2'].value))
def test_eval_two_heads_with_weights(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = {
'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
dtype=np.float32),
}
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]
# head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]
# Average over classes, weighted sum over batch and heads.
expected_loss_head1 = 17.5
expected_loss_head2 = 30.0
expected_loss = 1. * expected_loss_head1 + 2. * expected_loss_head2
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS + '/head1': expected_loss_head1,
keys.LOSS + '/head2': expected_loss_head2,
# Average loss over examples.
keys.LOSS_MEAN + '/head1': expected_loss_head1 / 2,
keys.LOSS_MEAN + '/head2': expected_loss_head2 / 2,
# auc and auc_pr cannot be reliably calculated for only 4-6 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC + '/head1': 0.1667,
keys.AUC + '/head2': 0.3333,
keys.AUC_PR + '/head1': 0.49999964,
keys.AUC_PR + '/head2': 0.33333313,
}
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol,
atol=tol)
def test_train_create_loss_one_head(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
multi_head = multi_head_lib.multi_head([head1])
logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
loss = multi_head.create_loss(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
tol = 1e-3
with self.test_session():
# Unreduced loss of the head is [[(10 + 10) / 2], [(15 + 0) / 2]]
# (averaged over classes, sum-reduced over examples).
self.assertAllClose(17.5, loss.eval(), rtol=tol, atol=tol)
def test_train_create_loss_two_heads_with_weights(self):
# Use different example weights for each head.
weights1 = np.array([[1.], [2.]], dtype=np.float32)
weights2 = np.array([[2.], [3.]])
head1 = head_lib.multi_label_head(n_classes=2, name='head1',
weight_column='weights1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2',
weight_column='weights2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = {
'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
dtype=np.float32),
}
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
training_loss, unreduced_losses, weights, _ = multi_head.create_loss(
features={
'x': np.array(((42,),), dtype=np.int32),
'weights1': weights1,
'weights2': weights2
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
tol = 1e-3
with self.test_session():
# loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]
# = [10, 7.5]
# training_loss = 1 * 10 + 2 * 7.5 = 25
# head-weighted unreduced_loss = 1 * [10, 7.5]
self.assertAllClose(
[[10.], [7.5]], unreduced_losses['head1'].eval(), rtol=tol, atol=tol)
# loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]
# = [20, 10]
# training_loss = 2 * 20 + 3 * 10 = 70
# head-weighted unreduced_loss = 2 * [20, 10]
self.assertAllClose(
[[40.], [20.]], unreduced_losses['head2'].eval(), rtol=tol, atol=tol)
# head-weighted training_loss = 1 * 25 + 2 * 70 = 165
self.assertAllClose(165, training_loss.eval(), rtol=tol, atol=tol)
# head-weighted example weights
self.assertAllClose(
[[1.], [2.]], weights['head1'].eval(), rtol=tol, atol=tol)
self.assertAllClose(
[[4.], [6.]], weights['head2'].eval(), rtol=tol, atol=tol)
def test_train_create_loss_logits_tensor(self):
"""Tests create_loss with logits Tensor."""
weights1 = np.array([[1.], [2.]], dtype=np.float32)
weights2 = np.array([[2.], [3.]])
head1 = head_lib.multi_label_head(n_classes=2, name='head1',
weight_column='weights1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2',
weight_column='weights2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = np.array([[-10., 10., 20., -20., 20.],
[-15., 10., -30., 20., -20.]], dtype=np.float32)
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
training_loss, unreduced_losses, weights, _ = multi_head.create_loss(
features={
'x': np.array(((42,),), dtype=np.int32),
'weights1': weights1,
'weights2': weights2
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
tol = 1e-3
with self.test_session():
# loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]
# = [10, 7.5]
# training_loss = 1 * 10 + 2 * 7.5 = 25
# head-weighted unreduced_loss = 1 * [10, 7.5]
self.assertAllClose(
[[10.], [7.5]], unreduced_losses['head1'].eval(), rtol=tol, atol=tol)
# loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]
# = [20, 10]
# training_loss = 2 * 20 + 3 * 10 = 70
# head-weighted unreduced_loss = 2 * [20, 10]
self.assertAllClose(
[[40.], [20.]], unreduced_losses['head2'].eval(), rtol=tol, atol=tol)
# head-weighted training_loss = 1 * 25 + 2 * 70 = 165
self.assertAllClose(165, training_loss.eval(), rtol=tol, atol=tol)
# head-weighted example weights
self.assertAllClose(
[[1.], [2.]], weights['head1'].eval(), rtol=tol, atol=tol)
self.assertAllClose(
[[4.], [6.]], weights['head2'].eval(), rtol=tol, atol=tol)
def test_train_create_loss_logits_tensor_multi_dim(self):
"""Tests create_loss with multi-dimensional logits of shape [2, 2, 5]."""
head1 = head_lib.regression_head(label_dimension=2, name='head1')
head2 = head_lib.regression_head(label_dimension=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
logits = np.array(
[[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
[[-1.5, 1.5, -2., 2., -2.], [-1.5, 1.5, -2., 2., -2.]]],
dtype=np.float32)
labels = {
'head1': np.array([[[1., 0.], [1., 0.]],
[[1.5, 1.5], [1.5, 1.5]]], dtype=np.float32),
'head2': np.array([[[0., 1., 0.], [0., 1., 0.]],
[[2., 2., 0.], [2., 2., 0.]]], dtype=np.float32),
}
# Loss for the first head:
# loss1 = (1+1)^2 + (0-1)^2 + (1+1)^2 + (0-1)^2 +
# (1.5+1.5)^2 + (1.5-1.5)^2 + (1.5+1.5)^2 + (1.5-1.5)^2
# = 28
# Loss for the second head:
# loss2 = (0-2)^2 + (1+2)^2 + (0-2)^2 + (0-2)^2 + (1+2)^2 + (0-2)^2 +
# (2+2)^2 + (2-2)^2 + (0+2)^2 + (2+2)^2 + (2-2)^2 + (0+2)^2
# = 74
expected_training_loss = 28. + 74.
training_loss = multi_head.create_loss(
features={},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
tol = 1e-3
with self.test_session():
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
def test_train_one_head(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
multi_head = multi_head_lib.multi_head([head1])
logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over examples.
expected_loss = 17.5
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS + '/head1': expected_loss,
# Average loss over examples.
metric_keys.MetricKeys.LOSS_MEAN + '/head1': expected_loss / 2,
}, summary_str, tol)
def test_train_two_heads_with_weights(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = {
'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
dtype=np.float32),
}
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]
# head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]
# Average over classes, weighted sum over batch and heads.
expected_loss_head1 = 17.5
expected_loss_head2 = 30.0
expected_loss = 1. * expected_loss_head1 + 2. * expected_loss_head2
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS + '/head1': expected_loss_head1,
metric_keys.MetricKeys.LOSS + '/head2': expected_loss_head2,
# Average loss over examples.
metric_keys.MetricKeys.LOSS_MEAN + '/head1': expected_loss_head1 / 2,
metric_keys.MetricKeys.LOSS_MEAN + '/head2': expected_loss_head2 / 2,
}, summary_str, tol)
if __name__ == '__main__':
test.main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import threading
import numpy as np
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import logging
from tensorflow.python.util import compat
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None):
"""Runs operations in the session. See `Session.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1],
fetched_vals[2]
if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None,
the default graph will be used.
config: (Optional) ConfigProto proto used to configure the session.
Raises:
RuntimeError: If an error occurs while creating the TensorFlow
session.
"""
if graph is None:
self._graph = ops.get_default_graph()
else:
self._graph = graph
self._opened = False
self._closed = False
self._current_version = 0
self._extend_lock = threading.Lock()
self._target = target
self._session = None
opts = tf_session.TF_NewSessionOptions(target=target, config=config)
try:
status = tf_session.TF_NewStatus()
try:
self._session = tf_session.TF_NewSession(opts, status)
if tf_session.TF_GetCode(status) != 0:
raise RuntimeError(compat.as_text(tf_session.TF_Message(status)))
finally:
tf_session.TF_DeleteStatus(status)
finally:
tf_session.TF_DeleteSessionOptions(opts)
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
RuntimeError: If an error occurs while closing the session.
"""
with self._extend_lock:
if self._opened and not self._closed:
self._closed = True
try:
status = tf_session.TF_NewStatus()
tf_session.TF_CloseSession(self._session, status)
if tf_session.TF_GetCode(status) != 0:
raise RuntimeError(compat.as_text(tf_session.TF_Message(status)))
finally:
tf_session.TF_DeleteStatus(status)
def __del__(self):
self.close()
try:
status = tf_session.TF_NewStatus()
if self._session is not None:
tf_session.TF_DeleteSession(self._session, status)
if tf_session.TF_GetCode(status) != 0:
raise RuntimeError(compat.as_text(tf_session.TF_Message(status)))
self._session = None
finally:
tf_session.TF_DeleteStatus(status)
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def()
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
[`Operation.run()`](../../api_docs/python/framework.md#Operation.run) or
[`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval) should be
executed in this session.
```python
c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
assert tf.get_default_session() is sess
print(c.eval())
```
To get the current default session, use
[`tf.get_default_session()`](#get_default_session).
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default graph is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
# Eventually, this registration could be opened up to support custom
# Tensor expansions. Expects tuples of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
# Conceptually, fetch_fn describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value. feed_fn describes how to unpack a single fed
# value and map it to feeds of a Tensor and its corresponding value.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(ops.SparseTensor,
lambda fetch: (
[fetch.indices, fetch.values, fetch.shape],
lambda fetched_vals: ops.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(zip(
[feed.indices, feed.values, feed.shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: (
[fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape],
_get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None
else [feed.values, feed.indices, feed.dense_shape]),
# The default catches all types and performs no expansions.
(object,
lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)],
lambda feed: [feed])]
# pylint: enable=g-long-lambda
def run(self, fetches, feed_dict=None):
"""Runs the operations and evaluates the tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a list of graph elements or a single
graph element, and these determine the return value of this
method. A graph element can be one of the following types:
* If the *i*th element of `fetches` is an
[`Operation`](../../api_docs/python/framework.md#Operation), the *i*th
return value will be `None`.
* If the *i*th element of `fetches` is a
[`Tensor`](../../api_docs/python/framework.md#Tensor), the *i*th return
value will be a numpy ndarray containing the value of that tensor.
* If the *i*th element of `fetches` is a
[`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor),
the *i*th return value will be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue)
containing the value of that sparse tensor.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a [`Tensor`](../../api_docs/python/framework.md#Tensor), the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
[placeholder](../../api_docs/python/io_ops.md#placeholder), the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
[`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor),
the value should be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue).
Args:
fetches: A single graph element, or a list of graph elements
(described above).
feed_dict: A dictionary that maps graph elements to values
(described above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list (described above).
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
return self._run(None, fetches, feed_dict)
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
Below is a simple example:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, or a list of graph elements
(described above).
feed_dict: A dictionary that maps graph elements to values
(described above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list (described above).
"""
return self._run(handle, fetches, feed_dict)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in BaseSession._REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r'
% (feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Validate and process fetches.
unique_fetches, target_list, _ = self._process_fetches(fetches)
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
allow_operation=False)
feed_list.append(compat.as_bytes(subfeed_t.name))
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: '
+ e.message)
e.args = (e.message,)
raise e
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_PRunSetup(session, feed_list, fetch_list,
target_list)
return self._do_call(_setup_fn, self._session, feed_list, unique_fetches,
target_list)
def _process_fetches(self, fetches):
"""Validate and process fetches."""
def _fetch_fn(fetch):
for tensor_type, fetch_fn, _, _ in BaseSession._REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
return fetch_fn(fetch)
raise TypeError('Fetch argument %r has invalid type %r'
% (fetch, type(fetch)))
# Validate and process fetches.
is_list_fetch = isinstance(fetches, (list, tuple))
if not is_list_fetch:
fetches = [fetches]
unique_fetch_targets = set()
target_list = []
fetch_info = []
for fetch in fetches:
subfetches, fetch_contraction_fn = _fetch_fn(fetch)
subfetch_names = []
for subfetch in subfetches:
try:
fetch_t = self.graph.as_graph_element(subfetch, allow_tensor=True,
allow_operation=True)
if isinstance(fetch_t, ops.Operation):
target_list.append(compat.as_bytes(fetch_t.name))
else:
subfetch_names.append(compat.as_bytes(fetch_t.name))
except TypeError as e:
raise TypeError('Fetch argument %r of %r has invalid type %r, '
'must be a string or Tensor. (%s)'
% (subfetch, fetch, type(subfetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r of %r cannot be interpreted as a '
'Tensor. (%s)' % (subfetch, fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r of %r cannot be interpreted as a '
'Tensor. (%s)' % (subfetch, fetch, str(e)))
unique_fetch_targets.update(subfetch_names)
fetch_info.append((subfetch_names, fetch_contraction_fn))
unique_fetch_targets = list(unique_fetch_targets)
return unique_fetch_targets, target_list, fetch_info
def _run(self, handle, fetches, feed_dict):
"""Perform either run or partial_run, depending the exitence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in BaseSession._REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r'
% (feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Validate and process fetches.
unique_fetches, target_list, fetch_info = self._process_fetches(fetches)
# Create request.
feed_dict_string = {}
# Validate and process feed_dict.
if feed_dict:
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: '
+ e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, or numpy ndarrays.')
np_val = np.array(subfeed_val, dtype=subfeed_t.dtype.as_numpy_dtype)
if subfeed_t.op.type == 'Placeholder':
if not subfeed_t.get_shape().is_compatible_with(np_val.shape):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r'
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
feed_dict_string[compat.as_bytes(subfeed_t.name)] = np_val
# Run request and get response.
results = self._do_run(handle, target_list, unique_fetches,
feed_dict_string)
# User may have fetched the same tensor multiple times, but we
# only fetch them from the runtime once. Furthermore, they may
# be wrapped as a tuple of tensors. Here we map the results back
# to what the client asked for.
fetched_results = dict(zip(unique_fetches, results))
ret = []
for fetch_names, fetch_contraction_fn in fetch_info:
if fetch_names:
fetched_vals = [fetched_results[name] for name in fetch_names]
ret.append(fetch_contraction_fn(fetched_vals))
else:
ret.append(None)
if isinstance(fetches, (list, tuple)):
return ret
else:
return ret[0]
# Captures the name of a node in an error status.
_NODEDEF_NAME_RE = re.compile(r'\[\[Node: ([^ ]*?) =')
def _do_run(self, handle, target_list, fetch_list, feed_dict):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of byte arrays corresponding to names of tensors
or operations to be run, but not fetched.
fetch_list: A list of byte arrays corresponding to names of tensors to
be fetched and operations to be run.
feed_dict: A dictionary that maps tensor names (as byte arrays) to
numpy ndarrays.
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
"""
def _run_fn(session, feed_dict, fetch_list, target_list):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
return tf_session.TF_Run(session, feed_dict, fetch_list, target_list)
def _prun_fn(session, handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
return tf_session.TF_PRun(session, handle, feed_dict, fetch_list)
if handle is None:
return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
target_list)
else:
return self._do_call(_prun_fn, self._session, handle, feed_dict,
fetch_list)
def _do_call(self, fn, *args):
try:
return fn(*args)
except tf_session.StatusNotOK as e:
error_message = compat.as_text(e.error_message)
m = BaseSession._NODEDEF_NAME_RE.search(error_message)
node_def = None
op = None
if m is not None:
node_name = m.group(1)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
# pylint: disable=protected-access
raise errors._make_specific_exception(node_def, op, error_message,
e.code)
# pylint: enable=protected-access
def _extend_graph(self):
# Ensure any changes to the graph are reflected in the runtime.
with self._extend_lock:
if self._graph.version > self._current_version:
graph_def = self._graph.as_graph_def(
from_version=self._current_version)
try:
status = tf_session.TF_NewStatus()
tf_session.TF_ExtendGraph(
self._session, graph_def.SerializeToString(), status)
if tf_session.TF_GetCode(status) != 0:
raise RuntimeError(compat.as_text(tf_session.TF_Message(status)))
self._opened = True
finally:
tf_session.TF_DeleteStatus(status)
self._current_version = self._graph.version
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
[variables](../../api_docs/python/state_ops.md#Variable), [queues](../../api_docs/python/io_ops.md#QueueBase),
and [readers](../../api_docs/python/io_ops.md#ReaderBase). It is important to release
these resources when they are no longer required. To do this, either
invoke the [`close()`](#Session.close) method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.Session() as sess:
sess.run(...)
```
The [`ConfigProto`]
(https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
```
@@__init__
@@run
@@close
@@graph
@@as_default
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine. At present, no value
other than the empty string is supported.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
self._context_managers = [self.graph.as_default(), self.as_default()]
def __enter__(self):
for context_manager in self._context_managers:
context_manager.__enter__()
return self
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
for context_manager in reversed(self._context_managers):
context_manager.__exit__(exec_type, exec_value, exec_tb)
self.close()
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods [`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval)
and [`Operation.run()`](../../api_docs/python/framework.md#Operation.run)
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
@@__init__
@@close
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine. At present, no value
other than the empty string is supported.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
super(InteractiveSession, self).__init__(target, graph, config)
self._default_session = self.as_default()
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_session.__exit__(None, None, None)
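# Illustrative sketch (not part of the original module) of the contract that
# _REGISTERED_EXPANSIONS entries follow: fetch_fn splits a fetch into its
# component tensors plus a contraction function that rebuilds a single return
# value from the fetched numpy arrays. This mirrors the default (object) entry.
def _example_default_expansion(fetch, fetched_value):
    fetch_fn = lambda f: ([f], lambda fetched_vals: fetched_vals[0])
    component_tensors, contraction_fn = fetch_fn(fetch)
    assert component_tensors == [fetch]
    return contraction_fn([fetched_value])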
|
|
# mininode.py - Bitcoin P2P network half-a-node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import binascii
import time
import sys
import random
import cStringIO
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
BIP0031_VERSION = 60000
MY_VERSION = 60001 # past bip-31 for ping/pong
MY_SUBVERSION = "/python-mininode-tester:0.0.1/"
MAX_INV_SZ = 50000
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to work around an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
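# Illustrative sketch (assumed usage, not part of the original file): test logic
# is expected to take mininode_lock before touching any state shared with the
# networking thread, e.g. message lists accumulated by a NodeConnCB.
def _example_locked_snapshot(shared_list):
    with mininode_lock:
        return list(shared_list)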
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return chr(len(s)) + s
elif len(s) < 0x10000:
return chr(253) + struct.pack("<H", len(s)) + s
elif len(s) < 0x100000000L:
return chr(254) + struct.pack("<I", len(s)) + s
return chr(255) + struct.pack("<Q", len(s)) + s
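# Illustrative round trip (not part of the original file): ser_string prefixes
# the payload with a Bitcoin compact-size length, which deser_string reads back.
def _example_string_roundtrip(s="hello"):
    f = cStringIO.StringIO(ser_string(s))
    return deser_string(f) == s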
def deser_uint256(f):
r = 0L
for i in xrange(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = ""
for i in xrange(8):
rs += struct.pack("<I", u & 0xFFFFFFFFL)
u >>= 32
return rs
def uint256_from_str(s):
r = 0L
t = struct.unpack("<IIIIIIII", s[:32])
for i in xrange(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
return v
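# Illustrative only (well-known value, added for clarity): the difficulty-1
# compact target 0x1d00ffff expands to 0xffff shifted left by 8*(0x1d - 3) bits.
def _example_difficulty_one_target():
    return uint256_from_compact(0x1d00ffffL)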
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = "\x00" * 10 + "\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0L):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig="", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), binascii.hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // 100000000, self.nValue % 100000000,
binascii.hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = hash256(self.serialize())[::-1].encode('hex_codec')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000L * 100000000L:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = hash256(r)[::-1].encode('hex_codec')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = ""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
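        # Pairwise double-SHA256 the txids level by level, duplicating the last
        # hash when a level has an odd count (that is what min(i+1, len-1)
        # below does), until a single 32-byte root remains.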
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in xrange(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = ""
self.strStatusBar = ""
self.strReserved = ""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = ""
self.vchSig = ""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = ""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = "version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = time.time()
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = "verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = "addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = "alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = ""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = "inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = "getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = "getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = "tx"
    def __init__(self, tx=None):
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = "block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = "getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = "ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = "ping"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = "pong"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = "mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = "sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = "getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = "headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = "reject"
def __init__(self):
self.message = ""
self.code = ""
self.reason = ""
self.data = 0L
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.message == "block" or self.message == "tx"):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.message == "block" or self.message == "tx"):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Derived classes should call this function once to set the message map
# which associates the derived classes' functions to incoming messages
def create_callback_map(self):
self.cbmap = {
"version": self.on_version,
"verack": self.on_verack,
"addr": self.on_addr,
"alert": self.on_alert,
"inv": self.on_inv,
"getdata": self.on_getdata,
"getblocks": self.on_getblocks,
"tx": self.on_tx,
"block": self.on_block,
"getaddr": self.on_getaddr,
"ping": self.on_ping,
"pong": self.on_pong,
"headers": self.on_headers,
"getheaders": self.on_getheaders,
"reject": self.on_reject,
"mempool": self.on_mempool
}
def deliver(self, conn, message):
with mininode_lock:
try:
self.cbmap[message.command](conn, message)
except:
print "ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0])
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
    def on_mempool(self, conn, message): pass
def on_pong(self, conn, message): pass
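# --- Hedged usage sketch (added example, not part of the original file) ---
# A minimal NodeConnCB subclass that just counts the blocks and transactions it
# is delivered; it relies only on the hooks and create_callback_map() above.
class ExampleCountingCB(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.create_callback_map()
        self.block_count = 0
        self.tx_count = 0
    def on_block(self, conn, message):
        self.block_count += 1
    def on_tx(self, conn, message):
        self.tx_count += 1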
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
"version": msg_version,
"verack": msg_verack,
"addr": msg_addr,
"alert": msg_alert,
"inv": msg_inv,
"getdata": msg_getdata,
"getblocks": msg_getblocks,
"tx": msg_tx,
"block": msg_block,
"getaddr": msg_getaddr,
"ping": msg_ping,
"pong": msg_pong,
"headers": msg_headers,
"getheaders": msg_getheaders,
"reject": msg_reject,
"mempool": msg_mempool
}
MAGIC_BYTES = {
"mainnet": "\xf9\xbe\xb4\xd9", # mainnet
"testnet3": "\x0b\x11\x09\x07", # testnet3
"regtest": "\xfa\xbf\xb5\xda" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = ""
self.recvbuf = ""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print 'MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ str(dstport)
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = ""
self.sendbuf = ""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
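        # Wire framing handled below: 4-byte network magic, 12-byte zero-padded
        # command, 4-byte payload length, then (for protocol >= 209) a 4-byte
        # checksum (first four bytes of double-SHA256 of the payload), followed
        # by the payload itself.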
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = cStringIO.StringIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += "\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == "version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap['ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap['ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
            # loop to work around the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
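# Hedged end-to-end sketch (illustrative only; the address and port below are
# assumptions, not taken from this file):
#
#     cb = ExampleCountingCB()                  # the illustrative callback above
#     conn = NodeConn('127.0.0.1', 18444, rpc=None, callback=cb)
#     NetworkThread().start()
#     cb.wait_for_verack()
#     ... drive the test, taking mininode_lock around any shared state ...
#     conn.disconnect_node()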
|
|
# -*- coding:utf-8 -*-
# Transform given Active Display Coordinate (ADC) to Media Coordinate (MC).
# raw/pororo*.tsv -> data/pororo*.tsv
#
# @date 2014-03-30
# @author Jin-Hwa Kim (jhkim@bi.snu.ac.kr)
import os, sys, getopt, glob, re
import numpy as np
import common
# default options
verbose = False
DEBUG = True
# Get the transformation matrix for mapping the Tobii Snapshot coordinate system
# to a unit space coordinate system.
# @param sourceCoords np.array of x, y pairs
# @param targetCoords np.array of x, y pairs
def getTransformationMatrix(sourceCoords, targetCoords):
# AX = y; A = yX'pinv(XX')
# Using a linear algebra library
Q = np.dot(sourceCoords, np.transpose(sourceCoords))
Tr = np.dot(np.dot(targetCoords, np.transpose(sourceCoords)), \
np.linalg.pinv(Q))
return Tr
# Get a transformed coordinate using a given transformation matrix.
def getUnitCoord(Tr, x, y):
# Encapsulate x and y to a x, y pair
coord = encapsulateCoord(x, y)
# Matrix * matrix
unitCoord = np.dot(Tr, coord)
return unitCoord
# encapsulate to a coordinate
# It follows the linear algebra library convention.
def encapsulateCoord(x, y):
return np.array([x, y, 1])
# encapsulate to a coordinate matrix
# It follows the linear algebra library convention.
def encapsulateCoords(listOfXYs):
return np.transpose(np.array(listOfXYs))
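# Hedged self-check sketch (added example, not part of the original pipeline):
# mapping the four corners of the unit square onto themselves should recover an
# (approximately) identity transform, since Tr = B A' pinv(A A').
def _demoIdentityTransform():
    corners = [[0., 0., 1.], [1., 0., 1.], [1., 1., 1.], [0., 1., 1.]]
    a = encapsulateCoords(corners)
    Tr = getTransformationMatrix(a, a)
    # expected to print something close to the 3x3 identity matrix
    print np.round(Tr, 3)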
# Add MCx MCy to a given data.
def preprocess(source_filename, output_filename, snapshotCoords, length, delay, skip = 0):
(path, filename, name, extension) = common.pfne(source_filename)
idx = 0
Tr = getTobbiTransformationMatrix(snapshotCoords)
with open(source_filename, 'rU') as f, open(output_filename, 'w') as w:
# Print a header for the output file.
_printHeader(w)
# Read lines.
header = f.readline().split('\n')[0]
wholefile = f.readlines()
for line in wholefile:
# parse the line
timestamp, event, fixX, fixY, gzX, gzY = parseTobbiLine(header, line.split('\n')[0])
# skip
if int(timestamp) < skip :
continue
# delay
if int(timestamp) < delay :
continue
else :
timestamp = int(timestamp) - delay
# length
if timestamp > length :
break
# Print
# The number of Origin's columns is 9.
origin = line.split('\n')[0].split('\t')[1:9]
# try :
w.write("{}\t{}".format(timestamp, '\t'.join(origin)))
# Transformation
try :
fixation = getUnitCoord(Tr, int(fixX), int(fixY))
w.write("\t{0:.3f}\t{1:.3f}".format(fixation[0], fixation[1]))
except ValueError :
fixation = ['', '']
w.write("\t\t")
try :
gaze = getUnitCoord(Tr, int(gzX), int(gzY))
w.write("\t{0:.3f}\t{1:.3f}\n".format(gaze[0], gaze[1]))
except ValueError :
gaze = ['', '']
w.write("\t\t\n")
# Parse Tobii eye-tracking data to extract the required fields.
def parseTobbiLine(header, line, delimiter = "\t"):
header = header.replace("\xef\xbb\xbf", "").split(delimiter)
line = line.split(delimiter)
timestamp = line[header.index('RecordingTimestamp')]
gazeEventType = line[header.index('GazeEventType')]
fixationPointX = line[header.index('FixationPointX (MCSpx)')]
fixationPointY = line[header.index('FixationPointY (MCSpx)')]
gazePointX = line[header.index('GazePointX (ADCSpx)')]
gazePointY = line[header.index('GazePointY (ADCSpx)')]
return timestamp, gazeEventType, fixationPointX, fixationPointY, \
gazePointX, gazePointY
# Print a header for a given file description.
def _printHeader(f):
f.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n" \
.format("RecordingTimestamp", "FixationIndex", "SaccadeIndex", \
"GazeEventType", "GazeEventDuration", \
"FixationPointX (ADCSpx)", "FixationPointY (ADCSpx)", \
"GazePointX (ADCSpx)", "GazePointY (ADCSpx)", \
"FixationPointX (MCSpx)", "FixationPointY (MCSpx)", \
"GazePointX (MCSpx)", "GazePointY (MCSpx)"))
# Print fixations for a given file description.
def _printFixations(f, t_ts, o_ts, fixations, originalFixations):
for i in range(len(fixations)):
fixX = fixations[i][0]
fixY = fixations[i][1]
oFixX = originalFixations[i][0]
oFixY = originalFixations[i][1]
f.write("{}\t{}\t{}\t{}\t{}\t{}\n" \
.format(t_ts, o_ts[i], fixX, fixY, oFixX, oFixY))
# Get the transformation matrix for Tobii data.
def getTobbiTransformationMatrix(snapshotCoords):
# Pororo video resolution : 720 * 544
M_SIZE = [720., 544.]
# TV Screen resolution : 1920 * 1080
S_size = [1920., 1080.]
# Scaling factor for Pororo video on the screen.
SCALING_FACTOR = (S_size[0] / (M_SIZE[0] * S_size[1] / M_SIZE[1]) - 1) / 2
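    # Added note (inferred, hedged): this formula appears to assume the 720x544
    # video is scaled to the full 1080 px screen height and centred, giving a
    # displayed media width of ~1429 px and ~245 px of unused screen on each
    # side, i.e. ~0.17 of the media width; the screen corners are then mapped
    # to x in [-SCALING_FACTOR, 1+SCALING_FACTOR] below.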
for row in snapshotCoords:
row.append(1.)
a = encapsulateCoords(snapshotCoords)
b = encapsulateCoords([[-SCALING_FACTOR,0,1],[1+SCALING_FACTOR,0,1],\
[1+SCALING_FACTOR,1,1],[-SCALING_FACTOR,1,1]])
Tr = getTransformationMatrix(a,b)
if verbose:
print "[00] Tr = ",
print(Tr)
return Tr
def usage():
print "Usage: preprocess [OPTION]\n" +\
"Transform given Active Display Coordinates (ADC) to Media Coordinates (MC).\n"+\
"\n"+\
" -s, --source Specifies source directory\n"+\
" default: ./raw\n"+\
"\n"+\
" -o, --output Specifies source directory\n"+\
" default: ./data\n"+\
"\n"+\
" -v, --verbose View more details\n"
def main():
GAT = False
# Define Filenames
DELAY_FILENAME = "info/delay.csv" if not GAT else "info/gat.csv"
SNAPSHOT_FILENAME = "info/snapshot.tsv"
source = "raw/pororo_*.tsv"
output = "data/"
verbose = True
try:
opts, args = \
getopt.getopt(sys.argv[1:], "vs:o:")
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
for option, value in opts:
if option == "-v":
verbose = True
if option in ("-s", "--source"):
source = value
if option in ("-o", "--output"):
output = value
# get file name list
filenameList = glob.glob(source)
snapshotCoordsList = common.readData(SNAPSHOT_FILENAME, '\t', False, verbose)
delayList = common.readData(DELAY_FILENAME, ',', False, verbose)
for fullname in filenameList:
print "[01] Reading", fullname
(path, filename, name, extension) = common.pfne(fullname)
# snapshot coords
snapshotCoords = common.findOne(name, snapshotCoordsList)
tuples = []
for i in range(4):
tuples.append([float(snapshotCoords[i*2+0]), float(snapshotCoords[i*2+1])])
length, delay, skip = [int(i) for i in common.findOne(name, delayList)]
if verbose:
print "delay => ", delay, "skip => ", skip, "length =>", length
if GAT:
_splited = re.split('s03p0\d', filename)
output_filename = _splited[0] + 'GAT' + _splited[1]
else:
output_filename = filename
        # Preprocess and store to the given output filename.
if verbose:
print "preprocess({}, {}, snapshotCoords, {}, {})"\
.format(path + os.sep + filename, output + output_filename, length, delay, skip)
preprocess(path + os.sep + filename, output + output_filename, tuples, length, delay, skip)
if __name__ == "__main__":
main()
|
|
import sys
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
import random
from scipy import sparse
from scipy.special import comb
from scipy.special import gammaln
from scipy.special import erfcx
from scipy.stats import norm
import scipy.stats
import seaborn
import csv
import pandas as pd
import pickle
from collections import defaultdict
import operator
from scipy.sparse import csr_matrix
import itertools
import os.path
import math
import pybedtools
from common import parse_config, get_chrom_size
from large_average_submatrix_hic_avgcutoff_iter import map_rownum2pos, map_colnum2pos
""" This script filters out centromeric regions, row and column sum outliers, and repeat regions from the HiC contact matrix.
Each matrix is corrected by the mean and standard deviation of HiC contacts across the entire genome."""
def get_hic_matrix(hic_filename, chr1, chr2):
# construct matrix where each axis corresponds to position along the chromosome
# this matrix will be rectangular b/c sizes of interchromosomal matrices are not the same
# returns scipy sparse matrix
data = np.loadtxt(hic_filename, delimiter = '\t')
row_ind = data[:,0]/config["HIC_RESOLN"]
col_ind = data[:,1]/config["HIC_RESOLN"]
contact_values = data[:,2]
# obtain chromosome sizes
chr1_size = get_chrom_size(chr1, config["GENOME_DIR"])/config["HIC_RESOLN"] + 1
chr2_size = get_chrom_size(chr2, config["GENOME_DIR"])/config["HIC_RESOLN"] + 1
hic_matrix = csr_matrix((contact_values, (row_ind, col_ind)), shape = (chr1_size, chr2_size))
hic_dense = np.asarray(hic_matrix.todense())
row_labels = np.arange(chr1_size)*config["HIC_RESOLN"]
col_labels = np.arange(chr2_size)*config["HIC_RESOLN"]
df = pd.DataFrame(hic_dense, index = row_labels, columns = col_labels)
# get rid of nans
df = df.fillna(0)
return df
def plot_hic_matrix(df, chr1, chr2, label, plotname):
data = df.as_matrix()
plt.figure()
plt.imshow(data, cmap = 'Reds')
cbar = plt.colorbar()
#cbar.set_label('log(1+x) transformed rescaled HiC observed contacts')
#cbar.set_label('Transformed HiC contacts', fontsize = 12)
cbar.set_label(label)
cbar.solids.set_rasterized(True)
# label ticks with genomic position (Mb)
xaxis = range(0, df.shape[1], 100)
xlabels = [str(map_colnum2pos(df, x)/1000000.0) for x in xaxis]
plt.xticks(xaxis, xlabels)
yaxis = range(0, df.shape[0], 100)
ylabels = [str(map_rownum2pos(df, y)/1000000.0) for y in yaxis]
plt.yticks(yaxis, ylabels)
plt.xlabel('chr' + str(chr2) + ' (Mb)', fontsize = 14)
plt.ylabel('chr' + str(chr1) + ' (Mb)', fontsize = 14)
#plt.savefig(config["HIC_DIR"] + 'hic_transformed_rescaled.chr' + str(chr1) + '_chr' + str(chr2) + '.png')
plt.savefig(plotname)
plt.close()
def transform(data):
data_trans = np.log(1 + data)
return data_trans
def filter_centromere(df, chrom, row_or_col, filter_size = 1000000):
# get centromere locations
centrom_filename = config["GENOME_DIR"] + 'chrom_hg19.centromeres'
df_centrom = pd.read_csv(centrom_filename, sep = '\t')
chr_data = df_centrom[df_centrom['chrom'] == 'chr' + str(chrom)]
centrom_start = int(math.floor(float(chr_data['chromStart'])/config["HIC_RESOLN"])*config["HIC_RESOLN"])
centrom_end = int(math.ceil(float(chr_data['chromEnd'])/config["HIC_RESOLN"])*config["HIC_RESOLN"])
centrom_start = centrom_start - filter_size
centrom_end = centrom_end + filter_size
if (row_or_col == 'row'):
df.loc[centrom_start:centrom_end, :] = 0
if (row_or_col == 'col'):
df.loc[:, centrom_start:centrom_end] = 0
return df
def find_repeat_locations_wg():
# get repeats regions
filename = config["GENOME_DIR"] + 'rmsk.txt'
df_repeats = pd.read_csv(filename, header=None , sep= '\t', usecols = (5,6,7), names = ['chr', 'start', 'stop'])
df_repeats_bed = pybedtools.BedTool.from_dataframe(df_repeats)
# make a dictionary of all repeat regions for each chromosome - these regions should be taken out from hic
    # this threshold is based on the 95th percentile of the repeat coverage histogram over 250kb regions - PLEASE CHANGE THIS IF YOU WANT
threshold = 0.63009760000000004
dic_repeats_tofilter = {}
df_coverage = pd.DataFrame()
#chr_list = range(1,23)
chr_list = config["chrs"]
for chr in chr_list:
print chr
# construct a dataframe of all possible start sites for this chr
chr_size = get_chrom_size(chr, config["GENOME_DIR"])
start_regions = range(0, chr_size, config["HIC_RESOLN"])
df_chr = pd.DataFrame({'chr':['chr' + str(chr)]*len(start_regions), 'start':start_regions})
df_chr['stop'] = df_chr['start'] + config["HIC_RESOLN"]
df_chr_bed = pybedtools.BedTool.from_dataframe(df_chr)
coverage_chr = df_chr_bed.coverage(df_repeats_bed).to_dataframe(names = ['chr', 'start', 'end', 'bases covered', 'length A', 'length B', 'coverage'])
tofilter = coverage_chr[coverage_chr['coverage'] >= threshold]['start'].values
dic_repeats_tofilter[chr] = tofilter
df_coverage = pd.concat([df_coverage, coverage_chr])
return dic_repeats_tofilter
def filter_repeats(df, chr, dic_repeats_tofilter, row_or_col):
regions2filter = dic_repeats_tofilter[chr]
if (row_or_col == 'row'):
df.loc[regions2filter,:] = 0
if (row_or_col == 'col'):
df.loc[:,regions2filter] = 0
return df
def filter_hic(df, row, col, threshold_row, threshold_col):
# take hic matrix and zero out columns or rows that are outliers
ind_row = np.arange(len(row))[row>threshold_row]
df.iloc[ind_row,:] = 0
ind_col = np.arange(len(col))[col>threshold_col]
df.iloc[:,ind_col] = 0
return df
def raw_matrices():
chr_pairs = list(itertools.combinations(config["chrs"], 2))
for pair in chr_pairs:
print pair
chr1, chr2 = pair
hic_filename = config["HIC_DIR"] +'chr' + str(chr1) + '_chr' + str(chr2) + '.txt'
df = get_hic_matrix(hic_filename, chr1, chr2)
# make the minimum value 0
df = df - np.min(np.min(df))
        # transform
df = transform(df)
# plot the matrix
plot_hic_matrix(df, chr1, chr2, 'log-transformed Obs/Expected', config["HIC_DIR"] + 'chr' + str(chr1) + '_' + 'chr' + str(chr2) + '.png')
def row_col_sums(dic_repeats_tofilter):
# record nonzero values over all hic matrices
nonzero_entries = []
#chr_pairs = list(itertools.combinations(range(1, 23), 2))
chr_pairs = list(itertools.combinations(config["chrs"], 2))
#chr_pairs = list(itertools.product(range(1,23), ['X']))
for pair in chr_pairs:
print pair
chr1, chr2 = pair
hic_filename = config["HIC_DIR"] +'chr' + str(chr1) + '_chr' + str(chr2) + '.txt'
df = get_hic_matrix(hic_filename, chr1, chr2)
df = df - np.min(np.min(df))
# plot the matrix
print 'Before filtering'
print np.count_nonzero(df.sum(axis=0))
print np.count_nonzero(df.sum(axis=1))
# FILTER OUT CENTROMERIC REGIONS (2Mb)
df = filter_centromere(df, chr1, 'row', filter_size = 2000000)
df = filter_centromere(df, chr2, 'col', filter_size = 2000000)
print 'After centromere filtering'
print np.count_nonzero(df.sum(axis=0))
print np.count_nonzero(df.sum(axis=1))
# filter out repeats
df = filter_repeats(df, chr1, dic_repeats_tofilter, 'row')
df = filter_repeats(df, chr2, dic_repeats_tofilter, 'col')
print 'After repeat filtering'
print np.count_nonzero(df.sum(axis=0))
print np.count_nonzero(df.sum(axis=1))
# LOG TRANSFORM
df_transformed = transform(df)
# get all the row sums
row_orig = np.sum(df_transformed, axis = 1).as_matrix()
col_orig = np.sum(df_transformed, axis = 0).as_matrix()
# get rid of nonzero
row = row_orig[np.nonzero(row_orig)]
col = col_orig[np.nonzero(col_orig)]
threshold_row = detect_upper_outliers(row)
threshold_col = detect_upper_outliers(col)
# plt.figure()
# seaborn.violinplot(row, orient = 'v')
# plt.ylabel('HiC contacts log(1+x) transformed row sums')
# plt.title('chr' + str(chr1) + ' - ' + str(chr2))
# plt.savefig(config["HIC_FILT_DIR"] + 'row_sums.chr' + str(chr1) + '_chr' + str(chr2) + '.png')
# plt.close()
# plt.figure()
# seaborn.violinplot(col, orient = 'v')
# plt.ylabel('HiC contacts log(1+x) transformed row sums')
# plt.title('chr' + str(chr1) + ' - ' + str(chr2))
# plt.savefig(config["HIC_FILT_DIR"] + 'col_sums.chr' + str(chr1) + '_chr' + str(chr2) + '.png')
# plt.close()
print np.sum(row>threshold_row)
print np.sum(col>threshold_col)
df_filt = filter_hic(df_transformed, row_orig, col_orig, threshold_row, threshold_col)
# save new dataframe
df_filt.to_csv(config["HIC_FILT_DIR"] + 'chr' + str(chr1) + '_chr' + str(chr2) + '.txt')
# record nonzero entries
data = df_filt.as_matrix()
data_nonzero = data[np.nonzero(data)]
nonzero_entries.append(data_nonzero)
# save the nonzero entries
nonzero_entries = np.asarray(list(itertools.chain.from_iterable(nonzero_entries)))
np.savetxt(config["HIC_FILT_DIR"] + 'whole_genome_nonzero.logtrans.txt', nonzero_entries)
return nonzero_entries
def whole_genome_mean_std(nonzero_entries):
mean = np.mean(nonzero_entries)
std = np.std(nonzero_entries)
print 'Mean = ', mean
print 'St. dev. = ', std
return mean, std
def detect_upper_outliers(arr):
p25 = np.percentile(arr, 25)
p75 = np.percentile(arr, 75)
upper = p75 + 1.5*(p75-p25)
return upper
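# Hedged example (illustrative): for arr = [1, 2, 3, 4, 100], p25 = 2.0 and
# p75 = 4.0, so the upper Tukey fence is 4.0 + 1.5*2.0 = 7.0 and only the row
# or column sum of 100 would be flagged as an outlier.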
def z_score_hic_matrix(mean, std):
chr_pairs = list(itertools.combinations(config["chrs"], 2))
#chr_pairs = list(itertools.product(range(1,23), ['X']))
for pair in chr_pairs:
print pair
chr1, chr2 = pair
hic_filename = config["HIC_FILT_DIR"] +'chr' + str(chr1) + '_chr' + str(chr2) + '.txt'
df = pd.read_csv(hic_filename, index_col = 0)
# zscore matrix
df = (df - mean)/std
# save new matrix
df.to_csv(config["HIC_FILT_DIR"] + 'chr' + str(chr1) + '_chr' + str(chr2) + '.zscore.txt')
def output_blacklist_locations(mean, std):
# output a BED file with locations that have been removed (these locations will have an observed value of 0)
print "Generating a blacklist of locations..."
chr_pairs = list(itertools.combinations(config["chrs"], 2))
blacklist = defaultdict(list)
for pair in chr_pairs:
print pair
chr1, chr2 = pair
# read in
filename = config["HIC_FILT_DIR"] + 'chr' + str(chr1) + '_chr' + str(chr2) + '.txt'
df = pd.read_csv(filename, index_col=0)
# find out which columns have all zeroes in them
zero_cols = df.columns[(df == 0).all()]
blacklist[chr2].append(zero_cols)
# flip the dataframe
df = df.T
zero_rows = df.columns[(df == 0).all()]
blacklist[chr1].append(zero_rows)
# process the list
for chr in blacklist.keys():
values_list = blacklist[chr]
blacklist[chr] = set(map(int, list(itertools.chain.from_iterable(values_list))))
# pickle this dictionary
f = open(config["HIC_FILT_DIR"] + 'blacklist.pickle', 'wb')
pickle.dump(blacklist, f)
def main():
global config
config_fn = sys.argv[1]
config = parse_config(config_fn)
#raw_matrices()
    # construct a dictionary of regions that have more than 50% repeat coverage - these should be filtered out in HiC matrices
dic_repeats_tofilter = find_repeat_locations_wg()
nonzero_entries = row_col_sums(dic_repeats_tofilter)
##nonzero_entries = np.loadtxt(config["HIC_FILT_DIR"] + 'whole_genome_nonzero.logtrans.txt')
mean, std = whole_genome_mean_std(nonzero_entries)
z_score_hic_matrix(mean, std)
output_blacklist_locations(mean, std)
if __name__ == "__main__":
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import paramiko
def check_list(in_list, expected_size_of_list, obj_assert, err_msg):
obj_assert.assertEqual(
isinstance(in_list, list),
True,
"'in_list' is not a list."
)
obj_assert.assertEqual(
len(in_list),
expected_size_of_list,
err_msg
)
def get_sf_account_id(cs_api, cs_account_id, primary_storage_id, obj_assert, err_msg):
sf_account_id_request = {'accountid': cs_account_id, 'storageid': primary_storage_id}
sf_account_id_result = cs_api.getSolidFireAccountId(sf_account_id_request)
sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']
obj_assert.assertEqual(
isinstance(sf_account_id, int),
True,
err_msg
)
return sf_account_id
def get_iqn(cs_api, volume, obj_assert):
# Get volume IQN
sf_iscsi_name_request = {'volumeid': volume.id}
sf_iscsi_name_result = cs_api.getVolumeiScsiName(sf_iscsi_name_request)
sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
check_iscsi_name(sf_iscsi_name, obj_assert)
return sf_iscsi_name
def check_iscsi_name(sf_iscsi_name, obj_assert):
obj_assert.assertEqual(
sf_iscsi_name[0],
"/",
"The iSCSI name needs to start with a forward slash."
)
def set_supports_resign(supports_resign, db_connection):
_set_supports_resign_for_table(supports_resign, db_connection, "host_details")
_set_supports_resign_for_table(supports_resign, db_connection, "cluster_details")
def _set_supports_resign_for_table(supports_resign, db_connection, table):
sql_query = "Update " + str(table) + " Set value = '" + str(supports_resign) + "' Where name = 'supportsResign'"
# make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
db_connection.execute(sql_query)
def purge_solidfire_volumes(sfe):
deleted_volumes = sfe.list_deleted_volumes()
for deleted_volume in deleted_volumes.volumes:
sfe.purge_deleted_volume(deleted_volume.volume_id)
def get_not_active_sf_volumes(sfe, sf_account_id=None):
if sf_account_id is not None:
sf_volumes = sfe.list_volumes_for_account(sf_account_id).volumes
if sf_volumes is not None and len(sf_volumes) > 0:
sf_volumes = _get_not_active_sf_volumes_only(sf_volumes)
else:
sf_volumes = sfe.list_deleted_volumes().volumes
return sf_volumes
def _get_not_active_sf_volumes_only(sf_volumes):
not_active_sf_volumes_only = []
for sf_volume in sf_volumes:
if sf_volume.status != "active":
not_active_sf_volumes_only.append(sf_volume)
return not_active_sf_volumes_only
def get_active_sf_volumes(sfe, sf_account_id=None):
if sf_account_id is not None:
sf_volumes = sfe.list_volumes_for_account(sf_account_id).volumes
if sf_volumes is not None and len(sf_volumes) > 0:
sf_volumes = _get_active_sf_volumes_only(sf_volumes)
else:
sf_volumes = sfe.list_active_volumes().volumes
return sf_volumes
def _get_active_sf_volumes_only(sf_volumes):
active_sf_volumes_only = []
for sf_volume in sf_volumes:
if sf_volume.status == "active":
active_sf_volumes_only.append(sf_volume)
return active_sf_volumes_only
def check_and_get_sf_volume(sf_volumes, sf_volume_name, obj_assert, should_exist=True):
sf_volume = None
for volume in sf_volumes:
if volume.name == sf_volume_name:
sf_volume = volume
break
if should_exist:
obj_assert.assertNotEqual(
sf_volume,
None,
"Check if SF volume was created in correct account: " + str(sf_volumes)
)
else:
obj_assert.assertEqual(
sf_volume,
None,
"Check if SF volume was deleted: " + str(sf_volumes)
)
return sf_volume
def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):
xen_sr = xen_session.xenapi.SR.get_by_name_label(xen_sr_name)
if should_exist:
check_list(xen_sr, 1, obj_assert, "SR " + xen_sr_name + " doesn't exist, but should.")
sr_shared = xen_session.xenapi.SR.get_shared(xen_sr[0])
obj_assert.assertEqual(
sr_shared,
True,
"SR " + xen_sr_name + " is not shared, but should be."
)
else:
check_list(xen_sr, 0, obj_assert, "SR " + xen_sr_name + " exists, but shouldn't.")
def check_kvm_access_to_volume(iscsi_name, kvm_hosts, username, password, obj_assert, should_exist=True):
count = 0
for kvm_host in kvm_hosts:
ssh_connection = get_ssh_connection(kvm_host.ipaddress, username, password)
stdin, stdout, stderr = ssh_connection.exec_command("ls /dev/disk/by-path | grep " + iscsi_name)
result = stdout.read()
ssh_connection.close()
if result is not None and len(result.strip()) > len(iscsi_name):
count = count + 1
if should_exist:
obj_assert.assertTrue(count == 1, "Only one KVM host should be connected to the following IQN: " + iscsi_name)
else:
obj_assert.assertTrue(count == 0, "No KVM host should be connected to the following IQN: " + iscsi_name)
def check_vag(sf_volume, sf_vag_id, obj_assert):
obj_assert.assertEqual(
len(sf_volume.volume_access_groups),
1,
"The volume should only be in one VAG."
)
obj_assert.assertEqual(
sf_volume.volume_access_groups[0],
sf_vag_id,
"The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "."
)
def get_vag_id(cs_api, cluster_id, primary_storage_id, obj_assert):
# Get SF Volume Access Group ID
sf_vag_id_request = {'clusterid': cluster_id, 'storageid': primary_storage_id}
sf_vag_id_result = cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request)
sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId']
obj_assert.assertEqual(
isinstance(sf_vag_id, int),
True,
"The SolidFire VAG ID should be a non-zero integer."
)
return sf_vag_id
def format_iqn(iqn):
return "/" + iqn + "/0"
def check_size_and_iops(sf_volume, cs_volume, size, obj_assert):
obj_assert.assertEqual(
sf_volume.qos.min_iops,
cs_volume.miniops,
"Check QoS - Min IOPS: " + str(sf_volume.qos.min_iops)
)
obj_assert.assertEqual(
sf_volume.qos.max_iops,
cs_volume.maxiops,
"Check QoS - Max IOPS: " + str(sf_volume.qos.max_iops)
)
obj_assert.assertEqual(
sf_volume.total_size,
size,
"Check SolidFire volume size: " + str(sf_volume.total_size)
)
def get_volume_size_with_hsr(cs_api, cs_volume, obj_assert):
# Get underlying SF volume size with hypervisor snapshot reserve
sf_volume_size_request = {'volumeid': cs_volume.id}
sf_volume_size_result = cs_api.getSolidFireVolumeSize(sf_volume_size_request)
sf_volume_size = sf_volume_size_result['apisolidfirevolumesize']['solidFireVolumeSize']
obj_assert.assertEqual(
isinstance(sf_volume_size, int),
True,
"The SolidFire volume size should be a non-zero integer."
)
return sf_volume_size
def get_ssh_connection(ip_address, username, password):
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(ip_address, username=username, password=password)
return ssh_client
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden Transport Model
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3TransportModel",)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3TransportModel(S3Model):
"""
http://eden.sahanafoundation.org/wiki/BluePrint/Transport
"""
names = ("transport_airport",
"transport_heliport",
"transport_seaport",
)
def model(self):
T = current.T
db = current.db
messages = current.messages
UNKNOWN_OPT = messages.UNKNOWN_OPT
settings = current.deployment_settings
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
# ---------------------------------------------------------------------
# Airports
#
storage_types = {
1: T("covered"),
2: T("uncovered"),
}
airport_capacity_opts = {
2: T("number of planes"),
3: T("m3")
}
# http://en.wikipedia.org/wiki/Runway#Surface_type_codes
runway_surface_opts = {"ASP": T("Asphalt"),
"BIT": T("Bitumenous asphalt or tarmac"),
#"BRI": T("Bricks"), (no longer in use, covered with asphalt or concrete now)
"CLA": T("Clay"),
"COM": T("Composite"),
"CON": T("Concrete"),
"COP": T("Composite"),
"COR": T("Coral (coral reef structures)"),
"GRE": T("Graded or rolled earth, grass on graded earth"),
"GRS": T("Grass or earth not graded or rolled"),
"GVL": T("Gravel"),
"ICE": T("Ice"),
"LAT": T("Laterite"),
"MAC": T("Macadam"),
"PEM": T("Partially concrete, asphalt or bitumen-bound macadam"),
"PER": T("Permanent surface, details unknown"),
"PSP": T("Marsden Matting (derived from pierced/perforated steel planking)"),
"SAN": T("Sand"),
"SMT": T("Sommerfeld Tracking"),
"SNO": T("Snow"),
"U": T("Unknown surface"),
}
# SIGCAF has these:
# http://www.humanitarianresponse.info/operations/central-african-republic/dataset/central-african-republic-aerodromes-airports-airfields
# BASG, BL, BLA, BLAG, BLG, PM/BL
# WFP just use Paved/Unpaved
# SIGCAF classifications
# We could consider using these instead?
# http://en.wikipedia.org/wiki/Pavement_classification_number
aircraft_size_opts = {"MH1521": "MH.1521", # 1 ton 6-seater monoplane: http://en.wikipedia.org/wiki/Max_Holste_Broussard#Specifications_.28MH.1521M.29
"PA31": "PA-31", # 1.3 tons twin prop http://en.wikipedia.org/wiki/Piper_PA-31_Navajo#Specifications_.28PA-31_Navajo.29
"3TN": T("3 Tons"),
"DC3": "DC-3", # 4 tons http://en.wikipedia.org/wiki/Douglas_DC-3#Specifications_.28DC-3A.29
"SE210": "SE 210", # 8 tons http://en.wikipedia.org/wiki/Sud_Aviation_Caravelle#Specifications_.28Caravelle_III.29
"DC4": "DC-4", # 10 tons http://en.wikipedia.org/wiki/Douglas_DC-4#Specifications_.28DC-4-1009.29
"13TN": T("13 Tons"),
"C160": "C-160", # 17 tons http://en.wikipedia.org/wiki/Transall_C-160#Specifications_.28C-160.29
"Larger": T("Larger"),
}
# Numbers are also in the XSL
humanitarian_use_opts = {1: T("No"),
2: T("Upon request"),
3: T("Connection"),
4: T("Hub"),
9: T("Closed"),
}
if settings.get_transport_airport_code_unique():
code_requires = IS_EMPTY_OR([IS_LENGTH(10),
IS_NOT_IN_DB(db, "transport_airport.code"),
])
else:
code_requires = IS_EMPTY_OR(IS_LENGTH(10))
tablename = "transport_airport"
define_table(tablename,
#super_link("doc_id", "doc_entity"),
#super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
Field("name", notnull=True,
length = 64, # Mayon Compatibility
label = T("Name"),
),
# Code is part of the SE
Field("code",
label = T("Code"),
length = 10, # Mayon Compatibility
requires = code_requires,
# Enable in Templates as-required
readable = False,
writable = False,
),
# Other codes can be added as tags if-required, but these 2 are so common that they are worth putting directly in the table
Field("icao", length=4,
label = T("ICAO"),
requires = IS_EMPTY_OR(IS_NOT_IN_DB(db, "transport_airport.icao")),
),
Field("iata", length=3,
label = T("IATA"),
requires = IS_EMPTY_OR(IS_NOT_IN_DB(db, "transport_airport.iata")),
),
# @ToDo: Expose Elevation & Lat/Lon to Widget
location_id(),
# We should be more specific:
# http://en.wikipedia.org/wiki/Runway#Declared_distances
Field("runway_length", "integer",
label = T("Runway Length (m)"),
),
Field("runway_width", "integer",
label = T("Runway Width (m)"),
),
Field("runway_surface",
default = "U",
label = T("Runway Surface"),
represent = lambda opt: \
runway_surface_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(runway_surface_opts),
),
Field("aircraft_max_size",
label = T("Aircraft Maximum Size"),
represent = lambda opt: \
aircraft_size_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(aircraft_size_opts)
),
),
Field("humanitarian_use", "integer",
label = T("Humanitarian Use"),
represent = lambda opt: \
humanitarian_use_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(humanitarian_use_opts)
),
),
organisation_id(),
Field("restrictions", "text",
label = T("Restrictions"),
# Enable in Templates as-required
readable = False,
writable = False,
),
Field("ils", "boolean",
label = T("Instrument Landing System"),
represent=lambda bool: \
(bool and [T("Yes")] or [T("No")])[0],
# Enable in Templates as-required
readable = False,
writable = False,
),
Field("lighting", "boolean",
label = T("Lighting"),
represent = lambda bool: \
(bool and [T("Yes")] or [T("No")])[0],
# Enable in Templates as-required
readable = False,
writable = False,
),
Field("immigration_customs_capabilities", "text",
label = T("Immigration and Customs Capabilities"),
# Enable in Templates as-required
readable = False,
writable = False,
),
Field("security_desc", "text",
label = T("Security Description"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Security Description"),
T("Description of perimeter fencing, security guards, security lighting."))),
# Enable in Templates as-required
readable = False,
writable = False,
),
# @ToDo: put storage type inline
Field("storage_capacity", "double",
label = T("Storage Capacity (m3)"),
# Enable in Templates as-required
readable = False,
writable = False,
),
Field("storage_type", "integer",
label = T("Storage Type"),
represent = lambda opt: \
storage_types.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(storage_types)
),
# Enable in Templates as-required
readable = False,
writable = False,
),
# @ToDo: put units inline
Field("parking_tarmac_space", "double",
label = T("Parking/Tarmac Space Capacity"),
# Enable in Templates as-required
readable = False,
writable = False,
),
Field("capacity", "integer",
default = 1,
label = T("Parking/Tarmac Space Units"),
represent = lambda opt: \
airport_capacity_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(airport_capacity_opts)
),
# Enable in Templates as-required
readable = False,
writable = False,
),
Field("helipad_info", "text",
label = T("Helipad Information"),
# Enable in Templates as-required
readable = False,
writable = False,
),
self.pr_person_id(
label = T("Information Source"),
# Enable in Templates as-required
readable = False,
writable = False,
),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
                           represent = lambda opt: \
                                       (opt and [T("Obsolete")] or [current.messages["NONE"]])[0],
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create=T("Create Airport"),
title_display=T("Airport Details"),
title_list=T("Airports"),
title_update=T("Edit Airport"),
title_upload=T("Import Airports"),
label_list_button=T("List Airports"),
label_delete_button=T("Delete Airport"),
msg_record_created=T("Airport added"),
msg_record_modified=T("Airport updated"),
msg_record_deleted=T("Airport deleted"),
msg_list_empty=T("No Airports currently registered"))
configure(tablename,
list_fields = ["name",
"humanitarian_use",
"organisation_id",
"location_id$lat",
"location_id$lon",
"location_id$elevation",
"runway_length",
"runway_width",
"runway_surface",
"aircraft_max_size",
],
#onaccept = self.transport_airport_onaccept,
#super_entity = ("doc_entity", "pr_pentity", "org_site"),
super_entity = "org_site",
)
# ---------------------------------------------------------------------
# Heliports
#
if settings.get_transport_heliport_code_unique():
code_requires = IS_EMPTY_OR([IS_LENGTH(10),
IS_NOT_IN_DB(db, "transport_heliport.code"),
])
else:
code_requires = IS_EMPTY_OR(IS_LENGTH(10))
tablename = "transport_heliport"
define_table(tablename,
#super_link("doc_id", "doc_entity"),
#super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
Field("name", notnull=True,
length = 64, # Mayon Compatibility
label = T("Name"),
),
Field("code",
label = T("Code"),
length = 10, # Mayon Compatibility
requires = code_requires,
# Deployments that don't want site codes can hide them
#readable = False,
#writable = False,
),
organisation_id(),
location_id(),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Obsolete")] or [current.messages["NONE"]])[0],
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create=T("Create Heliport"),
title_display=T("Heliport Details"),
title_list=T("Heliports"),
title_update=T("Edit Heliport"),
title_upload=T("Import Heliports"),
label_list_button=T("List Heliports"),
label_delete_button=T("Delete Heliport"),
msg_record_created=T("Heliport added"),
msg_record_modified=T("Heliport updated"),
msg_record_deleted=T("Heliport deleted"),
msg_list_empty=T("No Heliports currently registered"))
configure(tablename,
#onaccept = self.transport_heliport_onaccept,
#super_entity = ("doc_entity", "pr_pentity", "org_site"),
super_entity = "org_site",
)
# ---------------------------------------------------------------------
# Seaports
#
ownership_opts = {
1: T("Public"),
2: T("Private")
}
unit_opts = {
1: T("ft"),
2: T("m")
}
if settings.get_transport_seaport_code_unique():
code_requires = IS_EMPTY_OR([IS_LENGTH(10),
IS_NOT_IN_DB(db, "transport_seaport.code"),
])
else:
code_requires = IS_EMPTY_OR(IS_LENGTH(10))
tablename = "transport_seaport"
define_table(tablename,
#super_link("doc_id", "doc_entity"),
#super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
Field("name", notnull=True,
length = 64, # Mayon Compatibility
label = T("Name"),
),
Field("code",
label = T("Code"),
length = 10, # Mayon Compatibility
requires = code_requires,
# Deployments that don't want site codes can hide them
#readable = False,
#writable = False,
),
Field("ownership_type", "integer",
default = 1,
label = T("Ownership"),
represent = lambda opt: \
ownership_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(ownership_opts, zero=None),
),
Field("max_height", "double",
label = T("Max Height"),
),
Field("max_height_units", "integer",
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(unit_opts, zero=None),
),
Field("roll_on_off", "boolean",
default = False,
represent = lambda opt: \
(opt and [T("Yes")] or [T("No")])[0],
label = T("Roll On Roll Off Berth"),
),
Field("cargo_pier_depth", "double",
label = T("Cargo Pier Depth"),
),
Field("cargo_pier_depth_units", "integer",
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(unit_opts, zero=None),
),
Field("oil_terminal_depth", "double",
label = T("Oil Terminal Depth"),
),
Field("oil_terminal_depth_units", "integer",
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(unit_opts, zero=None),
),
Field("dry_dock", "boolean",
default = False,
label = T("Dry Dock"),
represent = lambda opt: \
(opt and [T("Yes")] or [T("No")])[0],
),
Field("vessel_max_length", "double",
label = T("Vessel Max Length"),
),
Field("vessel_max_length_units", "integer",
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(unit_opts, zero=None),
),
Field("repairs", "text",
label = T("Repairs"),
),
Field ("shelter", "text",
label = T("Shelter"),
),
Field("warehouse_capacity", "double",
label = T("Warehousing Storage Capacity"),
),
Field("secure_storage_capacity", "double",
label = T("Secure Storage Capacity"),
),
Field("customs_warehouse_capacity", "double",
label = T("Customs Warehousing Storage Capacity"),
),
Field("tugs", "integer",
label = T("Number of Tugboats"),
),
Field("tug_capacity", "double",
label = T("Tugboat Capacity"),
),
Field("barges", "integer",
label = T("Number of Barges"),
),
Field("barge_capacity", "double",
label = T("Barge Capacity"),
),
Field("loading_equipment", "text",
label = T("Loading Equipment"),
),
Field("customs_capacity", "text",
label = T("Customs Capacity"),
),
Field("security", "text",
label = T("Security"),
),
Field("high_tide_depth", "double",
label = T("High Tide Depth"),
),
Field("high_tide_depth_units", "integer",
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(unit_opts, zero=None),
),
Field("low_tide_depth", "double",
label = T("Low Tide Depth"),
),
Field("low_tide_depth_units", "integer",
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(unit_opts, zero=None),
),
Field("flood_depth", "double",
label = T("Flood Depth"),
),
Field("flood_depth_units", "integer",
default = 1,
label = T("Units"),
represent = lambda opt: \
unit_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(unit_opts, zero=None),
),
organisation_id(),
location_id(),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Closed")] or [T("Operational")])[0],
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create=T("Create Seaport"),
title_display=T("Seaport Details"),
title_list=T("Seaports"),
title_update=T("Edit Seaport"),
title_upload=T("Import Seaports"),
label_list_button=T("List Seaports"),
label_delete_button=T("Delete Seaport"),
msg_record_created=T("Seaport added"),
msg_record_modified=T("Seaport updated"),
msg_record_deleted=T("Seaport deleted"),
msg_list_empty=T("No Seaports currently registered"))
configure(tablename,
#onaccept = self.transport_seaport_onaccept,
#super_entity = ("doc_entity", "pr_pentity", "org_site"),
super_entity = "org_site",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# -------------------------------------------------------------------------
@staticmethod
def transport_airport_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
# If made into a pe_id:
#current.s3db.org_update_affiliations("transport_airport", form.vars)
return
# -------------------------------------------------------------------------
@staticmethod
def transport_heliport_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
# If made into a pe_id:
#current.s3db.org_update_affiliations("transport_heliport", form.vars)
return
# -------------------------------------------------------------------------
@staticmethod
def transport_seaport_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
# If made into a pe_id:
#current.s3db.org_update_affiliations("transport_seaport", form.vars)
return
# END =========================================================================
|
|
import re
from livestreamer.plugin import Plugin, PluginError
from livestreamer.plugin.api import http, validate
from livestreamer.plugin.api.utils import parse_query
from livestreamer.stream import HTTPStream, HLSStream
API_KEY = "AIzaSyBDBi-4roGzWJN4du9TuDMLd_jVTcVkKz4"
API_BASE = "https://www.googleapis.com/youtube/v3"
API_SEARCH_URL = API_BASE + "/search"
API_VIDEO_INFO = "http://youtube.com/get_video_info"
HLS_HEADERS = {
"User-Agent": "Mozilla/5.0"
}
def parse_stream_map(stream_map):
if not stream_map:
return []
return [parse_query(s) for s in stream_map.split(",")]
def parse_fmt_list(formatsmap):
formats = {}
if not formatsmap:
return formats
for format in formatsmap.split(","):
s = format.split("/")
(w, h) = s[1].split("x")
formats[int(s[0])] = "{0}p".format(h)
return formats
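# Illustrative sketch (not part of the plugin): "fmt_list" is a comma-separated list of
# "itag/WIDTHxHEIGHT/..." entries, and parse_fmt_list() maps each itag to a "<height>p"
# name. The values below are made-up examples of that format.
def _example_parse_fmt_list():
    fmt_list = "22/1280x720/9/0/115,18/640x360/9/0/115"
    return parse_fmt_list(fmt_list)  # {22: "720p", 18: "360p"}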
_config_schema = validate.Schema(
{
validate.optional("fmt_list"): validate.all(
validate.text,
validate.transform(parse_fmt_list)
),
validate.optional("url_encoded_fmt_stream_map"): validate.all(
validate.text,
validate.transform(parse_stream_map),
[{
"itag": validate.all(
validate.text,
validate.transform(int)
),
"quality": validate.text,
"url": validate.url(scheme="http"),
validate.optional("s"): validate.text,
validate.optional("stereo3d"): validate.all(
validate.text,
validate.transform(int),
validate.transform(bool)
),
}]
),
validate.optional("adaptive_fmts"): validate.all(
validate.text,
validate.transform(parse_stream_map),
[{
validate.optional("s"): validate.text,
"type": validate.all(
validate.text,
validate.transform(lambda t: t.split(";")[0].split("/")),
[validate.text, validate.text]
),
"url": validate.all(
validate.url(scheme="http")
)
}]
),
validate.optional("hlsvp"): validate.text,
validate.optional("live_playback"): validate.transform(bool),
"status": validate.text
}
)
_search_schema = validate.Schema(
{
"items": [{
"id": {
"videoId": validate.text
}
}]
},
validate.get("items")
)
_channelid_re = re.compile('meta itemprop="channelId" content="([^"]+)"')
_url_re = re.compile("""
    http(s)?://(\w+\.)?youtube\.com
(?:
(?:
/(watch.+v=|embed/|v/)
            (?P<video_id>[0-9A-Za-z_-]{11})
)
|
(?:
/(user|channel)/(?P<user>[^/?]+)
)
)
""", re.VERBOSE)
class YouTube(Plugin):
@classmethod
    def can_handle_url(cls, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, stream):
match = re.match("(\w+)_3d", stream)
if match:
weight, group = Plugin.stream_weight(match.group(1))
weight -= 1
group = "youtube_3d"
else:
weight, group = Plugin.stream_weight(stream)
return weight, group
def _find_channel_video(self):
res = http.get(self.url)
match = _channelid_re.search(res.text)
if not match:
return
channel_id = match.group(1)
query = {
"channelId": channel_id,
"type": "video",
"eventType": "live",
"part": "id",
"key": API_KEY
}
res = http.get(API_SEARCH_URL, params=query)
videos = http.json(res, schema=_search_schema)
for video in videos:
video_id = video["id"]["videoId"]
return video_id
def _get_stream_info(self, url):
match = _url_re.match(url)
user = match.group("user")
if user:
video_id = self._find_channel_video()
else:
video_id = match.group("video_id")
if not video_id:
return
params = {
"video_id": video_id,
"el": "player_embedded"
}
res = http.get(API_VIDEO_INFO, params=params)
return parse_query(res.text, name="config", schema=_config_schema)
def _get_streams(self):
info = self._get_stream_info(self.url)
if not info:
return
formats = info.get("fmt_list")
streams = {}
protected = False
for stream_info in info.get("url_encoded_fmt_stream_map", []):
if stream_info.get("s"):
protected = True
continue
stream = HTTPStream(self.session, stream_info["url"])
name = formats.get(stream_info["itag"]) or stream_info["quality"]
if stream_info.get("stereo3d"):
name += "_3d"
streams[name] = stream
# Extract audio streams from the DASH format list
for stream_info in info.get("adaptive_fmts", []):
if stream_info.get("s"):
protected = True
continue
stream_type, stream_format = stream_info["type"]
if stream_type != "audio":
continue
stream = HTTPStream(self.session, stream_info["url"])
name = "audio_{0}".format(stream_format)
streams[name] = stream
hls_playlist = info.get("hlsvp")
if hls_playlist:
try:
hls_streams = HLSStream.parse_variant_playlist(
self.session, hls_playlist, headers=HLS_HEADERS, namekey="pixels"
)
streams.update(hls_streams)
except IOError as err:
self.logger.warning("Failed to extract HLS streams: {0}", err)
if not streams and protected:
raise PluginError("This plugin does not support protected videos, "
"try youtube-dl instead")
return streams
__plugin__ = YouTube
|
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = pd.array([1, 2, -3, np.nan])
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(pd.array(ufunc(a.astype(float)), dtype="Int64"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = pd.array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = FloatingArray(ufunc(a.astype(float)), mask=a._mask)
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_int(ufunc):
# two IntegerArrays
a = pd.array([1, 2, -3, np.nan])
result = ufunc(a, a)
expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
# IntegerArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = pd.array(ufunc(a.astype(float), arr), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = pd.array(ufunc(arr, a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
# IntegerArray with scalar
result = ufunc(a, 1)
expected = pd.array(ufunc(a.astype(float), 1), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = pd.array(ufunc(1, a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
def test_ufunc_binary_output():
a = pd.array([1, 2, np.nan])
result = np.modf(a)
expected = np.modf(a.to_numpy(na_value=np.nan, dtype="float"))
expected = (pd.array(expected[0]), pd.array(expected[1]))
assert isinstance(result, tuple)
assert len(result) == 2
for x, y in zip(result, expected):
tm.assert_extension_array_equal(x, y)
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
arr = pd.array(values)
res = np.add.reduce(arr)
expected = arr.sum(skipna=False)
tm.assert_almost_equal(res, expected)
@pytest.mark.parametrize(
"pandasmethname, kwargs",
[
("var", {"ddof": 0}),
("var", {"ddof": 1}),
("kurtosis", {}),
("skew", {}),
("sem", {}),
],
)
def test_stat_method(pandasmethname, kwargs):
s = pd.Series(data=[1, 2, 3, 4, 5, 6, np.nan, np.nan], dtype="Int64")
pandasmeth = getattr(s, pandasmethname)
result = pandasmeth(**kwargs)
s2 = pd.Series(data=[1, 2, 3, 4, 5, 6], dtype="Int64")
pandasmeth = getattr(s2, pandasmethname)
expected = pandasmeth(**kwargs)
assert expected == result
def test_value_counts_na():
arr = pd.array([1, 2, 1, pd.NA], dtype="Int64")
result = arr.value_counts(dropna=False)
ex_index = pd.Index([1, 2, pd.NA], dtype="Int64")
assert ex_index.dtype == "Int64"
expected = pd.Series([2, 1, 1], index=ex_index, dtype="Int64")
tm.assert_series_equal(result, expected)
result = arr.value_counts(dropna=True)
expected = pd.Series([2, 1], index=arr[:2], dtype="Int64")
assert expected.index.dtype == arr.dtype
tm.assert_series_equal(result, expected)
def test_value_counts_empty():
# https://github.com/pandas-dev/pandas/issues/33317
ser = pd.Series([], dtype="Int64")
result = ser.value_counts()
idx = pd.Index([], dtype=ser.dtype)
assert idx.dtype == ser.dtype
expected = pd.Series([], index=idx, dtype="Int64")
tm.assert_series_equal(result, expected)
def test_value_counts_with_normalize():
# GH 33172
ser = pd.Series([1, 2, 1, pd.NA], dtype="Int64")
result = ser.value_counts(normalize=True)
expected = pd.Series([2, 1], index=ser[:2], dtype="Float64") / 3
assert expected.index.dtype == ser.dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("min_count", [0, 4])
def test_integer_array_sum(skipna, min_count, any_int_ea_dtype):
dtype = any_int_ea_dtype
arr = pd.array([1, 2, 3, None], dtype=dtype)
result = arr.sum(skipna=skipna, min_count=min_count)
if skipna and min_count == 0:
assert result == 6
else:
assert result is pd.NA
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("method", ["min", "max"])
def test_integer_array_min_max(skipna, method, any_int_ea_dtype):
dtype = any_int_ea_dtype
arr = pd.array([0, 1, None], dtype=dtype)
func = getattr(arr, method)
result = func(skipna=skipna)
if skipna:
assert result == (0 if method == "min" else 1)
else:
assert result is pd.NA
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("min_count", [0, 9])
def test_integer_array_prod(skipna, min_count, any_int_ea_dtype):
dtype = any_int_ea_dtype
arr = pd.array([1, 2, None], dtype=dtype)
result = arr.prod(skipna=skipna, min_count=min_count)
if skipna and min_count == 0:
assert result == 2
else:
assert result is pd.NA
@pytest.mark.parametrize(
"values, expected", [([1, 2, 3], 6), ([1, 2, 3, None], 6), ([None], 0)]
)
def test_integer_array_numpy_sum(values, expected):
arr = pd.array(values, dtype="Int64")
result = np.sum(arr)
assert result == expected
@pytest.mark.parametrize("op", ["sum", "prod", "min", "max"])
def test_dataframe_reductions(op):
# https://github.com/pandas-dev/pandas/pull/32867
# ensure the integers are not cast to float during reductions
df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")})
    result = getattr(df, op)()
assert isinstance(result["a"], np.int64)
# TODO(jreback) - these need testing / are broken
# shift
# set_index (destroys type)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Joyent Cloud (http://www.joyentcloud.com) driver.
"""
import base64
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.types import LibcloudError
from libcloud.compute.providers import Provider
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.compute.base import is_private_subnet
from libcloud.compute.types import NodeState, InvalidCredsError
from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize
API_HOST_SUFFIX = '.api.joyentcloud.com'
API_VERSION = '~6.5'
NODE_STATE_MAP = {
'provisioning': NodeState.PENDING,
'running': NodeState.RUNNING,
'stopping': NodeState.TERMINATED,
'stopped': NodeState.TERMINATED,
'deleted': NodeState.TERMINATED
}
LOCATIONS = ['us-east-1', 'us-west-1', 'us-sw-1', 'eu-ams-1']
DEFAULT_LOCATION = LOCATIONS[0]
class JoyentResponse(JsonResponse):
"""
Joyent response class.
"""
valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
def parse_error(self):
if self.status == 401:
data = self.parse_body()
raise InvalidCredsError(data['code'] + ': ' + data['message'])
return self.body
def success(self):
return self.status in self.valid_response_codes
class JoyentConnection(ConnectionUserAndKey):
"""
Joyent connection class.
"""
responseCls = JoyentResponse
def add_default_headers(self, headers):
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json; charset=UTF-8'
headers['X-Api-Version'] = API_VERSION
user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8'))
return headers
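# Illustrative sketch (not used by the driver): the Authorization header built above is
# standard HTTP Basic auth over the user id and API key. The credentials here are
# hypothetical, shown only to illustrate the header format.
def _example_basic_auth_header():
    user_b64 = base64.b64encode(b('demo:secret'))
    return 'Basic %s' % (user_b64.decode('utf-8'))  # 'Basic ZGVtbzpzZWNyZXQ='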
class JoyentNodeDriver(NodeDriver):
"""
Joyent node driver class.
"""
type = Provider.JOYENT
name = 'Joyent'
website = 'http://www.joyentcloud.com'
connectionCls = JoyentConnection
features = {'create_node': ['generates_password']}
def __init__(self, *args, **kwargs):
"""
@inherits: L{NodeDriver.__init__}
@keyword location: Location which should be used
@type location: C{str}
"""
if 'location' in kwargs:
if kwargs['location'] not in LOCATIONS:
msg = 'Invalid location: "%s". Valid locations: %s'
raise LibcloudError(msg % (kwargs['location'],
', '.join(LOCATIONS)), driver=self)
else:
kwargs['location'] = DEFAULT_LOCATION
super(JoyentNodeDriver, self).__init__(*args, **kwargs)
self.connection.host = kwargs['location'] + API_HOST_SUFFIX
def list_images(self):
result = self.connection.request('/my/datasets').object
images = []
for value in result:
extra = {'type': value['type'], 'urn': value['urn'],
'os': value['os'], 'default': value['default']}
image = NodeImage(id=value['id'], name=value['name'],
driver=self.connection.driver, extra=extra)
images.append(image)
return images
def list_sizes(self):
result = self.connection.request('/my/packages').object
sizes = []
for value in result:
size = NodeSize(id=value['name'], name=value['name'],
ram=value['memory'], disk=value['disk'],
bandwidth=None, price=0.0,
driver=self.connection.driver)
sizes.append(size)
return sizes
def list_nodes(self):
result = self.connection.request('/my/machines').object
nodes = []
for value in result:
node = self._to_node(value)
nodes.append(node)
return nodes
def reboot_node(self, node):
data = json.dumps({'action': 'reboot'})
result = self.connection.request('/my/machines/%s' % (node.id),
data=data, method='POST')
return result.status == httplib.ACCEPTED
def destroy_node(self, node):
result = self.connection.request('/my/machines/%s' % (node.id),
method='DELETE')
return result.status == httplib.NO_CONTENT
def create_node(self, **kwargs):
name = kwargs['name']
size = kwargs['size']
image = kwargs['image']
data = json.dumps({'name': name, 'package': size.id,
'dataset': image.id})
result = self.connection.request('/my/machines', data=data,
method='POST')
return self._to_node(result.object)
def ex_stop_node(self, node):
"""
Stop node
@param node: The node to be stopped
@type node: L{Node}
@rtype: C{bool}
"""
data = json.dumps({'action': 'stop'})
result = self.connection.request('/my/machines/%s' % (node.id),
data=data, method='POST')
return result.status == httplib.ACCEPTED
def ex_start_node(self, node):
"""
Start node
        @param node: The node to be started
@type node: L{Node}
@rtype: C{bool}
"""
data = json.dumps({'action': 'start'})
result = self.connection.request('/my/machines/%s' % (node.id),
data=data, method='POST')
return result.status == httplib.ACCEPTED
def _to_node(self, data):
state = NODE_STATE_MAP[data['state']]
public_ips = []
private_ips = []
extra = {}
for ip in data['ips']:
if is_private_subnet(ip):
private_ips.append(ip)
else:
public_ips.append(ip)
if 'credentials' in data['metadata']:
extra['password'] = data['metadata']['credentials']['root']
node = Node(id=data['id'], name=data['name'], state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self.connection.driver, extra=extra)
return node
|
|
""" Trains an agent with Deep Q Learning or Double DQN on Breakout. Uses OpenAI Gym.
"""
import sys
import os
sys.path.insert(0,os.path.expanduser('~/Library/Python/2.7/lib/python/site-packages/'))
import numpy as np
import cPickle as pickle
import gym
from optparse import OptionParser
import itertools
import random
import time
from collections import deque, namedtuple
import copy
from scipy.misc import imresize
from malpi.layers import *
from malpi.model import *
from malpi.optimizer import Optimizer
from malpi.experience import Experience2
try:
import config
except ImportError:
print "Failed to load config file config.py."
print "Try copying config_empty.py to config.py and re-running."
exit()
import ast
from sklearn.linear_model import BayesianRidge, LinearRegression
import sklearn.gaussian_process as gp
from scipy.stats import norm
from scipy.optimize import minimize
def expected_improvement(x, gaussian_process, evaluated_loss, greater_is_better=False, n_params=1):
""" expected_improvement
Expected improvement acquisition function.
Arguments:
----------
x: array-like, shape = [n_samples, n_hyperparams]
The point for which the expected improvement needs to be computed.
gaussian_process: GaussianProcessRegressor object.
Gaussian process trained on previously evaluated hyperparameters.
evaluated_loss: Numpy array.
            Numpy array that contains the values of the loss function for the previously
evaluated hyperparameters.
greater_is_better: Boolean.
Boolean flag that indicates whether the loss function is to be maximised or minimised.
n_params: int.
Dimension of the hyperparameter space.
"""
x_to_predict = x.reshape(-1, n_params)
mu, sigma = gaussian_process.predict(x_to_predict, return_std=True)
if greater_is_better:
loss_optimum = np.max(evaluated_loss)
else:
loss_optimum = np.min(evaluated_loss)
scaling_factor = (-1) ** (not greater_is_better)
# In case sigma equals zero
with np.errstate(divide='ignore'):
Z = scaling_factor * (mu - loss_optimum) / sigma
expected_improvement = scaling_factor * (mu - loss_optimum) * norm.cdf(Z) + sigma * norm.pdf(Z)
        expected_improvement[sigma == 0.0] = 0.0
return -1 * expected_improvement
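# Minimal usage sketch (illustrative only, with made-up sample points): fit a GP on a few
# evaluated hyperparameters, then query the (negated) expected improvement at a candidate.
def _example_expected_improvement():
    xp = np.array([[0.1], [0.5], [0.9]])   # previously evaluated hyperparameters
    yp = np.array([1.2, 0.4, 0.9])         # corresponding loss values
    model = gp.GaussianProcessRegressor(kernel=gp.kernels.Matern(), normalize_y=True)
    model.fit(xp, yp)
    # Returns -EI, so smaller values indicate more promising candidates.
    return expected_improvement(np.array([0.3]), model, yp, greater_is_better=False, n_params=1)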
def sample_next_hyperparameter(acquisition_func, gaussian_process, evaluated_loss, greater_is_better=False,
bounds=(0, 10), n_restarts=25):
""" sample_next_hyperparameter
Proposes the next hyperparameter to sample the loss function for.
Arguments:
----------
acquisition_func: function.
Acquisition function to optimise.
gaussian_process: GaussianProcessRegressor object.
Gaussian process trained on previously evaluated hyperparameters.
evaluated_loss: array-like, shape = [n_obs,]
            Numpy array that contains the values of the loss function for the previously
evaluated hyperparameters.
greater_is_better: Boolean.
Boolean flag that indicates whether the loss function is to be maximised or minimised.
        bounds: array-like, shape = [n_params, 2].
            Lower and upper bounds on each hyperparameter, used for the random starting
            points and passed to the L-BFGS-B optimiser.
n_restarts: integer.
Number of times to run the minimiser with different starting points.
"""
best_x = None
best_acquisition_value = 1
n_params = bounds.shape[0]
for starting_point in np.random.uniform(bounds[:, 0], bounds[:, 1], size=(n_restarts, n_params)):
res = minimize(fun=acquisition_func,
x0=starting_point.reshape(1, -1),
bounds=bounds,
method='L-BFGS-B',
args=(gaussian_process, evaluated_loss, greater_is_better, n_params))
if res.fun < best_acquisition_value:
best_acquisition_value = res.fun
best_x = res.x
return best_x
def bayesian_optimisation(n_iters, sample_loss, bounds, x0=None, n_pre_samples=5,
gp_params=None, random_search=False, alpha=1e-5, epsilon=1e-7):
""" bayesian_optimisation
Uses Gaussian Processes to optimise the loss function `sample_loss`.
Arguments:
----------
n_iters: integer.
Number of iterations to run the search algorithm.
sample_loss: function.
Function to be optimised.
bounds: array-like, shape = [n_params, 2].
Lower and upper bounds on the parameters of the function `sample_loss`.
x0: array-like, shape = [n_pre_samples, n_params].
Array of initial points to sample the loss function for. If None, randomly
samples from the loss function.
n_pre_samples: integer.
If x0 is None, samples `n_pre_samples` initial points from the loss function.
gp_params: dictionary.
Dictionary of parameters to pass on to the underlying Gaussian Process.
        random_search: integer.
            If non-zero, the number of random candidate points to evaluate instead of
            optimising the acquisition function with L-BFGS-B.
alpha: double.
Variance of the error term of the GP.
epsilon: double.
Precision tolerance for floats.
"""
x_list = []
y_list = []
n_params = bounds.shape[0]
if x0 is None:
for params in np.random.uniform(bounds[:, 0], bounds[:, 1], (n_pre_samples, bounds.shape[0])):
x_list.append(params)
y_list.append(sample_loss(params))
else:
for params in x0:
x_list.append(params)
y_list.append(sample_loss(params))
xp = np.array(x_list)
yp = np.array(y_list)
# Create the GP
if gp_params is not None:
model = gp.GaussianProcessRegressor(**gp_params)
else:
kernel = gp.kernels.Matern()
model = gp.GaussianProcessRegressor(kernel=kernel,
alpha=alpha,
n_restarts_optimizer=10,
normalize_y=True)
for n in range(n_iters):
model.fit(xp, yp)
# Sample next hyperparameter
if random_search:
x_random = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(random_search, n_params))
ei = -1 * expected_improvement(x_random, model, yp, greater_is_better=True, n_params=n_params)
next_sample = x_random[np.argmax(ei), :]
else:
next_sample = sample_next_hyperparameter(expected_improvement, model, yp, greater_is_better=True, bounds=bounds, n_restarts=100)
# Duplicates will break the GP. In case of a duplicate, we will randomly sample a next query point.
if np.any(np.abs(next_sample - xp) <= epsilon):
next_sample = np.random.uniform(bounds[:, 0], bounds[:, 1], bounds.shape[0])
# Sample loss for new set of parameters
cv_score = sample_loss(next_sample)
# Update lists
x_list.append(next_sample)
y_list.append(cv_score)
# Update xp and yp
xp = np.array(x_list)
yp = np.array(y_list)
return xp, yp
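# Toy usage sketch (hypothetical 1-D objective, maximised at x = 0.5; illustrative only).
def _example_bayesian_optimisation():
    bounds = np.array([[-2.0, 2.0]])
    sample_loss = lambda x: -float((x[0] - 0.5) ** 2)
    xp, yp = bayesian_optimisation(n_iters=10, sample_loss=sample_loss, bounds=bounds)
    return xp[np.argmax(yp)]  # best sampled hyperparameter found so far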
# {'epsilon_decay': 0.99957392597900963, 'epsilon': 0.96126118058910504, 'learning_rate': 0.0048160891703121133, 'batch_size': 32, 'best_score': 164.90000000000001, 'episodes': 3000, 'clip_error': False, 'learning_rate_decay': 0.99992369857077323, 'lr_decay_on_best': 0.94999999999999996, 'update_rate': 20, 'reg': 0.0050000000000000001, 'gamma': 0.99}
def readParams():
hparams = []
y = []
with open('CartPole-v0_pg_won.txt', 'r') as f:
for line in f:
resd = ast.literal_eval(line)
if isinstance(resd,dict):
best = 195.0
if 'best_score' in resd:
best = resd['best_score']
sample = [32, 10, 200, 0.99, resd['epsilon'], resd['epsilon_decay'],resd['learning_rate'],resd['learning_rate_decay'],resd['lr_decay_on_best'],resd['clip_error'], 0.005]
elif isinstance(resd,list):
sample = resd[0:10]
sample.append(0.005)
best = resd[10]
hparams.append(sample)
y.append(best)
#hparams = np.array(hparams)
#y = np.array(y)
return (hparams,y)
#clf = BayesianRidge(compute_score=True)
#clf.fit(hparams, y)
#ols = LinearRegression()
#ols.fit(X, y)
#np.seterr(all='raise')
np.seterr(under='ignore')
def stats(arr, msg=""):
mi = np.min(arr)
ma = np.max(arr)
av = np.mean(arr)
std = np.std(arr)
abs_arr = np.abs(arr)
mi_abs = np.min(abs_arr)
ma_abs = np.max(abs_arr)
print "%sMin/Max/Mean/Stdev abs(Min/Max): %g/%g/%g/%g %g/%g" % (msg,mi,ma,av,std,mi_abs,ma_abs)
def saveModel( model, options ):
filename = os.path.join( options.dir_model, options.model_name + ".pickle" )
with open(filename, 'wb') as f:
pickle.dump( model, f, pickle.HIGHEST_PROTOCOL)
def initializeModel( name, number_actions, input_dim=(4,84,84), verbose=False ):
output = "FC-%d" % (number_actions,)
# layers = ["conv-32", "maxpool", "conv-64", "maxpool", "conv-64", "FC-512", output]
# layer_params = [{'filter_size':3, 'stride':1 },
# {'pool_stride': 2, 'pool_width': 2, 'pool_height': 2},
# {'filter_size':3, 'stride':1 },
# {'pool_stride': 2, 'pool_width': 2, 'pool_height': 2},
# {'filter_size':3, 'stride':2 },
# {}, {'relu':False} ]
# From the DQN paper, mostly
# layers = ["conv-32", "conv-64", "conv-64", "FC-512", output]
# layer_params = [{'filter_size':8, 'stride':4, 'pad':4 },
# {'filter_size':4, 'stride':2, 'pad':2},
# {'filter_size':3, 'stride':1, 'pad':1},
# {}, {'relu':False} ]
layers = ["FC-20", "FC-20", output]
layer_params = [ {}, {}, {'relu':False} ]
model = MalpiModel(layers, layer_params, input_dim=input_dim, reg=0.005, dtype=np.float32, verbose=verbose)
model.name = name
if verbose:
print
model.describe()
print
return model
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x)) # sigmoid "squashing" function to interval [0,1]
def softmax_batch(x):
probs = np.exp(x - np.max(x, axis=1, keepdims=True))
probs /= np.sum(probs, axis=1, keepdims=True)
return probs
def prepro(I):
""" prepro 210x160x3 uint8 frame into (84x84) float
"""
rgb_weights = [0.2989, 0.5870, 0.1140]
I = I[35:195] # crop
I = imresize(I, (84,84), interp='nearest' )
I = np.sum( I * rgb_weights, axis=2) # Convert to grayscale, shape = (84,84)
return I.astype(np.float) / 255.0
#return I.astype(np.float)
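# Shape sanity check (illustrative only): a raw 210x160x3 Atari frame should come out
# as an (84, 84) float array after cropping, resizing and grayscale conversion.
def _example_prepro_shape():
    frame = np.zeros((210, 160, 3), dtype=np.uint8)
    return prepro(frame).shape  # (84, 84)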
def discount_rewards(r, gamma, done, normalize=True):
""" take 1D float array of rewards and compute discounted reward.
if normalize is True: subtract mean and divide by std dev
"""
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(xrange(0, r.size)):
        if done[t]: running_add = 0 # reset the sum, since this was a game boundary
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
if normalize:
# standardize the rewards to be unit normal (helps control the gradient estimator variance)
discounted_r -= np.mean(discounted_r)
discounted_r /= np.std(discounted_r)
return discounted_r
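# Worked example (illustrative only): with gamma = 0.5, a single episode that ends on the
# last step, and normalize=False, rewards [0, 0, 1] discount to [0.25, 0.5, 1.0].
def _example_discount_rewards():
    r = np.array([0.0, 0.0, 1.0])
    done = np.array([False, False, True])
    return discount_rewards(r, 0.5, done, normalize=False)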
def make_epsilon_greedy_policy(estimator, epsilon, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function approximator and epsilon.
Args:
estimator: An estimator that returns q values for a given state
        epsilon: The probability of selecting a random action; a float between 0 and 1.
nA: Number of actions in the environment.
Returns:
A function that takes the observation as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(observation):
A = np.ones(nA, dtype=float) * epsilon / nA
q_values,_ = estimator.forward(observation, mode="test")
best_action = np.argmax(q_values[0])
A[best_action] += (1.0 - epsilon)
return A
return policy_fn
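# Usage sketch (illustrative only): build a small estimator with initializeModel() above
# and draw an action from the resulting epsilon-greedy distribution.
def _example_epsilon_greedy():
    est = initializeModel("demo", 2, input_dim=(4, 1))
    policy = make_epsilon_greedy_policy(est, epsilon=0.1, nA=2)
    probs = policy(np.zeros((1, 4)))  # length-2 array of action probabilities
    return np.random.choice(2, p=probs)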
def choose_epsilon_greedy( estimator, observation, epsilon, nA ):
if np.random.random() < epsilon:
return np.random.randint(nA)
else:
q_values,_ = estimator.forward(observation.reshape(1,4), mode="test")
return np.argmax(q_values[0])
def check_weights( model ):
for k,w in model.params.iteritems():
smallest = np.min( np.abs(w) )
print "Smallest %s: %g" % (k,smallest)
mask_zeros = w != 0.0
mask = np.abs(w) < 1e-20
mask = np.logical_and(mask_zeros,mask)
if np.count_nonzero(mask) > 0:
print "Underflow in %s " % (k,)
def hyperparameterGenerator( oneRun = False ):
batch_size = 32 # backprop batch size
update_rate = 20 # every how many episodes to copy behavior model to target
gamma = 0.99 # discount factor for reward
epsilon = 0.5
epsilon_decay = 0.999
learning_rate = 0.01
learning_rate_decay = 0.999
lr_decay_on_best = 0.95
clip_error = True
reg = 0.005
hparams = { "reg": reg, "learning_rate": learning_rate, "learning_rate_decay":learning_rate_decay, "batch_size":batch_size, "update_rate":update_rate, "gamma":gamma, "epsilon":epsilon, "epsilon_decay":epsilon_decay,
"lr_decay_on_best":lr_decay_on_best, "clip_error":clip_error }
variations = np.array([0.9,1.0,1.1])
if oneRun:
reguls = [3.37091767808e-05]
lrs = [0.0002006801544726]
else:
count = 4
reguls = np.array([0.005])
epsilons = np.random.uniform( 0.5, 1.0, count )
epsilon_decays = np.random.uniform( 0.999, 0.9999, count )
lrs = np.random.uniform( 0.0001, 0.03, count )
lr_decays = np.random.uniform( 0.999, 0.99999, count )
decays_on_best = np.array([lr_decay_on_best])
clip_errors = np.array([True,False])
# reguls = np.array([3.37091767808e-05]) * variations
# lrs = np.array([0.0002006801544726]) * variations
#reguls = 10 ** np.random.uniform(-5, -4, 2) #[0.0001, 0.001, 0.01]
#lrs = 10 ** np.random.uniform(-6, -3, 5) #[1e-4, 1e-3, 1e-2]
#reguls = np.append([3.37091767808e-05],reguls)
#lrs = np.append([0.000182436504066],lrs)
for reg in reguls:
for lr in lrs:
for decay in lr_decays:
for epsilon in epsilons:
for epsilon_decay in epsilon_decays:
for decay_on_best in decays_on_best:
for clip_error in clip_errors:
hparams["reg"] = reg
hparams["learning_rate"] = lr
hparams["learning_rate_decay"] = decay
hparams["epsilon"] = epsilon
hparams["epsilon_decay"] = epsilon_decay
hparams["lr_decay_on_best"] = decay_on_best
hparams["clip_error"] = clip_error
yield hparams
def test(tmodel, env, options):
reward_100 = 0
for i in range(100):
episode_reward = 0
state = env.reset()
done = False
steps = 0
while not done and (steps < 1000):
if options.render: env.render()
q_values,_ = tmodel.forward(state.reshape(1,4), mode="test")
action = np.argmax(q_values[0])
state, reward, done, info = env.step(action)
episode_reward += reward
steps += 1
reward_100 += episode_reward
return (reward_100 / 100.0)
def train(env, options):
alpha=1e-5
epsilon=1e-7
kernel = gp.kernels.Matern()
model = gp.GaussianProcessRegressor(kernel=kernel,
alpha=alpha,
n_restarts_optimizer=10,
normalize_y=True)
# batch_size update_rate update_freq gamma epsilon epsilon_decay learning_rate learning_rate_decay lr_decay_on_best clip_error behavior.reg
next_sample = np.array( [ 32, 20, 100, 0.99, 0.9, 0.9995, 0.005, 0.9999, 0.95,True, 0.0005 ] )
# ns2 = [35.471222966270744, 38.37807565265633, 116.15169967184646, 0.7994517015140111, 0.5467350837104111, 0.9931695064926428, 0.009819376267803895, 0.9967592218595942, 0.9663844877653254, 0.7082498175370553, 0.0020246883852151417]
# ns3 = [15.794413185088402, 27.943625798031828, 798.1561128587946, 0.9542275528280187, 0.7105140406717579, 0.9996216020143134, 0.021327395517623794, 0.9996498782205984, 0.9583684951507172, 0.21068863599082088, 0.004261505455968546]
# ns5 = [48.815653567290425, 34.961648567661825, 468.3846881487566, 0.23313941479454803, 0.1630527266282271, 0.9932152896891062, 0.0688362208079374, 0.9985657914516108, 0.9745687054036098, 0.7234555328172226, 0.004001796434991941]
# ns6 = [28.24000380576149, 4.503855398537693, 647.7616508987576, 0.5136221792299456, 0.4310535569147862, 0.9921263218184515, 0.04364309633846753, 0.9968090204151728, 0.9815313824481013, 0.8650881828450184, 0.00560198882477674]
# ns7 = [30.925956532141644, 21.645822197961028, 782.7258088986783, 0.9975081589468211, 0.5755960192901446, 0.9917304919341033, 0.09969599669488056, 0.9992139877010152, 0.947164407569207, 0.6338001376910157, 0.009939094019751054]
# ns8 = [24.20062352160077, 31.63370169555912, 141.8076862504255, 0.6105570419507371, 0.4056939760149664, 0.9932989781711511, 0.0802181271288588, 0.9989581236209448, 0.9128658066048594, 0.7608427670235947, 0.0016435174719399933]
# ns4 = [18.32026345019517, 45.64960707155015, 781.1920097253865, 0.12244453901068054, 0.2941830570247511, 0.9949184958539329, 0.01666072047036751, 0.9999725890071582, 0.9068317107623877, 0.4337409896399025, 0.003750798870686474]
# ns9 = [10.25430465535929, 35.284676962320155, 595.7011299729893, 0.25599137210178063, 0.3280938239975178, 0.992898000862435, 0.02941715637109388, 0.9996840142279082, 0.926579984522795, 0.01586549543950433, 0.0048595528178426595]
ns2 = [35.47122, 38.37808, 116.1517, 0.79945, 0.54674, 0.99317, 0.00982, 0.99676, 0.96638, 0.70825, 0.00202]
ns3 = [15.79441, 27.94363, 798.15611, 0.95423, 0.71051, 0.99962, 0.02133, 0.99965, 0.95837, 0.21069, 0.00426]
ns4 = [18.32026, 45.64961, 781.19201, 0.12244, 0.29418, 0.99492, 0.01666, 0.99997, 0.90683, 0.43374, 0.00375]
ns5 = [48.81565, 34.96165, 468.38469, 0.23314, 0.16305, 0.99322, 0.06884, 0.99857, 0.97457, 0.72346, 0.004]
ns6 = [28.24, 4.50386, 647.76165, 0.51362, 0.43105, 0.99213, 0.04364, 0.99681, 0.98153, 0.86509, 0.0056]
ns7 = [30.92596, 21.64582, 782.72581, 0.99751, 0.5756, 0.99173, 0.0997, 0.99921, 0.94716, 0.6338, 0.00994]
ns8 = [24.20062, 31.6337, 141.80769, 0.61056, 0.40569, 0.9933, 0.08022, 0.99896, 0.91287, 0.76084, 0.00164]
ns9 = [10.2543, 35.28468, 595.70113, 0.25599, 0.32809, 0.9929, 0.02942, 0.99968, 0.92658, 0.01587, 0.00486]
ns10= [41.21059904, 38.28170401, 894.50800476, 0.74548384, 0.55663432, 0.99318955, 0.07205227, 0.99999037, 0.93612528, 0.0950218, 0.004791, 200., 4598.]
ns11= [19.92790589, 30.41193725, 143.25020338, 0.589395, 0.90056903, 0.99319374, 0.0621287, 0.99970608, 0.90470446, 0.26470385, 0.0078707, 200., 690.]
ns12= [35.67269471, 39.23544342, 237.63394761, 0.49218579, 0.21019492, 0.99898196, 0.07409025, 0.99972359, 0.97938121, 0.29803843, 0.00675115, 200., 1287.]
ns13= [15.13436928, 27.73185857, 173.97569991, 0.72684987, 0.72425576, 0.99975706, 0.0601372, 0.99826244, 0.98179968, 0.43993769, 0.00271678, 200., 972.]
#x_list,y_list = readParams()
x_list = [ns2,ns3,ns4,ns5,ns6,ns7,ns8,ns9,ns10[0:11],ns11[0:11],ns12[0:11]]
y_list = [136.04,123.39,192.15, 138.44,62.33,128.06,189.53,192.88,ns10[11],ns11[11],ns12[11]]
xp = np.array(x_list)
yp = np.array(y_list)
bounds = np.array( [ [10, 50], [1,50], [100,1000], [0.1,1.0], [0.1,1.0], [0.99,1.0], [0.0001,0.1], [0.99,1.0], [0.9,1.0],[0.0,1.0], [0.0005,0.01] ] )
do_bayes = False
scores = []
if not do_bayes:
next_sample = ns11
# Try setting learning rate decay to 1.0 so that RMSProp does all of the learning decay.
# Doesn't seem to work as well, so keep the learning rate decay
# next_sample[7] = 1.0
# next_sample[8] = 1.0
for i in range(100):
if do_bayes:
model.fit(xp, yp)
next_sample = sample_next_hyperparameter(expected_improvement, model, yp, greater_is_better=True, bounds=bounds, n_restarts=100)
# Duplicates will break the GP. In case of a duplicate, we will randomly sample a next query point.
if np.any(np.abs(next_sample - xp) <= epsilon):
next_sample = np.random.uniform(bounds[:, 0], bounds[:, 1], bounds.shape[0])
#next_sample = [32, 20, 200, 0.99, 0.88, 0.99957, 0.0045, 0.9999, 0.95, True, 0.005]
# Sample loss for new set of parameters
cv_score = train_one(env, next_sample, options)
scores.append(cv_score)
print "Score %f for %s" % (cv_score, next_sample)
# Update lists
x_list.append(next_sample)
y_list.append(cv_score)
# Update xp and yp
xp = np.array(x_list)
yp = np.array(y_list)
else:
cv_score = train_one(env, next_sample, options)
scores.append(cv_score)
with open( 'current_run.txt', 'w+') as f:
f.write( "%s Iteration %d\n" % (options.model_name,i) )
print "100 iterations: %f / %f" % (np.mean(scores), np.std(scores))
def train_one(env, hparams, options):
ksteps = options.k_steps # number of frames to skip before selecting a new action
num_actions = env.action_space.n
batch_size = int(hparams[0])
update_rate = int(hparams[1])
update_freq = int(hparams[2])
gamma = hparams[3]
epsilon = hparams[4]
epsilon_decay = hparams[5]
learning_rate = hparams[6]
learning_rate_decay = hparams[7]
lr_decay_on_best = hparams[8]
if hparams[9] < 0.5:
clip_error = False
else:
clip_error = True
behavior = initializeModel( options.model_name, num_actions, input_dim=(4,1) )
behavior.reg = hparams[10]
behavior.params["W1"] *= 0.1
optim = Optimizer( "rmsprop", behavior, learning_rate=learning_rate, decay_rate=0.99, upd_frequency=update_freq)
reward_sum = 0
reward_100 = deque(maxlen=100)
best_test = 15.0 # test(behavior, env, options)
steps = 0
episode_steps = 0
episode_number = 0
state = env.reset()
exp_history = Experience2( 2000, state.shape )
with open( os.path.join( options.game + ".txt" ), 'a+') as f:
f.write( "%s = %s\n" % ('Start',time.strftime("%Y-%m-%d %H:%M:%S")) )
f.write( "%s = %s\n" % ('Model Name',behavior.name) )
if options.initialize:
f.write( "Weights initialized\n" )
f.write( str(behavior.layers) + "\n" )
f.write( str(behavior.layer_params) + "\n" )
f.write( "%s = %d\n" % ('batch_size',batch_size) )
f.write( "%s = %d\n" % ('update_rate',update_rate) )
f.write( "%s = %f\n" % ('gamma',gamma) )
f.write( "%s = %f\n" % ('epsilon',epsilon) )
f.write( "%s = %f\n" % ('epsilon_decay',epsilon_decay) )
f.write( "%s = %d\n" % ('k-steps',ksteps) )
f.write( "%s = %f\n" % ('learning_rate',learning_rate) )
f.write( "%s = %f\n" % ('learning_rate_decay',learning_rate_decay) )
f.write( "%s = %f\n" % ('lr_decay_on_best',lr_decay_on_best) )
f.write( "%s = %s\n" % ('clip_error',str(clip_error)) )
f.write( "Optimizer %s\n" % (optim.optim_type,) )
f.write( " %s = %f\n" % ('learning rate',optim.learning_rate) )
f.write( " %s = %f\n" % ('decay rate',optim.decay_rate) )
f.write( " %s = %f\n" % ('epsilon',optim.epsilon) )
f.write( " %s = %f\n" % ('update frequency',optim.upd_frequency) )
f.write( "\n" )
while (options.max_episodes == 0) or (episode_number < options.max_episodes):
if options.render: env.render()
actions_raw, _ = behavior.forward( state.reshape(1,4), mode="test")
action_probs = softmax_batch(actions_raw)
action = np.random.choice(num_actions, p=action_probs[0])
# Random action, for baseline scores
# action = np.random.randint(num_actions)
# step the environment once, or ksteps times
reward = 0
done = False
for k in range(ksteps):
next_state, r, d, info = env.step(action)
reward += r
if d:
done = True
reward_sum += reward
steps += ksteps
episode_steps += ksteps
exp_history.save( state, action, reward, done, next_state )
state = next_state
if done: # an episode finished
states, actions, rewards, batch_done, new_states, batch_probs = exp_history.all()
actions = actions.astype(np.int)
rewards = discount_rewards( rewards, gamma, batch_done, normalize=True )
actions_raw, caches = behavior.forward( states )
action_probs = softmax_batch(actions_raw)
gradients = action_probs[range(action_probs.shape[0]),actions]
#print action_probs
#print actions
#print "action_probs: %s" % (action_probs.shape,)
#print "Gradients: %s" % (gradients.shape,)
#print "Rewards: %s" % (rewards.shape,)
#dx = rewards / gradients
#y = np.zeros(action_probs.shape)
#y[range(action_probs.shape[0]),actions] = dx
#dx = -y
# From: https://github.com/keon/policy-gradient/blob/master/pg.py
y = np.zeros(action_probs.shape)
y[range(action_probs.shape[0]),actions] = 1.0
gradients = y - action_probs
gradients *= np.reshape(rewards, [rewards.shape[0],1])
dx = -gradients
# Gradient of the action taken divided by the probability of the action taken
#action_probs = np.zeros(action_probs.shape)
#action_probs[actions] = 1.0
# From: http://minpy.readthedocs.io/en/latest/tutorial/rl_policy_gradient_tutorial/rl_policy_gradient.html
# ps = np.maximum(1.0e-5, np.minimum(1.0 - 1e-5, ps)) # prevent log of zero
#policy_grad_loss = -np.sum(np.log(ps) * actions_one_hot * advs)
# would still need the derivitive of the loss
#dx = actions_raw + gradients
#dx = -gradients
loss = 0.0
if clip_error:
np.clip( dx, -1.0, 1.0, dx )
# dx needs to have shape(batch_size,num_actions), e.g. (32,6)
_, grad = behavior.backward(caches, loss, dx )
optim.update( grad, check_ratio=True )
episode_number += 1
reward_100.append(reward_sum)
exp_history.clear()
if episode_number % update_rate == 0:
treward = np.mean(reward_100) # test(behavior, env, options)
print
print 'Ep %d' % ( episode_number, )
print 'Reward : %0.2f %0.2f' % ( reward_sum, np.mean(reward_100) )
print "Test reward : %0.2f vs %0.2f" % (treward, best_test)
print "Learning rate: %g" % (optim.learning_rate,)
print "Epsilon : %g" % (epsilon,)
if treward > best_test:
best_test = treward
if treward > 195.0:
print "Final Learning rate: %f" % (optim.learning_rate,)
print "WON! In %d episodes" % (episode_number,)
break
if optim.learning_rate > 0.00001:
optim.learning_rate *= lr_decay_on_best
if optim.learning_rate > 0.00001:
optim.learning_rate *= learning_rate_decay
if epsilon > 0.001:
epsilon *= epsilon_decay
reward_sum = 0
episode_steps = 0
steps = 0
state = env.reset()
with open( os.path.join( options.game + "_pg_won.txt" ), 'a+') as f:
hparams = np.append( hparams, [best_test, episode_number] )
f.write( "%s\n" % (hparams.tolist(),) )
with open( os.path.join( options.game + ".txt" ), 'a+') as f:
f.write( "%s = %f\n" % ('Final epsilon', epsilon) )
f.write( "%s = %f\n" % ('Final learning rate', optim.learning_rate) )
f.write( "%s = %f\n" % ('Best test score', best_test) )
f.write( "%s = %d\n" % ('Episodes', episode_number) )
f.write( "\n\n" )
return best_test
def getOptions():
usage = "Usage: python pg-pong [options] <model name>"
parser = OptionParser( usage=usage )
parser.add_option("-i","--initialize", action="store_true", default=False, help="Initialize model, save to <model name>.pickle, then start training.");
parser.add_option("-d","--dir_model", default="", help="Directory for finding/initializing model files. Defaults to current directory.");
parser.add_option("-r","--render", action="store_true", default=False, help="Render gym environment while training. Will greatly reduce speed.");
parser.add_option("-s","--starting_ep", type="int", default=0, help="Starting episode number (for record keeping).");
parser.add_option("-k","--k_steps", type="int", default=1, help="How many game steps to take before the model chooses a new action.");
parser.add_option("-p","--play", action="store_true", default=False, help="Play only. No training and always choose the best action.");
parser.add_option("--test_only", action="store_true", default=False, help="Run tests, then exit.");
parser.add_option("--desc", action="store_true", default=False, help="Describe the model, then exit.");
parser.add_option("-g","--game", default="Breakout-v0", help="The game environment to use. Defaults to Breakout.");
parser.add_option("-m","--max_episodes", default="0", type="int", help="Maximum number of episodes to train.");
parser.add_option("--upload", action="store_true", default=False, help="Monitor the training run and upload to OpenAI.");
(options, args) = parser.parse_args()
options.model_name = "HyperParamSearch"
if options.desc or options.test_only:
if len(args) != 1:
print usage
exit()
if args[0].endswith('.pickle'):
args[0] = args[0][:-7]
options.model_name = args[0]
if options.k_steps != 1 and options.k_steps != 4:
print "Game step sizes other than 1 and 4 are not currently supported."
exit()
options.dir_model = os.path.expanduser(options.dir_model)
return (options, args)
if __name__ == "__main__":
options, _ = getOptions()
env = gym.envs.make(options.game)
if hasattr(env,'get_action_meanings'):
print env.get_action_meanings()
if options.desc or options.test_only:
if options.initialize:
filename = os.path.join( options.dir_model, options.model_name + ".pickle" )
if os.path.exists(filename):
print "Model already exists at " + filename
print "Delete the existing file or don't use the --initialize/-i flag."
exit()
nA = env.action_space.n
print "Initializing model with %d actions..." % (nA,)
model = initializeModel( options.model_name, nA, input_dim=(4,1) )
model.params["W1"] *= 0.1
model.describe()
model.env = options.game
saveModel( model, options )
else:
print "Reading model..."
with open( os.path.join( options.dir_model, options.model_name+'.pickle'), 'rb') as f:
model = pickle.load( f )
if not hasattr(model, 'env'):
print "Warning, model may not work with the current environment."
if options.desc:
model.describe()
exit()
if options.test_only:
if hasattr(model, 'env'):
if model.env != options.game:
print "Model was not initialized for the current environment: %s vs %s" % (model.env,options.game)
exit()
treward = test(model, env, options)
print "Gym reward: %f" % treward
exit()
if options.upload:
env = gym.wrappers.Monitor(env, "./" + options.game, force=True)
train(env, options)
env.close()
if options.upload:
if hasattr(config, 'openai_key'):
gym.upload('./' + options.game, api_key=config.openai_key)
else:
print "Unable to upload results. Missing 'openai_key' in config."
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [None,
nn_ops.conv2d_backprop_filter(grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
op.get_attr("strides"),
op.get_attr("padding")),
nn_ops.conv2d(grad,
op.inputs[1],
op.get_attr("strides"),
op.get_attr("padding"))]
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
  We assume that probs is of shape [batch_size, dim].
The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the
softmax output.
Returns:
gradient w.r.t the input to the softmax
"""
# TODO(ilyasu): assert that the tensor has two dimensions at
# graph-construction time? Alternatively: do different things
# depending on the dimensionality of the input tensors.
softmax = op.outputs[0]
grad_x = ((grad_softmax -
array_ops.reshape(math_ops.reduce_sum(grad_softmax * softmax, [1]),
[-1, 1]))
* softmax)
return grad_x
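# Quick numeric sanity check of the closed form above (illustrative sketch in plain NumPy;
# not part of the gradient registry).
def _softmax_grad_numpy_check():
  import numpy as np
  x = np.array([0.2, -1.0, 0.5])
  s = np.exp(x) / np.exp(x).sum()          # softmax output for one row
  g = np.array([1.0, 0.0, 0.0])            # upstream gradient
  jacobian = np.diag(s) - np.outer(s, s)   # dsoftmax / dx
  closed_form = (g - np.dot(g, s)) * s     # grad_x from the formula above
  return np.allclose(jacobian.dot(g), closed_form)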
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
  The second input of unused_bias_op is the bias vector, which has one fewer
  dimension than "received_grad" (it lacks the batch dimension). Its gradient is
  the received gradient summed over every dimension except the last one.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad, reduction_dim_tensor))
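# Shape example (illustrative): for received_grad of shape
# [batch, height, width, channels], reduction_dim_tensor evaluates to
# [0, 1, 2], so the returned bias gradient has shape [channels] -- the
# incoming gradient summed over every non-feature axis.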
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops._relu_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
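# Example (illustrative): with vec of shape [D0] and mat of shape [D0, D1],
# expand_dims yields a [D0, 1] column that broadcasts against mat, scaling
# row i of mat by vec[i].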
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
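# In both registrations above, op.outputs[1] is the per-example backprop
# tensor already computed by the forward kernel (softmax(logits) - labels in
# the dense case), so the gradient w.r.t. the logits is just the scalar cost
# gradient broadcast-multiplied onto it; the labels receive no gradient.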
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
return [nn_ops.conv2d_backprop_input(array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
op.get_attr("strides"),
op.get_attr("padding")),
nn_ops.conv2d_backprop_filter(op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
op.get_attr("strides"),
op.get_attr("padding"))]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0],
depth_radius, bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops._avg_pool_grad(array_ops.shape(op.inputs[0]), grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops._max_pool_grad(op.inputs[0], op.outputs[0], grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
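# All of the registrations above attach Python gradient functions to op type
# names via ops.RegisterGradient; tf.gradients() consults this registry when
# building the backward graph, so importing this module is what makes these
# nn ops differentiable.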
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Model tests."""
from __future__ import absolute_import, print_function
import pytest
import uuid
from mock import patch
from sqlalchemy.exc import SQLAlchemyError
from invenio_pidstore.errors import PIDAlreadyExists, PIDDoesNotExistError, \
PIDInvalidAction, PIDObjectAlreadyAssigned
from invenio_pidstore.models import PersistentIdentifier, PIDStatus, Redirect
@patch('invenio_pidstore.models.logger')
def test_pid_creation(logger, app, db):
"""Test pid creation."""
with app.app_context():
assert PersistentIdentifier.query.count() == 0
pid = PersistentIdentifier.create('doi', '10.1234/foo')
assert PersistentIdentifier.query.count() == 1
assert pid.pid_type == 'doi'
assert pid.pid_value == '10.1234/foo'
assert pid.pid_provider is None
assert pid.status == PIDStatus.NEW
assert pid.object_type is None
assert pid.object_uuid is None
assert logger.info.called
rec_uuid = uuid.uuid4()
pid = PersistentIdentifier.create(
'rec', '2', status=PIDStatus.REGISTERED, object_type='rec',
object_uuid=rec_uuid)
assert PersistentIdentifier.query.count() == 2
assert pid.pid_type == 'rec'
assert pid.pid_value == '2'
assert pid.pid_provider is None
assert pid.status == PIDStatus.REGISTERED
assert pid.object_type == 'rec'
assert pid.object_uuid == rec_uuid
# Can't duplicate existing persistent identifier
assert not logger.exception.called
pytest.raises(
PIDAlreadyExists, PersistentIdentifier.create, 'rec', '2')
assert logger.exception.called
with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
mock.side_effect = SQLAlchemyError()
pytest.raises(SQLAlchemyError, PersistentIdentifier.create,
'rec', '2')
assert logger.exception.call_args[0][0].startswith(
"Failed to create")
def test_alembic(app, db):
"""Test alembic recipes."""
ext = app.extensions['invenio-db']
if db.engine.name == 'sqlite':
        pytest.skip('Upgrades are not supported on SQLite.')
assert not ext.alembic.compare_metadata()
db.drop_all()
ext.alembic.upgrade()
assert not ext.alembic.compare_metadata()
ext.alembic.stamp()
ext.alembic.downgrade(target='96e796392533')
ext.alembic.upgrade()
assert not ext.alembic.compare_metadata()
def test_pidstatus_as():
"""Test PID status."""
assert PIDStatus.NEW.title == 'New'
assert PIDStatus.RESERVED.title == 'Reserved'
assert next(iter(PIDStatus)) == 'N'
def test_pid_get(app, db):
"""Test pid retrieval."""
with app.app_context():
PersistentIdentifier.create('doi', '10.1234/foo')
assert PersistentIdentifier.get('doi', '10.1234/foo')
pytest.raises(
PIDDoesNotExistError,
PersistentIdentifier.get,
'doi', '10.1234/bar'
)
# PID with provider
doi = '10.1234/a'
PersistentIdentifier.create('doi', doi, pid_provider='dcite')
assert PersistentIdentifier.get('doi', doi)
assert PersistentIdentifier.get(
'doi', doi, pid_provider='dcite')
pytest.raises(
PIDDoesNotExistError,
PersistentIdentifier.get,
'doi', doi, pid_provider='cref'
)
# Retrieve by object
myuuid = uuid.uuid4()
doi = '10.1234/b'
PersistentIdentifier.create(
'doi', doi, object_type='rec', object_uuid=myuuid)
pid = PersistentIdentifier.get_by_object('doi', 'rec', myuuid)
assert pid.pid_value == doi
pytest.raises(
PIDDoesNotExistError,
PersistentIdentifier.get_by_object,
'doi', 'rec', uuid.uuid4()
)
@patch('invenio_pidstore.models.logger')
def test_pid_assign(logger, app, db):
"""Test pid object assignment."""
with app.app_context():
# No assigned object
pid = PersistentIdentifier.create('doi', '10.1234/foo')
assert not pid.has_object()
assert pid.get_assigned_object() is None
assert pid.get_assigned_object('rec') is None
# Assign object
rec_uuid = uuid.uuid4()
pid.assign('rec', rec_uuid)
assert logger.info.call_args[0][0].startswith("Assigned")
assert 'pid' in logger.info.call_args[1]['extra']
assert pid.has_object()
assert pid.get_assigned_object() == rec_uuid
assert pid.get_assigned_object('rec') == rec_uuid
assert pid.get_assigned_object('oth') is None
        # Doesn't raise when assigning the same object again
pid.assign('rec', rec_uuid)
# Assign without overwrite (uuid as str and uuid)
new_uuid = uuid.uuid4()
pytest.raises(PIDObjectAlreadyAssigned, pid.assign, 'rec', new_uuid)
pytest.raises(
PIDObjectAlreadyAssigned, pid.assign, 'rec', str(new_uuid))
# Assign with overwrite
pid.assign('rec', str(new_uuid), overwrite=True)
assert pid.has_object()
assert pid.get_assigned_object() == new_uuid
assert pid.get_assigned_object('rec') == new_uuid
assert pid.get_assigned_object('oth') is None
# Assign with SQLError
pid = PersistentIdentifier.create('recid', '101')
with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
mock.side_effect = SQLAlchemyError()
pytest.raises(SQLAlchemyError, pid.assign, 'rec', uuid.uuid4())
@patch('invenio_pidstore.models.logger')
def test_pid_unassign_noobject(logger, app, db):
"""Test unassign."""
with app.app_context():
pid = PersistentIdentifier.create('recid', '101')
assert pid.unassign()
pid.assign('rec', uuid.uuid4())
with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
mock.side_effect = SQLAlchemyError()
pytest.raises(SQLAlchemyError, pid.unassign)
assert logger.exception.call_args[0][0].startswith(
"Failed to unassign")
assert 'pid' in logger.exception.call_args[1]['extra']
def test_pid_assign_deleted(app, db):
"""Test pid object assignment."""
with app.app_context():
pid = PersistentIdentifier.create(
'doi', '10.1234/foo', status=PIDStatus.DELETED)
pytest.raises(PIDInvalidAction, pid.assign, 'rec', uuid.uuid4())
@patch('invenio_pidstore.models.logger')
def test_reserve(logger, app, db):
"""Test pid reserve."""
with app.app_context():
i = 1
for s in [PIDStatus.NEW, PIDStatus.RESERVED]:
pid = PersistentIdentifier.create('rec', str(i), status=s)
i += 1
assert pid.reserve()
assert logger.info.call_args[0][0].startswith(
"Reserved PID")
for s in [PIDStatus.REGISTERED, PIDStatus.DELETED,
PIDStatus.REDIRECTED]:
pid = PersistentIdentifier.create('rec', str(i), status=s)
i += 1
pytest.raises(PIDInvalidAction, pid.reserve)
# Test logging of bad errors.
pid = PersistentIdentifier.create('rec', str(i))
with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
mock.side_effect = SQLAlchemyError()
pytest.raises(SQLAlchemyError, pid.reserve)
assert logger.exception.call_args[0][0].startswith(
"Failed to reserve")
assert 'pid' in logger.exception.call_args[1]['extra']
@patch('invenio_pidstore.models.logger')
def test_register(logger, app, db):
"""Test pid register."""
with app.app_context():
i = 1
for s in [PIDStatus.NEW, PIDStatus.RESERVED]:
pid = PersistentIdentifier.create('rec', str(i), status=s)
i += 1
assert pid.register()
assert logger.info.call_args[0][0].startswith(
"Registered PID")
for s in [PIDStatus.REGISTERED, PIDStatus.DELETED,
PIDStatus.REDIRECTED]:
pid = PersistentIdentifier.create('rec', str(i), status=s)
i += 1
pytest.raises(PIDInvalidAction, pid.register)
# Test logging of bad errors.
pid = PersistentIdentifier.create('rec', str(i),
status=PIDStatus.RESERVED)
with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
mock.side_effect = SQLAlchemyError()
pytest.raises(SQLAlchemyError, pid.register)
assert logger.exception.call_args[0][0].startswith(
"Failed to register")
assert 'pid' in logger.exception.call_args[1]['extra']
@patch('invenio_pidstore.models.logger')
def test_delete(logger, app, db):
"""Test pid delete."""
with app.app_context():
i = 1
for s in [PIDStatus.RESERVED, PIDStatus.RESERVED,
PIDStatus.REDIRECTED, PIDStatus.DELETED]:
pid = PersistentIdentifier.create('rec', str(i), status=s)
i += 1
assert pid.delete()
assert logger.info.call_args[0][0] == "Deleted PID."
# New persistent identifiers are removed completely
count = PersistentIdentifier.query.count()
pid = PersistentIdentifier.create('rec', str(i), status=PIDStatus.NEW)
db.session.commit()
assert PersistentIdentifier.query.count() == count + 1
pid.delete()
assert PersistentIdentifier.query.count() == count
assert logger.info.call_args[0][0] == "Deleted PID (removed)."
pid = PersistentIdentifier.create('rec', str(i + 1))
with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
mock.side_effect = SQLAlchemyError()
pytest.raises(SQLAlchemyError, pid.delete)
assert logger.exception.call_args[0][0].startswith(
"Failed to delete")
assert 'pid' in logger.exception.call_args[1]['extra']
@patch('invenio_pidstore.models.logger')
def test_redirect(logger, app, db):
"""Test redirection."""
with app.app_context():
pid1 = PersistentIdentifier.create(
'rec', '1', status=PIDStatus.REGISTERED, object_type='rec',
object_uuid=uuid.uuid4())
pid2 = PersistentIdentifier.create(
'doi', '2', status=PIDStatus.REGISTERED, object_type='rec',
object_uuid=uuid.uuid4())
# Can't redirect these statuses
i = 10
for s in [PIDStatus.NEW, PIDStatus.RESERVED, PIDStatus.DELETED, ]:
pid = PersistentIdentifier.create('rec', str(i), status=s)
i += 1
pytest.raises(PIDInvalidAction, pid.redirect, pid1)
pid = PersistentIdentifier.create(
'rec', str(i), status=PIDStatus.REGISTERED)
        # Can't redirect to a non-existing pid.
pytest.raises(PIDDoesNotExistError, pid.redirect,
PersistentIdentifier())
pid.redirect(pid1)
assert logger.info.call_args[0][0].startswith("Redirected")
assert 'pid' in logger.info.call_args[1]['extra']
assert pid.status == PIDStatus.REDIRECTED
assert pid.object_type is None
assert pid.object_uuid is not None
new_pid = pid.get_redirect()
assert new_pid.pid_type == 'rec'
assert new_pid.pid_value == '1'
# You can redirect an already redirected pid
pid.redirect(pid2)
new_pid = pid.get_redirect()
assert new_pid.pid_type == 'doi'
assert new_pid.pid_value == '2'
# Assign with SQLError
with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
mock.side_effect = SQLAlchemyError()
pytest.raises(SQLAlchemyError, pid.redirect, '1')
assert logger.exception.call_args[0][0].startswith(
"Failed to redirect")
assert 'pid' in logger.exception.call_args[1]['extra']
def test_redirect_cleanup(app, db):
"""Test proper clean up from redirects."""
with app.app_context():
pid1 = PersistentIdentifier.create(
'recid', '1', status=PIDStatus.REGISTERED, object_type='rec',
object_uuid=uuid.uuid4())
pid2 = PersistentIdentifier.create(
'recid', '2', status=PIDStatus.REGISTERED, object_type='rec',
object_uuid=uuid.uuid4())
pid3 = PersistentIdentifier.create(
'recid', '3', status=PIDStatus.REGISTERED)
db.session.commit()
assert Redirect.query.count() == 0
pid3.redirect(pid1)
assert Redirect.query.count() == 1
pid3.redirect(pid2)
assert Redirect.query.count() == 1
pytest.raises(
PIDObjectAlreadyAssigned, pid3.assign, 'rec', uuid.uuid4())
pid3.unassign()
assert Redirect.query.count() == 0
@patch('invenio_pidstore.models.logger')
def test_sync_status(logger, app, db):
"""Test sync status."""
with app.app_context():
pid = PersistentIdentifier.create(
'rec', '1', status=PIDStatus.REGISTERED, object_type='rec',
object_uuid=uuid.uuid4())
pytest.raises(PIDInvalidAction, pid.reserve)
calls = logger.info.call_count
assert pid.sync_status(PIDStatus.NEW)
assert logger.info.call_count == calls + 1
assert pid.reserve()
calls = logger.info.call_count
assert pid.sync_status(PIDStatus.RESERVED)
assert logger.info.call_count == calls
with patch('invenio_pidstore.models.db.session.begin_nested') as mock:
mock.side_effect = SQLAlchemyError()
pytest.raises(SQLAlchemyError, pid.sync_status, PIDStatus.NEW)
assert logger.exception.call_args[0][0].startswith(
"Failed to sync status")
assert 'pid' in logger.exception.call_args[1]['extra']
def test_repr(app, db):
"""Test representation."""
with app.app_context():
pid = PersistentIdentifier.create(
'recid', '1', status=PIDStatus.REGISTERED, object_type='rec',
object_uuid='de3bb351-bc1a-4e51-8605-c6cd9589a560')
assert str(pid) == \
"<PersistentIdentifier recid:1 / " \
"rec:de3bb351-bc1a-4e51-8605-c6cd9589a560 (R)>"
pid = PersistentIdentifier.create(
'recid', '2', status=PIDStatus.REGISTERED)
assert str(pid) == "<PersistentIdentifier recid:2 (R)>"
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from warnings import warn
import cql
from cql.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack,
int8_pack, int8_unpack)
from cql.cqltypes import lookup_cqltype
from cql.connection import Connection
from cql.cursor import Cursor, _VOID_DESCRIPTION, _COUNT_DESCRIPTION
from cql.apivalues import ProgrammingError, OperationalError
from cql.query import PreparedQuery, prepare_query, cql_quote_name
import socket
import uuid
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PROTOCOL_VERSION = 0x01
PROTOCOL_VERSION_MASK = 0x7f
HEADER_DIRECTION_FROM_CLIENT = 0x00
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
class ConsistencyLevel(object):
@classmethod
def name_from_value(cls, value):
return {0: 'ANY',
1: 'ONE',
2: 'TWO',
3: 'THREE',
4: 'QUORUM',
5: 'ALL',
6: 'LOCAL_QUORUM',
7: 'EACH_QUORUM'}[value]
@classmethod
def value_from_name(cls, name):
return {'ANY': 0,
'ONE': 1,
'TWO': 2,
'THREE': 3,
'QUORUM': 4,
'ALL': 5,
'LOCAL_QUORUM': 6,
'EACH_QUORUM': 7}[name]
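# Example (illustrative): ConsistencyLevel.value_from_name('QUORUM') == 4 and
# ConsistencyLevel.name_from_value(4) == 'QUORUM'; these translate between the
# protocol's numeric consistency codes and their symbolic names.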
class CqlResult:
def __init__(self, column_metadata, rows):
self.column_metadata = column_metadata
self.rows = rows
def __iter__(self):
return iter(self.rows)
def __str__(self):
return '<CqlResult: column_metadata=%r, rows=%r>' \
% (self.column_metadata, self.rows)
__repr__ = __str__
class PreparedResult:
def __init__(self, queryid, param_metadata):
self.queryid = queryid
self.param_metadata = param_metadata
def __str__(self):
        return '<PreparedResult: queryid=%r, param_metadata=%r>' \
               % (self.queryid, self.param_metadata)
__repr__ = __str__
_message_types_by_name = {}
_message_types_by_opcode = {}
class _register_msg_type(type):
def __init__(cls, name, bases, dct):
if not name.startswith('_'):
_message_types_by_name[cls.name] = cls
_message_types_by_opcode[cls.opcode] = cls
class _MessageType(object):
__metaclass__ = _register_msg_type
params = ()
def __init__(self, **kwargs):
for pname in self.params:
try:
pval = kwargs[pname]
except KeyError:
raise ValueError("%s instances need the %s keyword parameter"
% (self.__class__.__name__, pname))
setattr(self, pname, pval)
def send(self, f, streamid, compression=None):
body = StringIO()
self.send_body(body)
body = body.getvalue()
version = PROTOCOL_VERSION | HEADER_DIRECTION_FROM_CLIENT
flags = 0
if compression is not None and len(body) > 0:
body = compression(body)
flags |= 0x1
msglen = int32_pack(len(body))
header = ''.join(map(int8_pack, (version, flags, streamid, self.opcode))) \
+ msglen
f.write(header)
if len(body) > 0:
f.write(body)
def __str__(self):
paramstrs = ['%s=%r' % (pname, getattr(self, pname)) for pname in self.params]
return '<%s(%s)>' % (self.__class__.__name__, ', '.join(paramstrs))
__repr__ = __str__
def read_frame(f, decompressor=None):
header = f.read(8)
version, flags, stream, opcode = map(int8_unpack, header[:4])
body_len = int32_unpack(header[4:])
assert version & PROTOCOL_VERSION_MASK == PROTOCOL_VERSION, \
"Unsupported CQL protocol version %d" % version
assert version & HEADER_DIRECTION_MASK == HEADER_DIRECTION_TO_CLIENT, \
"Unexpected request from server with opcode %04x, stream id %r" % (opcode, stream)
assert body_len >= 0, "Invalid CQL protocol body_len %r" % body_len
body = f.read(body_len)
if flags & 0x1:
if decompressor is None:
            raise cql.InternalError("No decompressor available for compressed frame!")
body = decompressor(body)
flags ^= 0x1
if flags:
warn("Unknown protocol flags set: %02x. May cause problems." % flags)
msgclass = _message_types_by_opcode[opcode]
msg = msgclass.recv_body(StringIO(body))
msg.stream_id = stream
return msg
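# Frame layout assumed by send() and read_frame() above (native protocol v1):
# a 4-byte header of signed bytes [version, flags, stream id, opcode],
# followed by a 4-byte body length, then body_len bytes of message body,
# which is compressed when the 0x1 flag bit is set.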
error_classes = {}
class ErrorMessage(_MessageType):
opcode = 0x00
name = 'ERROR'
params = ('code', 'message', 'info')
summary = 'Unknown'
@classmethod
def recv_body(cls, f):
code = read_int(f)
msg = read_string(f)
subcls = error_classes.get(code, cls)
extra_info = subcls.recv_error_info(f)
return subcls(code=code, message=msg, info=extra_info)
def summarymsg(self):
msg = 'code=%04x [%s] message="%s"' \
% (self.code, self.summary, self.message)
if self.info is not None:
msg += (' info=' + str(self.info))
return msg
def __str__(self):
return '<ErrorMessage %s>' % self.summarymsg()
__repr__ = __str__
@staticmethod
def recv_error_info(f):
pass
class ErrorMessageSubclass(_register_msg_type):
def __init__(cls, name, bases, dct):
if cls.errorcode is not None:
error_classes[cls.errorcode] = cls
class ErrorMessageSub(ErrorMessage):
__metaclass__ = ErrorMessageSubclass
errorcode = None
class RequestExecutionException(ErrorMessageSub):
pass
class RequestValidationException(ErrorMessageSub):
pass
class ServerError(ErrorMessageSub):
summary = 'Server error'
errorcode = 0x0000
class ProtocolException(ErrorMessageSub):
summary = 'Protocol error'
errorcode = 0x000A
class UnavailableExceptionErrorMessage(RequestExecutionException):
summary = 'Unavailable exception'
errorcode = 0x1000
@staticmethod
def recv_error_info(f):
return {
'consistencylevel': read_consistencylevel(f),
'required': read_int(f),
'alive': read_int(f),
}
class OverloadedErrorMessage(RequestExecutionException):
summary = 'Coordinator node overloaded'
errorcode = 0x1001
class IsBootstrappingErrorMessage(RequestExecutionException):
summary = 'Coordinator node is bootstrapping'
errorcode = 0x1002
class TruncateError(RequestExecutionException):
summary = 'Error during truncate'
errorcode = 0x1003
class RequestTimeoutException(RequestExecutionException):
pass
class WriteTimeoutErrorMessage(RequestTimeoutException):
summary = 'Timeout during write request'
errorcode = 0x1100
@staticmethod
def recv_error_info(f):
return {
'consistencylevel': read_consistencylevel(f),
'received': read_int(f),
'blockfor': read_int(f),
'writetype': read_string(f),
}
class ReadTimeoutErrorMessage(RequestTimeoutException):
summary = 'Timeout during read request'
errorcode = 0x1200
@staticmethod
def recv_error_info(f):
return {
'consistencylevel': read_consistencylevel(f),
'received': read_int(f),
'blockfor': read_int(f),
'data_present': bool(read_byte(f)),
}
class SyntaxException(RequestValidationException):
summary = 'Syntax error in CQL query'
errorcode = 0x2000
class UnauthorizedErrorMessage(RequestValidationException):
summary = 'Unauthorized'
errorcode = 0x2100
class InvalidRequestException(RequestValidationException):
summary = 'Invalid query'
errorcode = 0x2200
class ConfigurationException(RequestValidationException):
summary = 'Query invalid because of configuration issue'
errorcode = 0x2300
class AlreadyExistsException(ConfigurationException):
summary = 'Item already exists'
errorcode = 0x2400
@staticmethod
def recv_error_info(f):
return {
'keyspace': read_string(f),
'table': read_string(f),
}
class StartupMessage(_MessageType):
opcode = 0x01
name = 'STARTUP'
params = ('cqlversion', 'options')
KNOWN_OPTION_KEYS = set((
'CQL_VERSION',
'COMPRESSION',
))
def send_body(self, f):
optmap = self.options.copy()
optmap['CQL_VERSION'] = self.cqlversion
write_stringmap(f, optmap)
class ReadyMessage(_MessageType):
opcode = 0x02
name = 'READY'
params = ()
@classmethod
def recv_body(cls, f):
return cls()
class AuthenticateMessage(_MessageType):
opcode = 0x03
name = 'AUTHENTICATE'
params = ('authenticator',)
@classmethod
def recv_body(cls, f):
authname = read_string(f)
return cls(authenticator=authname)
class CredentialsMessage(_MessageType):
opcode = 0x04
name = 'CREDENTIALS'
params = ('creds',)
def send_body(self, f):
write_short(f, len(self.creds))
for credkey, credval in self.creds:
write_string(f, credkey)
write_string(f, credval)
class OptionsMessage(_MessageType):
opcode = 0x05
name = 'OPTIONS'
params = ()
def send_body(self, f):
pass
class SupportedMessage(_MessageType):
opcode = 0x06
name = 'SUPPORTED'
params = ('cqlversions', 'options',)
@classmethod
def recv_body(cls, f):
options = read_stringmultimap(f)
cqlversions = options.pop('CQL_VERSION')
return cls(cqlversions=cqlversions, options=options)
class QueryMessage(_MessageType):
opcode = 0x07
name = 'QUERY'
params = ('query', 'consistencylevel',)
def send_body(self, f):
write_longstring(f, self.query)
write_consistencylevel(f, self.consistencylevel)
class ResultMessage(_MessageType):
opcode = 0x08
name = 'RESULT'
params = ('kind', 'results',)
KIND_VOID = 0x0001
KIND_ROWS = 0x0002
KIND_SET_KEYSPACE = 0x0003
KIND_PREPARED = 0x0004
KIND_SCHEMA_CHANGE = 0x0005
type_codes = {
0x0001: 'ascii',
0x0002: 'bigint',
0x0003: 'blob',
0x0004: 'boolean',
0x0005: 'counter',
0x0006: 'decimal',
0x0007: 'double',
0x0008: 'float',
0x0009: 'int',
0x000A: 'text',
0x000B: 'timestamp',
0x000C: 'uuid',
0x000D: 'varchar',
0x000E: 'varint',
0x000F: 'timeuuid',
0x0010: 'inet',
0x0020: 'list',
0x0021: 'map',
0x0022: 'set',
}
FLAGS_GLOBAL_TABLES_SPEC = 0x0001
@classmethod
def recv_body(cls, f):
kind = read_int(f)
if kind == cls.KIND_VOID:
results = None
elif kind == cls.KIND_ROWS:
results = cls.recv_results_rows(f)
elif kind == cls.KIND_SET_KEYSPACE:
ksname = read_string(f)
results = ksname
elif kind == cls.KIND_PREPARED:
results = cls.recv_results_prepared(f)
elif kind == cls.KIND_SCHEMA_CHANGE:
results = cls.recv_results_schema_change(f)
return cls(kind=kind, results=results)
@classmethod
def recv_results_rows(cls, f):
colspecs = cls.recv_results_metadata(f)
rowcount = read_int(f)
rows = [cls.recv_row(f, len(colspecs)) for x in xrange(rowcount)]
return CqlResult(column_metadata=colspecs, rows=rows)
@classmethod
def recv_results_prepared(cls, f):
size = read_short(f)
queryid = uuid.UUID(bytes=f.read(size))
colspecs = cls.recv_results_metadata(f)
return (queryid, colspecs)
@classmethod
def recv_results_metadata(cls, f):
flags = read_int(f)
glob_tblspec = bool(flags & cls.FLAGS_GLOBAL_TABLES_SPEC)
colcount = read_int(f)
if glob_tblspec:
ksname = read_string(f)
cfname = read_string(f)
colspecs = []
for x in xrange(colcount):
if glob_tblspec:
colksname = ksname
colcfname = cfname
else:
colksname = read_string(f)
colcfname = read_string(f)
colname = read_string(f)
coltype = cls.read_type(f)
colspecs.append((colksname, colcfname, colname, coltype))
return colspecs
@classmethod
def recv_results_schema_change(cls, f):
change = read_string(f)
ks = read_string(f)
cf = read_string(f)
return (change, ks, cf)
@classmethod
def read_type(cls, f):
optid = read_short(f)
try:
cqltype = lookup_cqltype(cls.type_codes[optid])
except KeyError:
raise cql.NotSupportedError("Unknown data type code 0x%x. Have to skip"
" entire result set." % optid)
if cqltype.typename in ('list', 'set'):
subtype = cls.read_type(f)
cqltype = cqltype.apply_parameters(subtype)
elif cqltype.typename == 'map':
keysubtype = cls.read_type(f)
valsubtype = cls.read_type(f)
cqltype = cqltype.apply_parameters(keysubtype, valsubtype)
return cqltype
@staticmethod
def recv_row(f, colcount):
return [read_value(f) for x in xrange(colcount)]
class PrepareMessage(_MessageType):
opcode = 0x09
name = 'PREPARE'
params = ('query',)
def send_body(self, f):
write_longstring(f, self.query)
class ExecuteMessage(_MessageType):
opcode = 0x0A
name = 'EXECUTE'
params = ('queryid', 'queryparams', 'consistencylevel',)
def send_body(self, f):
write_short(f, 16)
f.write(self.queryid.bytes)
write_short(f, len(self.queryparams))
for param in self.queryparams:
write_value(f, param)
write_consistencylevel(f, self.consistencylevel)
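# On the wire an EXECUTE message is the 16-byte prepared-query UUID (written
# as a short length of 16 followed by the raw bytes), the number of bound
# values as a short, each value as a length-prefixed blob, and finally the
# consistency level.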
known_event_types = frozenset((
'TOPOLOGY_CHANGE',
'STATUS_CHANGE',
))
class RegisterMessage(_MessageType):
opcode = 0x0B
name = 'REGISTER'
params = ('eventlist',)
def send_body(self, f):
write_stringlist(f, self.eventlist)
class EventMessage(_MessageType):
opcode = 0x0C
name = 'EVENT'
params = ('eventtype', 'eventargs')
@classmethod
def recv_body(cls, f):
eventtype = read_string(f).upper()
if eventtype in known_event_types:
readmethod = getattr(cls, 'recv_' + eventtype.lower())
return cls(eventtype=eventtype, eventargs=readmethod(f))
raise cql.NotSupportedError('Unknown event type %r' % eventtype)
@classmethod
def recv_topology_change(cls, f):
# "NEW_NODE" or "REMOVED_NODE"
changetype = read_string(f)
address = read_inet(f)
return dict(changetype=changetype, address=address)
@classmethod
def recv_status_change(cls, f):
# "UP" or "DOWN"
changetype = read_string(f)
address = read_inet(f)
return dict(changetype=changetype, address=address)
def read_byte(f):
return int8_unpack(f.read(1))
def write_byte(f, b):
f.write(int8_pack(b))
def read_int(f):
return int32_unpack(f.read(4))
def write_int(f, i):
f.write(int32_pack(i))
def read_short(f):
return uint16_unpack(f.read(2))
def write_short(f, s):
f.write(uint16_pack(s))
def read_consistencylevel(f):
return ConsistencyLevel.name_from_value(read_short(f))
def write_consistencylevel(f, cl):
write_short(f, ConsistencyLevel.value_from_name(cl))
def read_string(f):
size = read_short(f)
contents = f.read(size)
return contents.decode('utf8')
def write_string(f, s):
if isinstance(s, unicode):
s = s.encode('utf8')
write_short(f, len(s))
f.write(s)
def read_longstring(f):
size = read_int(f)
contents = f.read(size)
return contents.decode('utf8')
def write_longstring(f, s):
if isinstance(s, unicode):
s = s.encode('utf8')
write_int(f, len(s))
f.write(s)
def read_stringlist(f):
numstrs = read_short(f)
return [read_string(f) for x in xrange(numstrs)]
def write_stringlist(f, stringlist):
write_short(f, len(stringlist))
for s in stringlist:
write_string(f, s)
def read_stringmap(f):
numpairs = read_short(f)
strmap = {}
for x in xrange(numpairs):
k = read_string(f)
strmap[k] = read_string(f)
return strmap
def write_stringmap(f, strmap):
write_short(f, len(strmap))
for k, v in strmap.items():
write_string(f, k)
write_string(f, v)
def read_stringmultimap(f):
numkeys = read_short(f)
strmmap = {}
for x in xrange(numkeys):
k = read_string(f)
strmmap[k] = read_stringlist(f)
return strmmap
def write_stringmultimap(f, strmmap):
write_short(f, len(strmmap))
for k, v in strmmap.items():
write_string(f, k)
write_stringlist(f, v)
def read_value(f):
size = read_int(f)
if size < 0:
return None
return f.read(size)
def write_value(f, v):
if v is None:
write_int(f, -1)
else:
write_int(f, len(v))
f.write(v)
def read_inet(f):
size = read_byte(f)
addrbytes = f.read(size)
port = read_int(f)
if size == 4:
addrfam = socket.AF_INET
elif size == 16:
addrfam = socket.AF_INET6
else:
raise cql.InternalError("bad inet address: %r" % (addrbytes,))
return (socket.inet_ntop(addrfam, addrbytes), port)
def write_inet(f, addrtuple):
addr, port = addrtuple
if ':' in addr:
addrfam = socket.AF_INET6
else:
addrfam = socket.AF_INET
addrbytes = socket.inet_pton(addrfam, addr)
write_byte(f, len(addrbytes))
f.write(addrbytes)
write_int(f, port)
def ctzb(n):
if n == 0: return 127
if n & 0x1:
return 0
else:
c = 1
if n & 0xFFFFFFFFFFFFFFFF == 0:
n >>= 64; c += 64
if n & 0xFFFFFFFF == 0:
n >>= 32; c += 32
if n & 0xFFFF == 0:
n >>= 16; c += 16
if n & 0xFF == 0:
n >>= 8; c += 8
if n & 0xF == 0:
n >>= 4; c += 4
if n & 0x3 == 0:
n >>= 2; c += 2
c -= n & 0x1
return c
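# ctzb(n) counts trailing zero bits, i.e. the index of the lowest set bit
# (for example ctzb(0b1000) == 3), returning 127 when n == 0.
# NativeConnection.make_reqid() uses it below to pick the lowest free stream
# id out of the 128-bit reqid_slots bitmask.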
class NativeCursor(Cursor):
def prepare_query(self, query):
pquery, paramnames = prepare_query(query)
prepared = self._connection.wait_for_request(PrepareMessage(query=pquery))
if isinstance(prepared, ErrorMessage):
raise cql.Error('Query preparation failed: %s' % prepared.summarymsg())
if prepared.kind != ResultMessage.KIND_PREPARED:
raise cql.InternalError('Query preparation did not result in prepared query')
queryid, colspecs = prepared.results
kss, cfs, names, ctypes = zip(*colspecs)
return PreparedQuery(query, queryid, ctypes, paramnames)
def get_response(self, query, consistency_level):
qm = QueryMessage(query=query, consistencylevel=consistency_level)
return self._connection.wait_for_request(qm)
def get_response_prepared(self, prepared_query, params, consistency_level):
qparams = [params[pname] for pname in prepared_query.paramnames]
em = ExecuteMessage(queryid=prepared_query.itemid, queryparams=qparams,
consistencylevel=consistency_level)
return self._connection.wait_for_request(em)
def get_column_metadata(self, column_id):
return self.decoder.decode_metadata_and_type_native(column_id)
def columninfo(self, row):
return xrange(len(row))
def columnvalues(self, row):
return row
def handle_cql_execution_errors(self, response):
if not isinstance(response, ErrorMessage):
return
if isinstance(response, UnauthorizedErrorMessage):
eclass = cql.NotAuthenticated
elif isinstance(response, RequestExecutionException):
eclass = cql.OperationalError
elif isinstance(response, RequestValidationException):
eclass = cql.ProgrammingError
else:
eclass = cql.InternalError
raise eclass(response.summarymsg())
def process_execution_results(self, response, decoder=None):
self.handle_cql_execution_errors(response)
if not isinstance(response, ResultMessage):
raise cql.InternalError('Query execution resulted in %s!?' % (response,))
if response.kind == ResultMessage.KIND_PREPARED:
raise cql.InternalError('Query execution resulted in prepared query!?')
self.rs_idx = 0
self.description = None
self.result = []
self.name_info = ()
if response.kind == ResultMessage.KIND_VOID:
self.description = _VOID_DESCRIPTION
elif response.kind == ResultMessage.KIND_SET_KEYSPACE:
self._connection.keyspace_changed(response.results)
self.description = _VOID_DESCRIPTION
elif response.kind == ResultMessage.KIND_ROWS:
schema = response.results.column_metadata
self.decoder = (decoder or self.default_decoder)(schema)
self.result = response.results.rows
if self.result:
self.get_metadata_info(self.result[0])
elif response.kind == ResultMessage.KIND_SCHEMA_CHANGE:
self.description = _VOID_DESCRIPTION
else:
raise Exception('unknown response kind %s: %s' % (response.kind, response))
self.rowcount = len(self.result)
def get_compression(self):
return self._connection.compression
def set_compression(self, val):
if val != self.get_compression():
raise NotImplementedError("Setting per-cursor compression is not "
"supported in NativeCursor.")
compression = property(get_compression, set_compression)
class debugsock:
def __init__(self, sock):
self.sock = sock
def write(self, data):
print '[sending %r]' % (data,)
self.sock.send(data)
def read(self, readlen):
data = ''
while readlen > 0:
add = self.sock.recv(readlen)
print '[received %r]' % (add,)
if add == '':
raise cql.InternalError("short read of %s bytes (%s expected)"
% (len(data), len(data) + readlen))
data += add
readlen -= len(add)
return data
def close(self):
pass
locally_supported_compressions = {}
try:
import snappy
except ImportError:
pass
else:
# work around apparently buggy snappy decompress
def decompress(byts):
if byts == '\x00':
return ''
return snappy.decompress(byts)
locally_supported_compressions['snappy'] = (snappy.compress, decompress)
class NativeConnection(Connection):
cursorclass = NativeCursor
def __init__(self, *args, **kwargs):
self.reqid_slots = (1 << 128) - 1
self.responses = {}
self.waiting = {}
self.conn_ready = False
self.compressor = self.decompressor = None
self.event_watchers = {}
Connection.__init__(self, *args, **kwargs)
def make_reqid(self):
if self.reqid_slots == 0:
raise cql.ProgrammingError("Unable to acquire a stream id")
slot = ctzb(self.reqid_slots)
self.reqid_slots &= ~(1 << slot)
return slot
def release_reqid(self, reqid):
self.reqid_slots |= (1 << reqid)
def establish_connection(self):
self.conn_ready = False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port))
self.socketf = s.makefile(bufsize=0)
self.sockfd = s
self.open_socket = True
supported = self.wait_for_request(OptionsMessage())
self.supported_cql_versions = supported.cqlversions
self.remote_supported_compressions = supported.options['COMPRESSION']
if self.cql_version:
if self.cql_version not in self.supported_cql_versions:
raise ProgrammingError("cql_version %r is not supported by"
" remote (w/ native protocol). Supported"
" versions: %r"
% (self.cql_version, self.supported_cql_versions))
else:
self.cql_version = self.supported_cql_versions[0]
opts = {}
compresstype = None
if self.compression:
overlap = set(locally_supported_compressions) \
& set(self.remote_supported_compressions)
if len(overlap) == 0:
warn("No available compression types supported on both ends."
" locally supported: %r. remotely supported: %r"
% (locally_supported_compressions,
self.remote_supported_compressions))
else:
compresstype = iter(overlap).next() # choose any
opts['COMPRESSION'] = compresstype
compr, decompr = locally_supported_compressions[compresstype]
# set the decompressor here, but set the compressor only after
# a successful Ready message
self.decompressor = decompr
sm = StartupMessage(cqlversion=self.cql_version, options=opts)
startup_response = self.wait_for_request(sm)
while True:
if isinstance(startup_response, ReadyMessage):
self.conn_ready = True
if compresstype:
self.compressor = compr
break
if isinstance(startup_response, AuthenticateMessage):
self.authenticator = startup_response.authenticator
if self.credentials is None:
raise ProgrammingError('Remote end requires authentication.')
cm = CredentialsMessage(creds=self.credentials)
startup_response = self.wait_for_request(cm)
elif isinstance(startup_response, ErrorMessage):
raise ProgrammingError("Server did not accept credentials. %s"
% startup_response.summarymsg())
else:
raise cql.InternalError("Unexpected response %r during connection setup"
% startup_response)
def set_initial_keyspace(self, keyspace):
c = self.cursor()
        c.execute('USE %s' % cql_quote_name(keyspace))
c.close()
def terminate_connection(self):
self.socketf.close()
self.sockfd.close()
self.sockfd = None
self.socketf = None
def wait_for_request(self, msg):
"""
Given a message, send it to the server, wait for a response, and
return the response.
"""
return self.wait_for_requests(msg)[0]
def send_msg(self, msg):
reqid = self.make_reqid()
try:
msg.send(self.socketf, reqid, compression=self.compressor)
except socket.error:
self.open_socket = False
raise
return reqid
def wait_for_requests(self, *msgs):
"""
Given any number of message objects, send them all to the server
and wait for responses to each one. Once they arrive, return all
of the responses in the same order as the messages to which they
respond.
"""
reqids = []
for msg in msgs:
reqid = self.send_msg(msg)
reqids.append(reqid)
resultdict = self.wait_for_results(*reqids)
return [resultdict[reqid] for reqid in reqids]
def wait_for_results(self, *reqids):
"""
Given any number of stream-ids, wait until responses have arrived for
each one, and return a dictionary mapping the stream-ids to the
appropriate results.
For internal use, None may be passed in place of a reqid, which will
be considered satisfied when a message of any kind is received (and, if
appropriate, handled).
"""
waiting_for = set(reqids)
results = {}
for r in reqids:
try:
result = self.responses.pop(r)
except KeyError:
pass
else:
results[r] = result
waiting_for.remove(r)
while waiting_for:
newmsg = read_frame(self.socketf, decompressor=self.decompressor)
if newmsg.stream_id in waiting_for:
results[newmsg.stream_id] = newmsg
waiting_for.remove(newmsg.stream_id)
else:
self.handle_incoming(newmsg)
if None in waiting_for:
results[None] = newmsg
waiting_for.remove(None)
if newmsg.stream_id >= 0:
self.release_reqid(newmsg.stream_id)
return results
def wait_for_result(self, reqid):
"""
Given a stream-id, wait until a response arrives with that stream-id,
and return the msg.
"""
return self.wait_for_results(reqid)[reqid]
def handle_incoming(self, msg):
if msg.stream_id < 0:
self.handle_pushed(msg)
return
try:
cb = self.waiting.pop(msg.stream_id)
except KeyError:
self.responses[msg.stream_id] = msg
else:
cb(msg)
def callback_when(self, reqid, cb):
"""
Callback cb with a message object once a message with a stream-id
of reqid is received. The callback may be immediate, if a response
is already in the received queue.
        Otherwise, the callback may not be called immediately
upon the arrival of the response packet; it may have to wait until
something else waits on a result.
"""
try:
msg = self.responses.pop(reqid)
except KeyError:
pass
else:
return cb(msg)
self.waiting[reqid] = cb
def request_and_callback(self, msg, cb):
"""
Given a message msg and a callable cb, send the message to the server
and call cb with the result once it arrives. Note that the callback
may not be called immediately upon the arrival of the response packet;
it may have to wait until something else waits on a result.
"""
reqid = self.send_msg(msg)
self.callback_when(reqid, cb)
def handle_pushed(self, msg):
"""
Process an incoming message originated by the server.
"""
watchers = self.event_watchers.get(msg.eventtype, ())
for cb in watchers:
cb(msg.eventargs)
def register_watcher(self, eventtype, cb):
"""
Request that any events of the given type be passed to the given
callback when they arrive. Note that the callback may not be called
immediately upon the arrival of the event packet; it may have to wait
        until something else waits on a result, or until wait_for_event() is
called.
        If the event type has not already been registered for, this may
block while a new REGISTER message is sent to the server.
The available event types are in the cql.native.known_event_types
list.
When an event arrives, a dictionary will be passed to the callback
with the info about the event. Some example result dictionaries:
(For STATUS_CHANGE events:)
{'changetype': u'DOWN', 'address': ('12.114.19.76', 8000)}
(For TOPOLOGY_CHANGE events:)
{'changetype': u'NEW_NODE', 'address': ('19.10.122.13', 8000)}
"""
if isinstance(eventtype, str):
eventtype = eventtype.decode('utf8')
try:
watchers = self.event_watchers[eventtype]
except KeyError:
ans = self.wait_for_request(RegisterMessage(eventlist=(eventtype,)))
if isinstance(ans, ErrorMessage):
raise cql.ProgrammingError("Server did not accept registration"
" for %s events: %s"
% (eventtype, ans.summarymsg()))
watchers = self.event_watchers.setdefault(eventtype, [])
watchers.append(cb)
def unregister_watcher(self, eventtype, cb):
"""
Given an eventtype and a callback previously registered with
register_watcher(), remove that callback from the list of watchers for
the given event type.
"""
if isinstance(eventtype, str):
eventtype = eventtype.decode('utf8')
self.event_watchers[eventtype].remove(cb)
def wait_for_event(self):
"""
Wait for any sort of event to arrive, and handle it via the
registered callbacks. It is recommended that some event watchers
be registered before calling this; otherwise, no events will be
sent by the server.
"""
eventsseen = []
def i_saw_an_event(ev):
eventsseen.append(ev)
wlists = self.event_watchers.values()
for wlist in wlists:
wlist.append(i_saw_an_event)
while not eventsseen:
self.wait_for_result(None)
for wlist in wlists:
wlist.remove(i_saw_an_event)
def is_open(self):
"""Standard test for whether a Connection is open. Used by ConnectionPool. Note not currently using timeouts so not entirely reliable"""
return self.open_socket and (self.sockfd is not None) and (self.socketf is not None)
|
|
import codecs
from datetime import datetime
from textwrap import dedent
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
from pandas.io.formats.format import DataFrameFormatter
from pandas.io.formats.latex import (
RegularTableBuilder,
RowBodyIterator,
RowHeaderIterator,
RowStringConverter,
)
def _dedent(string):
"""Dedent without new line in the beginning.
Built-in textwrap.dedent would keep new line character in the beginning
of multi-line string starting from the new line.
This version drops the leading new line character.
"""
return dedent(string).lstrip()
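# Example (illustrative): _dedent("\n    a\n    b\n") == "a\nb\n", whereas
# textwrap.dedent alone would keep the leading newline.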
@pytest.fixture
def df_short():
"""Short dataframe for testing table/tabular/longtable LaTeX env."""
return DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
class TestToLatex:
def test_to_latex_to_file(self, float_frame):
with tm.ensure_clean("test.tex") as path:
float_frame.to_latex(path)
with open(path) as f:
assert float_frame.to_latex() == f.read()
def test_to_latex_to_file_utf8_with_encoding(self):
# test with utf-8 and encoding option (GH 7061)
df = DataFrame([["au\xdfgangen"]])
with tm.ensure_clean("test.tex") as path:
df.to_latex(path, encoding="utf-8")
with codecs.open(path, "r", encoding="utf-8") as f:
assert df.to_latex() == f.read()
def test_to_latex_to_file_utf8_without_encoding(self):
# test with utf-8 without encoding option
df = DataFrame([["au\xdfgangen"]])
with tm.ensure_clean("test.tex") as path:
df.to_latex(path)
with codecs.open(path, "r", encoding="utf-8") as f:
assert df.to_latex() == f.read()
def test_to_latex_tabular_with_index(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_tabular_without_index(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(index=False)
expected = _dedent(
r"""
\begin{tabular}{rl}
\toprule
a & b \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
@pytest.mark.parametrize(
"bad_column_format",
[5, 1.2, ["l", "r"], ("r", "c"), {"r", "c", "l"}, {"a": "r", "b": "l"}],
)
def test_to_latex_bad_column_format(self, bad_column_format):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
msg = r"column_format must be str or unicode"
with pytest.raises(ValueError, match=msg):
df.to_latex(column_format=bad_column_format)
def test_to_latex_column_format_just_works(self, float_frame):
# GH Bug #9402
float_frame.to_latex(column_format="lcr")
def test_to_latex_column_format(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(column_format="lcr")
expected = _dedent(
r"""
\begin{tabular}{lcr}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_float_format_object_col(self):
# GH#40024
ser = Series([1000.0, "test"])
result = ser.to_latex(float_format="{:,.0f}".format)
expected = _dedent(
r"""
\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & 1,000 \\
1 & test \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_empty_tabular(self):
df = DataFrame()
result = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{l}
\toprule
Empty DataFrame
Columns: Index([], dtype='object')
Index: Index([], dtype='object') \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_series(self):
s = Series(["a", "b", "c"])
result = s.to_latex()
expected = _dedent(
r"""
\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & a \\
1 & b \\
2 & c \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_midrule_location(self):
# GH 18326
df = DataFrame({"a": [1, 2]})
df.index.name = "foo"
result = df.to_latex(index_names=False)
expected = _dedent(
r"""
\begin{tabular}{lr}
\toprule
{} & a \\
\midrule
0 & 1 \\
1 & 2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
class TestToLatexLongtable:
def test_to_latex_empty_longtable(self):
df = DataFrame()
result = df.to_latex(longtable=True)
expected = _dedent(
r"""
\begin{longtable}{l}
\toprule
Empty DataFrame
Columns: Index([], dtype='object')
Index: Index([], dtype='object') \\
\end{longtable}
"""
)
assert result == expected
def test_to_latex_longtable_with_index(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(longtable=True)
expected = _dedent(
r"""
\begin{longtable}{lrl}
\toprule
{} & a & b \\
\midrule
\endfirsthead
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
)
assert result == expected
def test_to_latex_longtable_without_index(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(index=False, longtable=True)
expected = _dedent(
r"""
\begin{longtable}{rl}
\toprule
a & b \\
\midrule
\endfirsthead
\toprule
a & b \\
\midrule
\endhead
\midrule
\multicolumn{2}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
1 & b1 \\
2 & b2 \\
\end{longtable}
"""
)
assert result == expected
@pytest.mark.parametrize(
"df, expected_number",
[
(DataFrame({"a": [1, 2]}), 1),
(DataFrame({"a": [1, 2], "b": [3, 4]}), 2),
(DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}), 3),
],
)
def test_to_latex_longtable_continued_on_next_page(self, df, expected_number):
result = df.to_latex(index=False, longtable=True)
assert fr"\multicolumn{{{expected_number}}}" in result
class TestToLatexHeader:
def test_to_latex_no_header_with_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(header=False)
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_no_header_without_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(index=False, header=False)
expected = _dedent(
r"""
\begin{tabular}{rl}
\toprule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_specified_header_with_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(header=["AA", "BB"])
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
{} & AA & BB \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_specified_header_without_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(header=["AA", "BB"], index=False)
expected = _dedent(
r"""
\begin{tabular}{rl}
\toprule
AA & BB \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
@pytest.mark.parametrize(
"header, num_aliases",
[
(["A"], 1),
(("B",), 1),
(("Col1", "Col2", "Col3"), 3),
(("Col1", "Col2", "Col3", "Col4"), 4),
],
)
    def test_to_latex_number_of_items_in_header_mismatch_raises(
self,
header,
num_aliases,
):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
msg = f"Writing 2 cols but got {num_aliases} aliases"
with pytest.raises(ValueError, match=msg):
df.to_latex(header=header)
def test_to_latex_decimal(self):
# GH 12031
df = DataFrame({"a": [1.0, 2.1], "b": ["b1", "b2"]})
result = df.to_latex(decimal=",")
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1,0 & b1 \\
1 & 2,1 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
class TestToLatexBold:
def test_to_latex_bold_rows(self):
# GH 16707
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(bold_rows=True)
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
\textbf{0} & 1 & b1 \\
\textbf{1} & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_no_bold_rows(self):
# GH 16707
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(bold_rows=False)
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
class TestToLatexCaptionLabel:
@pytest.fixture
def caption_table(self):
"""Caption for table/tabular LaTeX environment."""
return "a table in a \\texttt{table/tabular} environment"
@pytest.fixture
def short_caption(self):
"""Short caption for testing \\caption[short_caption]{full_caption}."""
return "a table"
@pytest.fixture
def label_table(self):
"""Label for table/tabular LaTeX environment."""
return "tab:table_tabular"
@pytest.fixture
def caption_longtable(self):
"""Caption for longtable LaTeX environment."""
return "a table in a \\texttt{longtable} environment"
@pytest.fixture
def label_longtable(self):
"""Label for longtable LaTeX environment."""
return "tab:longtable"
def test_to_latex_caption_only(self, df_short, caption_table):
# GH 25436
result = df_short.to_latex(caption=caption_table)
expected = _dedent(
r"""
\begin{table}
\centering
\caption{a table in a \texttt{table/tabular} environment}
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
"""
)
assert result == expected
def test_to_latex_label_only(self, df_short, label_table):
# GH 25436
result = df_short.to_latex(label=label_table)
expected = _dedent(
r"""
\begin{table}
\centering
\label{tab:table_tabular}
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
"""
)
assert result == expected
def test_to_latex_caption_and_label(self, df_short, caption_table, label_table):
# GH 25436
result = df_short.to_latex(caption=caption_table, label=label_table)
expected = _dedent(
r"""
\begin{table}
\centering
\caption{a table in a \texttt{table/tabular} environment}
\label{tab:table_tabular}
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
"""
)
assert result == expected
def test_to_latex_caption_and_shortcaption(
self,
df_short,
caption_table,
short_caption,
):
result = df_short.to_latex(caption=(caption_table, short_caption))
expected = _dedent(
r"""
\begin{table}
\centering
\caption[a table]{a table in a \texttt{table/tabular} environment}
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
"""
)
assert result == expected
def test_to_latex_caption_and_shortcaption_list_is_ok(self, df_short):
caption = ("Long-long-caption", "Short")
result_tuple = df_short.to_latex(caption=caption)
result_list = df_short.to_latex(caption=list(caption))
assert result_tuple == result_list
def test_to_latex_caption_shortcaption_and_label(
self,
df_short,
caption_table,
short_caption,
label_table,
):
# test when the short_caption is provided alongside caption and label
result = df_short.to_latex(
caption=(caption_table, short_caption),
label=label_table,
)
expected = _dedent(
r"""
\begin{table}
\centering
\caption[a table]{a table in a \texttt{table/tabular} environment}
\label{tab:table_tabular}
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
"""
)
assert result == expected
@pytest.mark.parametrize(
"bad_caption",
[
("full_caption", "short_caption", "extra_string"),
("full_caption", "short_caption", 1),
("full_caption", "short_caption", None),
("full_caption",),
(None,),
],
)
def test_to_latex_bad_caption_raises(self, bad_caption):
        # test that a wrong-length caption argument raises
df = DataFrame({"a": [1]})
msg = "caption must be either a string or a tuple of two strings"
with pytest.raises(ValueError, match=msg):
df.to_latex(caption=bad_caption)
def test_to_latex_two_chars_caption(self, df_short):
# test that two chars caption is handled correctly
# it must not be unpacked into long_caption, short_caption.
result = df_short.to_latex(caption="xy")
expected = _dedent(
r"""
\begin{table}
\centering
\caption{xy}
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
"""
)
assert result == expected
def test_to_latex_longtable_caption_only(self, df_short, caption_longtable):
# GH 25436
# test when no caption and no label is provided
# is performed by test_to_latex_longtable()
result = df_short.to_latex(longtable=True, caption=caption_longtable)
expected = _dedent(
r"""
\begin{longtable}{lrl}
\caption{a table in a \texttt{longtable} environment}\\
\toprule
{} & a & b \\
\midrule
\endfirsthead
\caption[]{a table in a \texttt{longtable} environment} \\
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
)
assert result == expected
def test_to_latex_longtable_label_only(self, df_short, label_longtable):
# GH 25436
result = df_short.to_latex(longtable=True, label=label_longtable)
expected = _dedent(
r"""
\begin{longtable}{lrl}
\label{tab:longtable}\\
\toprule
{} & a & b \\
\midrule
\endfirsthead
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
)
assert result == expected
def test_to_latex_longtable_caption_and_label(
self,
df_short,
caption_longtable,
label_longtable,
):
# GH 25436
result = df_short.to_latex(
longtable=True,
caption=caption_longtable,
label=label_longtable,
)
expected = _dedent(
r"""
\begin{longtable}{lrl}
\caption{a table in a \texttt{longtable} environment}
\label{tab:longtable}\\
\toprule
{} & a & b \\
\midrule
\endfirsthead
\caption[]{a table in a \texttt{longtable} environment} \\
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
)
assert result == expected
def test_to_latex_longtable_caption_shortcaption_and_label(
self,
df_short,
caption_longtable,
short_caption,
label_longtable,
):
# test when the caption, the short_caption and the label are provided
result = df_short.to_latex(
longtable=True,
caption=(caption_longtable, short_caption),
label=label_longtable,
)
expected = _dedent(
r"""
\begin{longtable}{lrl}
\caption[a table]{a table in a \texttt{longtable} environment}
\label{tab:longtable}\\
\toprule
{} & a & b \\
\midrule
\endfirsthead
\caption[]{a table in a \texttt{longtable} environment} \\
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
)
assert result == expected
class TestToLatexEscape:
@pytest.fixture
def df_with_symbols(self):
"""Dataframe with special characters for testing chars escaping."""
a = "a"
b = "b"
yield DataFrame({"co$e^x$": {a: "a", b: "b"}, "co^l1": {a: "a", b: "b"}})
def test_to_latex_escape_false(self, df_with_symbols):
result = df_with_symbols.to_latex(escape=False)
expected = _dedent(
r"""
\begin{tabular}{lll}
\toprule
{} & co$e^x$ & co^l1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_escape_default(self, df_with_symbols):
result = df_with_symbols.to_latex() # default: escape=True
expected = _dedent(
r"""
\begin{tabular}{lll}
\toprule
{} & co\$e\textasciicircum x\$ & co\textasciicircum l1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_special_escape(self):
df = DataFrame([r"a\b\c", r"^a^b^c", r"~a~b~c"])
result = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & a\textbackslash b\textbackslash c \\
1 & \textasciicircum a\textasciicircum b\textasciicircum c \\
2 & \textasciitilde a\textasciitilde b\textasciitilde c \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_escape_special_chars(self):
special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"]
df = DataFrame(data=special_characters)
result = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & \& \\
1 & \% \\
2 & \$ \\
3 & \# \\
4 & \_ \\
5 & \{ \\
6 & \} \\
7 & \textasciitilde \\
8 & \textasciicircum \\
9 & \textbackslash \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_specified_header_special_chars_without_escape(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(header=["$A$", "$B$"], escape=False)
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
{} & $A$ & $B$ \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
class TestToLatexPosition:
def test_to_latex_position(self):
the_position = "h"
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(position=the_position)
expected = _dedent(
r"""
\begin{table}[h]
\centering
\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
\end{table}
"""
)
assert result == expected
def test_to_latex_longtable_position(self):
the_position = "t"
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(longtable=True, position=the_position)
expected = _dedent(
r"""
\begin{longtable}[t]{lrl}
\toprule
{} & a & b \\
\midrule
\endfirsthead
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
)
assert result == expected
class TestToLatexFormatters:
def test_to_latex_with_formatters(self):
df = DataFrame(
{
"datetime64": [
datetime(2016, 1, 1),
datetime(2016, 2, 5),
datetime(2016, 3, 3),
],
"float": [1.0, 2.0, 3.0],
"int": [1, 2, 3],
"object": [(1, 2), True, False],
}
)
formatters = {
"datetime64": lambda x: x.strftime("%Y-%m"),
"float": lambda x: f"[{x: 4.1f}]",
"int": lambda x: f"0x{x:x}",
"object": lambda x: f"-{x!s}-",
"__index__": lambda x: f"index: {x}",
}
result = df.to_latex(formatters=dict(formatters))
expected = _dedent(
r"""
\begin{tabular}{llrrl}
\toprule
{} & datetime64 & float & int & object \\
\midrule
index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_float_format_no_fixed_width_3decimals(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
result = df.to_latex(float_format="%.3f")
expected = _dedent(
r"""
\begin{tabular}{lr}
\toprule
{} & x \\
\midrule
0 & 0.200 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_float_format_no_fixed_width_integer(self):
# GH 22270
df = DataFrame({"x": [100.0]})
result = df.to_latex(float_format="%.0f")
expected = _dedent(
r"""
\begin{tabular}{lr}
\toprule
{} & x \\
\midrule
0 & 100 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
@pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
def test_to_latex_na_rep_and_float_format(self, na_rep):
df = DataFrame(
[
["A", 1.2225],
["A", None],
],
columns=["Group", "Data"],
)
result = df.to_latex(na_rep=na_rep, float_format="{:.2f}".format)
expected = _dedent(
fr"""
\begin{{tabular}}{{llr}}
\toprule
{{}} & Group & Data \\
\midrule
0 & A & 1.22 \\
1 & A & {na_rep} \\
\bottomrule
\end{{tabular}}
"""
)
assert result == expected
class TestToLatexMultiindex:
@pytest.fixture
def multiindex_frame(self):
"""Multiindex dataframe for testing multirow LaTeX macros."""
yield DataFrame.from_dict(
{
("c1", 0): Series({x: x for x in range(4)}),
("c1", 1): Series({x: x + 4 for x in range(4)}),
("c2", 0): Series({x: x for x in range(4)}),
("c2", 1): Series({x: x + 4 for x in range(4)}),
("c3", 0): Series({x: x for x in range(4)}),
}
).T
@pytest.fixture
def multicolumn_frame(self):
"""Multicolumn dataframe for testing multicolumn LaTeX macros."""
yield DataFrame(
{
("c1", 0): {x: x for x in range(5)},
("c1", 1): {x: x + 5 for x in range(5)},
("c2", 0): {x: x for x in range(5)},
("c2", 1): {x: x + 5 for x in range(5)},
("c3", 0): {x: x for x in range(5)},
}
)
def test_to_latex_multindex_header(self):
# GH 16718
df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]})
df = df.set_index(["a", "b"])
observed = df.to_latex(header=["r1", "r2"])
expected = _dedent(
r"""
\begin{tabular}{llrr}
\toprule
& & r1 & r2 \\
a & b & & \\
\midrule
0 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
)
assert observed == expected
def test_to_latex_multiindex_empty_name(self):
# GH 18669
mi = pd.MultiIndex.from_product([[1, 2]], names=[""])
df = DataFrame(-1, index=mi, columns=range(4))
observed = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{lrrrr}
\toprule
& 0 & 1 & 2 & 3 \\
{} & & & & \\
\midrule
1 & -1 & -1 & -1 & -1 \\
2 & -1 & -1 & -1 & -1 \\
\bottomrule
\end{tabular}
"""
)
assert observed == expected
def test_to_latex_multiindex_column_tabular(self):
df = DataFrame({("x", "y"): ["a"]})
result = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{ll}
\toprule
{} & x \\
{} & y \\
\midrule
0 & a \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_multiindex_small_tabular(self):
df = DataFrame({("x", "y"): ["a"]}).T
result = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{lll}
\toprule
& & 0 \\
\midrule
x & y & a \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_multiindex_tabular(self, multiindex_frame):
result = multiindex_frame.to_latex()
expected = _dedent(
r"""
\begin{tabular}{llrrrr}
\toprule
& & 0 & 1 & 2 & 3 \\
\midrule
c1 & 0 & 0 & 1 & 2 & 3 \\
& 1 & 4 & 5 & 6 & 7 \\
c2 & 0 & 0 & 1 & 2 & 3 \\
& 1 & 4 & 5 & 6 & 7 \\
c3 & 0 & 0 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_multicolumn_tabular(self, multiindex_frame):
# GH 14184
df = multiindex_frame.T
df.columns.names = ["a", "b"]
result = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{lrrrrr}
\toprule
a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
b & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 4 & 0 & 4 & 0 \\
1 & 1 & 5 & 1 & 5 & 1 \\
2 & 2 & 6 & 2 & 6 & 2 \\
3 & 3 & 7 & 3 & 7 & 3 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_index_has_name_tabular(self):
# GH 10660
df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
result = df.set_index(["a", "b"]).to_latex()
expected = _dedent(
r"""
\begin{tabular}{llr}
\toprule
& & c \\
a & b & \\
\midrule
0 & a & 1 \\
& b & 2 \\
1 & a & 3 \\
& b & 4 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_groupby_tabular(self):
# GH 10660
df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
result = df.groupby("a").describe().to_latex()
expected = _dedent(
r"""
\begin{tabular}{lrrrrrrrr}
\toprule
{} & \multicolumn{8}{l}{c} \\
{} & count & mean & std & min & 25\% & 50\% & 75\% & max \\
a & & & & & & & & \\
\midrule
0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\
1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_multiindex_dupe_level(self):
# see gh-14484
#
# If an index is repeated in subsequent rows, it should be
# replaced with a blank in the created table. This should
# ONLY happen if all higher order indices (to the left) are
# equal too. In this test, 'c' has to be printed both times
# because the higher order index 'A' != 'B'.
df = DataFrame(
index=pd.MultiIndex.from_tuples([("A", "c"), ("B", "c")]), columns=["col"]
)
result = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{lll}
\toprule
& & col \\
\midrule
A & c & NaN \\
B & c & NaN \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_multicolumn_default(self, multicolumn_frame):
result = multicolumn_frame.to_latex()
expected = _dedent(
r"""
\begin{tabular}{lrrrrr}
\toprule
{} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
{} & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 5 & 0 & 5 & 0 \\
1 & 1 & 6 & 1 & 6 & 1 \\
2 & 2 & 7 & 2 & 7 & 2 \\
3 & 3 & 8 & 3 & 8 & 3 \\
4 & 4 & 9 & 4 & 9 & 4 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_multicolumn_false(self, multicolumn_frame):
result = multicolumn_frame.to_latex(multicolumn=False)
expected = _dedent(
r"""
\begin{tabular}{lrrrrr}
\toprule
{} & c1 & & c2 & & c3 \\
{} & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 5 & 0 & 5 & 0 \\
1 & 1 & 6 & 1 & 6 & 1 \\
2 & 2 & 7 & 2 & 7 & 2 \\
3 & 3 & 8 & 3 & 8 & 3 \\
4 & 4 & 9 & 4 & 9 & 4 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_multirow_true(self, multicolumn_frame):
result = multicolumn_frame.T.to_latex(multirow=True)
expected = _dedent(
r"""
\begin{tabular}{llrrrrr}
\toprule
& & 0 & 1 & 2 & 3 & 4 \\
\midrule
\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
& 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
& 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_multicolumnrow_with_multicol_format(self, multicolumn_frame):
multicolumn_frame.index = multicolumn_frame.T.index
result = multicolumn_frame.T.to_latex(
multirow=True,
multicolumn=True,
multicolumn_format="c",
)
expected = _dedent(
r"""
\begin{tabular}{llrrrrr}
\toprule
& & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
& & 0 & 1 & 0 & 1 & 0 \\
\midrule
\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
& 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
& 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
@pytest.mark.parametrize("name0", [None, "named0"])
@pytest.mark.parametrize("name1", [None, "named1"])
@pytest.mark.parametrize("axes", [[0], [1], [0, 1]])
def test_to_latex_multiindex_names(self, name0, name1, axes):
# GH 18667
names = [name0, name1]
mi = pd.MultiIndex.from_product([[1, 2], [3, 4]])
df = DataFrame(-1, index=mi.copy(), columns=mi.copy())
for idx in axes:
df.axes[idx].names = names
idx_names = tuple(n or "{}" for n in names)
idx_names_row = (
f"{idx_names[0]} & {idx_names[1]} & & & & \\\\\n"
if (0 in axes and any(names))
else ""
)
placeholder = "{}" if any(names) and 1 in axes else " "
col_names = [n if (bool(n) and 1 in axes) else placeholder for n in names]
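# idx_names_row is the extra header line naming the row-index levels (only
# emitted when axis 0 is named); col_names fill the column-index name cells
# at the start of the two column-header rows in the expected output below.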
observed = df.to_latex()
expected = r"""\begin{tabular}{llrrrr}
\toprule
& %s & \multicolumn{2}{l}{1} & \multicolumn{2}{l}{2} \\
& %s & 3 & 4 & 3 & 4 \\
%s\midrule
1 & 3 & -1 & -1 & -1 & -1 \\
& 4 & -1 & -1 & -1 & -1 \\
2 & 3 & -1 & -1 & -1 & -1 \\
& 4 & -1 & -1 & -1 & -1 \\
\bottomrule
\end{tabular}
""" % tuple(
list(col_names) + [idx_names_row]
)
assert observed == expected
@pytest.mark.parametrize("one_row", [True, False])
def test_to_latex_multiindex_nans(self, one_row):
# GH 14249
df = DataFrame({"a": [None, 1], "b": [2, 3], "c": [4, 5]})
if one_row:
df = df.iloc[[0]]
observed = df.set_index(["a", "b"]).to_latex()
expected = _dedent(
r"""
\begin{tabular}{llr}
\toprule
& & c \\
a & b & \\
\midrule
NaN & 2 & 4 \\
"""
)
if not one_row:
expected += r"""1.0 & 3 & 5 \\
"""
expected += r"""\bottomrule
\end{tabular}
"""
assert observed == expected
def test_to_latex_non_string_index(self):
# GH 19981
df = DataFrame([[1, 2, 3]] * 2).set_index([0, 1])
result = df.to_latex()
expected = _dedent(
r"""
\begin{tabular}{llr}
\toprule
& & 2 \\
0 & 1 & \\
\midrule
1 & 2 & 3 \\
& 2 & 3 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_multiindex_multirow(self):
# GH 16719
mi = pd.MultiIndex.from_product(
[[0.0, 1.0], [3.0, 2.0, 1.0], ["0", "1"]], names=["i", "val0", "val1"]
)
df = DataFrame(index=mi)
result = df.to_latex(multirow=True, escape=False)
expected = _dedent(
r"""
\begin{tabular}{lll}
\toprule
& & \\
i & val0 & val1 \\
\midrule
\multirow{6}{*}{0.0} & \multirow{2}{*}{3.0} & 0 \\
& & 1 \\
\cline{2-3}
& \multirow{2}{*}{2.0} & 0 \\
& & 1 \\
\cline{2-3}
& \multirow{2}{*}{1.0} & 0 \\
& & 1 \\
\cline{1-3}
\cline{2-3}
\multirow{6}{*}{1.0} & \multirow{2}{*}{3.0} & 0 \\
& & 1 \\
\cline{2-3}
& \multirow{2}{*}{2.0} & 0 \\
& & 1 \\
\cline{2-3}
& \multirow{2}{*}{1.0} & 0 \\
& & 1 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
class TestTableBuilder:
@pytest.fixture
def dataframe(self):
return DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
@pytest.fixture
def table_builder(self, dataframe):
return RegularTableBuilder(formatter=DataFrameFormatter(dataframe))
def test_create_row_iterator(self, table_builder):
iterator = table_builder._create_row_iterator(over="header")
assert isinstance(iterator, RowHeaderIterator)
def test_create_body_iterator(self, table_builder):
iterator = table_builder._create_row_iterator(over="body")
assert isinstance(iterator, RowBodyIterator)
def test_create_body_wrong_kwarg_raises(self, table_builder):
with pytest.raises(ValueError, match="must be either 'header' or 'body'"):
table_builder._create_row_iterator(over="SOMETHING BAD")
class TestRowStringConverter:
@pytest.mark.parametrize(
"row_num, expected",
[
(0, r"{} & Design & ratio & xy \\"),
(1, r"0 & 1 & 4 & 10 \\"),
(2, r"1 & 2 & 5 & 11 \\"),
],
)
def test_get_strrow_normal_without_escape(self, row_num, expected):
df = DataFrame({r"Design": [1, 2, 3], r"ratio": [4, 5, 6], r"xy": [10, 11, 12]})
row_string_converter = RowStringConverter(
formatter=DataFrameFormatter(df, escape=True),
)
assert row_string_converter.get_strrow(row_num=row_num) == expected
@pytest.mark.parametrize(
"row_num, expected",
[
(0, r"{} & Design \# & ratio, \% & x\&y \\"),
(1, r"0 & 1 & 4 & 10 \\"),
(2, r"1 & 2 & 5 & 11 \\"),
],
)
def test_get_strrow_normal_with_escape(self, row_num, expected):
df = DataFrame(
{r"Design #": [1, 2, 3], r"ratio, %": [4, 5, 6], r"x&y": [10, 11, 12]}
)
row_string_converter = RowStringConverter(
formatter=DataFrameFormatter(df, escape=True),
)
assert row_string_converter.get_strrow(row_num=row_num) == expected
@pytest.mark.parametrize(
"row_num, expected",
[
(0, r"{} & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\"),
(1, r"{} & 0 & 1 & 0 & 1 & 0 \\"),
(2, r"0 & 0 & 5 & 0 & 5 & 0 \\"),
],
)
def test_get_strrow_multindex_multicolumn(self, row_num, expected):
df = DataFrame(
{
("c1", 0): {x: x for x in range(5)},
("c1", 1): {x: x + 5 for x in range(5)},
("c2", 0): {x: x for x in range(5)},
("c2", 1): {x: x + 5 for x in range(5)},
("c3", 0): {x: x for x in range(5)},
}
)
row_string_converter = RowStringConverter(
formatter=DataFrameFormatter(df),
multicolumn=True,
multicolumn_format="r",
multirow=True,
)
assert row_string_converter.get_strrow(row_num=row_num) == expected
""" Distribution specific override class for CentOS family (RHEL, Fedora) """
import logging
from typing import Any
from typing import cast
from typing import List
from certbot_apache._internal import apache_util
from certbot_apache._internal import configurator
from certbot_apache._internal import parser
from certbot_apache._internal.configurator import OsOptions
from certbot import errors
from certbot import util
from certbot.errors import MisconfigurationError
logger = logging.getLogger(__name__)
class CentOSConfigurator(configurator.ApacheConfigurator):
"""CentOS specific ApacheConfigurator override class"""
OS_DEFAULTS = OsOptions(
server_root="/etc/httpd",
vhost_root="/etc/httpd/conf.d",
vhost_files="*.conf",
logs_root="/var/log/httpd",
ctl="apachectl",
version_cmd=['apachectl', '-v'],
restart_cmd=['apachectl', 'graceful'],
restart_cmd_alt=['apachectl', 'restart'],
conftest_cmd=['apachectl', 'configtest'],
challenge_location="/etc/httpd/conf.d",
)
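# These defaults follow the stock RHEL/CentOS/Fedora httpd layout:
# configuration under /etc/httpd with per-vhost files in conf.d, logs under
# /var/log/httpd, and apachectl used for version checks and restarts.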
def config_test(self) -> None:
"""
Override config_test to mitigate a configtest error in a vanilla installation
of mod_ssl on Fedora. The error is caused by non-existent self-signed
certificates referenced by the configuration, which would be autogenerated
during the first (re)start of httpd.
"""
os_info = util.get_os_info()
fedora = os_info[0].lower() == "fedora"
try:
super().config_test()
except errors.MisconfigurationError:
if fedora:
self._try_restart_fedora()
else:
raise
def _try_restart_fedora(self) -> None:
"""
Tries to restart httpd using systemctl to generate the self-signed key pair.
"""
try:
util.run_script(['systemctl', 'restart', 'httpd'])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
# Finish with actual config check to see if systemctl restart helped
super().config_test()
def _prepare_options(self) -> None:
"""
Override the options dictionary initialization in order to support
alternative restart cmd used in CentOS.
"""
super()._prepare_options()
if not self.options.restart_cmd_alt: # pragma: no cover
raise ValueError("OS option restart_cmd_alt must be set for CentOS.")
self.options.restart_cmd_alt[0] = self.options.ctl
def get_parser(self) -> "CentOSParser":
"""Initializes the ApacheParser"""
return CentOSParser(
self.options.server_root, self, self.options.vhost_root, self.version)
def _deploy_cert(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=arguments-differ
"""
Override _deploy_cert in order to ensure that the Apache configuration
has "LoadModule ssl_module..." before parsing the VirtualHost configuration
that was created by Certbot
"""
super()._deploy_cert(*args, **kwargs)
if self.version < (2, 4, 0):
self._deploy_loadmodule_ssl_if_needed()
def _deploy_loadmodule_ssl_if_needed(self) -> None:
"""
Add "LoadModule ssl_module <pre-existing path>" to main httpd.conf if
it doesn't exist there already.
"""
loadmods = self.parser.find_dir("LoadModule", "ssl_module", exclude=False)
correct_ifmods: List[str] = []
loadmod_args: List[str] = []
loadmod_paths: List[str] = []
for m in loadmods:
noarg_path = m.rpartition("/")[0]
path_args = self.parser.get_all_args(noarg_path)
if loadmod_args:
if loadmod_args != path_args:
msg = ("Certbot encountered multiple LoadModule directives "
"for LoadModule ssl_module with differing library paths. "
"Please remove or comment out the one(s) that are not in "
"use, and run Certbot again.")
raise MisconfigurationError(msg)
else:
loadmod_args = [arg for arg in path_args if arg]
centos_parser: CentOSParser = cast(CentOSParser, self.parser)
if centos_parser.not_modssl_ifmodule(noarg_path):
if centos_parser.loc["default"] in noarg_path:
# LoadModule already in the main configuration file
if "ifmodule/" in noarg_path.lower() or "ifmodule[1]" in noarg_path.lower():
# It's the first or only IfModule in the file
return
# Populate the list of known !mod_ssl.c IfModules
nodir_path = noarg_path.rpartition("/directive")[0]
correct_ifmods.append(nodir_path)
else:
loadmod_paths.append(noarg_path)
if not loadmod_args:
# Do not try to enable mod_ssl
return
# Force creation, as the directive wasn't found at the beginning of
# httpd.conf
rootconf_ifmod = self.parser.create_ifmod(
parser.get_aug_path(self.parser.loc["default"]),
"!mod_ssl.c", beginning=True)
# parser.create_ifmod returns a path postfixed with "/", remove that
self.parser.add_dir(rootconf_ifmod[:-1], "LoadModule", loadmod_args)
correct_ifmods.append(rootconf_ifmod[:-1])
self.save_notes += "Added LoadModule ssl_module to main configuration.\n"
# Wrap LoadModule mod_ssl inside of <IfModule !mod_ssl.c> if it's not
# configured like this already.
for loadmod_path in loadmod_paths:
nodir_path = loadmod_path.split("/directive")[0]
# Remove the old LoadModule directive
self.parser.aug.remove(loadmod_path)
# Create a new IfModule !mod_ssl.c if not already found on path
ssl_ifmod = self.parser.get_ifmod(nodir_path, "!mod_ssl.c", beginning=True)[:-1]
if ssl_ifmod not in correct_ifmods:
self.parser.add_dir(ssl_ifmod, "LoadModule", loadmod_args)
correct_ifmods.append(ssl_ifmod)
self.save_notes += ("Wrapped pre-existing LoadModule ssl_module "
"inside of <IfModule !mod_ssl> block.\n")
class CentOSParser(parser.ApacheParser):
"""CentOS specific ApacheParser override class"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
# CentOS specific configuration file for Apache
self.sysconfig_filep: str = "/etc/sysconfig/httpd"
super().__init__(*args, **kwargs)
def update_runtime_variables(self) -> None:
""" Override for update_runtime_variables for custom parsing """
# Opportunistic, works if SELinux not enforced
super().update_runtime_variables()
self.parse_sysconfig_var()
def parse_sysconfig_var(self) -> None:
""" Parses Apache CLI options from CentOS configuration file """
defines = apache_util.parse_define_file(self.sysconfig_filep, "OPTIONS")
for k, v in defines.items():
self.variables[k] = v
def not_modssl_ifmodule(self, path: str) -> bool:
"""Checks if the provided Augeas path has argument !mod_ssl"""
if "ifmodule" not in path.lower():
return False
# Trim the path to the last ifmodule
workpath = path.lower()
while workpath:
# Get path to the last IfModule (ignore the tail)
parts = workpath.rpartition("ifmodule")
if not parts[0]:
# IfModule not found
break
ifmod_path = parts[0] + parts[1]
# Check if ifmodule had an index
if parts[2].startswith("["):
# Append the index from tail
ifmod_path += parts[2].partition("/")[0]
# Get the original path trimmed to correct length
# This is required to preserve cases
ifmod_real_path = path[0:len(ifmod_path)]
if "!mod_ssl.c" in self.get_all_args(ifmod_real_path):
return True
# Set the workpath to the heading part
workpath = parts[0]
return False
# -*- coding: utf-8 -*-
import urlparse
from nose.tools import * # flake8: noqa
from website.models import Node
from website.util.sanitize import strip_html
from tests.base import ApiTestCase
from tests.factories import AuthUserFactory, DashboardFactory, FolderFactory, ProjectFactory
from api.base.settings.defaults import API_BASE
class TestUsers(ApiTestCase):
def setUp(self):
super(TestUsers, self).setUp()
self.user_one = AuthUserFactory()
self.user_two = AuthUserFactory()
def tearDown(self):
super(TestUsers, self).tearDown()
def test_returns_200(self):
res = self.app.get('/{}users/'.format(API_BASE))
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_find_user_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_two._id, ids)
def test_all_users_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_multiple_in_users(self):
url = "/{}users/?filter[full_name]=fred".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_single_user_in_users(self):
url = "/{}users/?filter[full_name]=my".format(API_BASE)
self.user_one.fullname = 'My Mom'
self.user_one.save()
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_find_no_user_in_users(self):
url = "/{}users/?filter[full_name]=NotMyMom".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_not_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_users_list_takes_profile_image_size_param(self):
size = 42
url = "/{}users/?profile_image_size={}".format(API_BASE, size)
res = self.app.get(url)
user_json = res.json['data']
for user in user_json:
profile_image_url = user['attributes']['profile_image_url']
query_dict = urlparse.parse_qs(urlparse.urlparse(profile_image_url).query)
assert_equal(int(query_dict.get('s')[0]), size)
class TestUserDetail(ApiTestCase):
def setUp(self):
super(TestUserDetail, self).setUp()
self.user_one = AuthUserFactory()
self.user_one.social['twitter'] = 'howtopizza'
self.user_one.save()
self.user_two = AuthUserFactory()
def tearDown(self):
super(TestUserDetail, self).tearDown()
def test_gets_200(self):
url = "/{}users/{}/".format(API_BASE, self.user_one._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_get_correct_pk_user(self):
url = "/{}users/{}/".format(API_BASE, self.user_one._id)
res = self.app.get(url)
user_json = res.json['data']
assert_equal(user_json['attributes']['full_name'], self.user_one.fullname)
assert_equal(user_json['attributes']['twitter'], 'howtopizza')
def test_get_incorrect_pk_user_not_logged_in(self):
url = "/{}users/{}/".format(API_BASE, self.user_two._id)
res = self.app.get(url)
user_json = res.json['data']
assert_not_equal(user_json['attributes']['full_name'], self.user_one.fullname)
def test_get_new_users(self):
url = "/{}users/{}/".format(API_BASE, self.user_two._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
user_json = res.json['data']['attributes']
assert_equal(user_json['full_name'], self.user_two.fullname)
assert_equal(user_json['gitHub'], '')
assert_equal(user_json['scholar'], '')
assert_equal(user_json['personal_website'], '')
assert_equal(user_json['twitter'], '')
assert_equal(user_json['linkedIn'], '')
assert_equal(user_json['impactStory'], '')
assert_equal(user_json['orcid'], '')
assert_equal(user_json['researcherId'], '')
def test_get_incorrect_pk_user_logged_in(self):
url = "/{}users/{}/".format(API_BASE, self.user_two._id)
res = self.app.get(url, auth=self.user_one.auth)
user_json = res.json['data']
assert_not_equal(user_json['attributes']['full_name'], self.user_one.fullname)
assert_equal(user_json['attributes']['full_name'], self.user_two.fullname)
def test_user_detail_takes_profile_image_size_param(self):
size = 42
url = "/{}users/{}/?profile_image_size={}".format(API_BASE, self.user_one._id, size)
res = self.app.get(url)
user_json = res.json['data']
profile_image_url = user_json['attributes']['profile_image_url']
query_dict = urlparse.parse_qs(urlparse.urlparse(profile_image_url).query)
assert_equal(int(query_dict.get('s')[0]), size)
class TestUserNodes(ApiTestCase):
def setUp(self):
super(TestUserNodes, self).setUp()
self.user_one = AuthUserFactory()
self.user_one.social['twitter'] = 'howtopizza'
self.user_one.save()
self.user_two = AuthUserFactory()
self.public_project_user_one = ProjectFactory(title="Public Project User One",
is_public=True,
creator=self.user_one)
self.private_project_user_one = ProjectFactory(title="Private Project User One",
is_public=False,
creator=self.user_one)
self.public_project_user_two = ProjectFactory(title="Public Project User Two",
is_public=True,
creator=self.user_two)
self.private_project_user_two = ProjectFactory(title="Private Project User Two",
is_public=False,
creator=self.user_two)
self.deleted_project_user_one = FolderFactory(title="Deleted Project User One",
is_public=False,
creator=self.user_one,
is_deleted=True)
self.folder = FolderFactory()
self.deleted_folder = FolderFactory(title="Deleted Folder User One",
is_public=False,
creator=self.user_one,
is_deleted=True)
self.dashboard = DashboardFactory()
def tearDown(self):
super(TestUserNodes, self).tearDown()
def test_authorized_in_gets_200(self):
url = "/{}users/{}/nodes/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_one.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_anonymous_gets_200(self):
url = "/{}users/{}/nodes/".format(API_BASE, self.user_one._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_get_projects_logged_in(self):
url = "/{}users/{}/nodes/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_one.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert_in(self.public_project_user_one._id, ids)
assert_in(self.private_project_user_one._id, ids)
assert_not_in(self.public_project_user_two._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.deleted_folder._id, ids)
assert_not_in(self.deleted_project_user_one._id, ids)
def test_get_projects_not_logged_in(self):
url = "/{}users/{}/nodes/".format(API_BASE, self.user_one._id)
res = self.app.get(url)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert_in(self.public_project_user_one._id, ids)
assert_not_in(self.private_project_user_one._id, ids)
assert_not_in(self.public_project_user_two._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.deleted_project_user_one._id, ids)
def test_get_projects_logged_in_as_different_user(self):
url = "/{}users/{}/nodes/".format(API_BASE, self.user_two._id)
res = self.app.get(url, auth=self.user_one.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert_in(self.public_project_user_two._id, ids)
assert_not_in(self.public_project_user_one._id, ids)
assert_not_in(self.private_project_user_one._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.deleted_project_user_one._id, ids)
class TestUserRoutesNodeRoutes(ApiTestCase):
def setUp(self):
super(TestUserRoutesNodeRoutes, self).setUp()
self.user_one = AuthUserFactory()
self.user_one.social['twitter'] = 'howtopizza'
self.user_two = AuthUserFactory()
self.public_project_user_one = ProjectFactory(title="Public Project User One", is_public=True, creator=self.user_one)
self.private_project_user_one = ProjectFactory(title="Private Project User One", is_public=False, creator=self.user_one)
self.public_project_user_two = ProjectFactory(title="Public Project User Two", is_public=True, creator=self.user_two)
self.private_project_user_two = ProjectFactory(title="Private Project User Two", is_public=False, creator=self.user_two)
self.deleted_project_user_one = FolderFactory(title="Deleted Project User One", is_public=False, creator=self.user_one, is_deleted=True)
self.folder = FolderFactory()
self.deleted_folder = FolderFactory(title="Deleted Folder User One", is_public=False, creator=self.user_one, is_deleted=True)
self.dashboard = DashboardFactory()
def tearDown(self):
super(TestUserRoutesNodeRoutes, self).tearDown()
Node.remove()
def test_get_200_path_users_me_userone_logged_in(self):
url = "/{}users/me/".format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
assert_equal(res.status_code, 200)
def test_get_200_path_users_me_usertwo_logged_in(self):
url = "/{}users/me/".format(API_BASE)
res = self.app.get(url, auth=self.user_two.auth)
assert_equal(res.status_code, 200)
def test_get_403_path_users_me_no_user(self):
# TODO: change expected exception from 403 to 401 for unauthorized users
url = "/{}users/me/".format(API_BASE)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_get_404_path_users_user_id_me_user_logged_in(self):
url = "/{}users/{}/me/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_404_path_users_user_id_me_no_user(self):
url = "/{}users/{}/me/".format(API_BASE, self.user_one._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_404_path_users_user_id_me_unauthorized_user(self):
url = "/{}users/{}/me/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_200_path_users_user_id_user_logged_in(self):
url = "/{}users/{}/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_one.auth)
assert_equal(res.status_code, 200)
def test_get_200_path_users_user_id_no_user(self):
url = "/{}users/{}/".format(API_BASE, self.user_two._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_get_200_path_users_user_id_unauthorized_user(self):
url = "/{}users/{}/".format(API_BASE, self.user_two._id)
res = self.app.get(url, auth=self.user_one.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.user_two._id)
def test_get_200_path_users_me_nodes_user_logged_in(self):
url = "/{}users/me/nodes/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_one.auth)
assert_equal(res.status_code, 200)
ids = {each['id'] for each in res.json['data']}
assert_in(self.public_project_user_one._id, ids)
assert_in(self.private_project_user_one._id, ids)
assert_not_in(self.public_project_user_two._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.deleted_folder._id, ids)
assert_not_in(self.deleted_project_user_one._id, ids)
def test_get_403_path_users_me_nodes_no_user(self):
# TODO: change expected exception from 403 to 401 for unauthorized users
url = "/{}users/me/nodes/".format(API_BASE)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_get_200_path_users_user_id_nodes_user_logged_in(self):
url = "/{}users/{}/nodes/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_one.auth)
assert_equal(res.status_code, 200)
ids = {each['id'] for each in res.json['data']}
assert_in(self.public_project_user_one._id, ids)
assert_in(self.private_project_user_one._id, ids)
assert_not_in(self.public_project_user_two._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.deleted_folder._id, ids)
assert_not_in(self.deleted_project_user_one._id, ids)
def test_get_200_path_users_user_id_nodes_no_user(self):
url = "/{}users/{}/nodes/".format(API_BASE, self.user_one._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
# an anonymous/unauthorized user can only see the public projects user_one contributes to.
ids = {each['id'] for each in res.json['data']}
assert_in(self.public_project_user_one._id, ids)
assert_not_in(self.private_project_user_one._id, ids)
assert_not_in(self.public_project_user_two._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.deleted_folder._id, ids)
assert_not_in(self.deleted_project_user_one._id, ids)
def test_get_200_path_users_user_id_nodes_unauthorized_user(self):
url = "/{}users/{}/nodes/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_two.auth)
assert_equal(res.status_code, 200)
# an anonymous/unauthorized user can only see the public projects user_one contributes to.
ids = {each['id'] for each in res.json['data']}
assert_in(self.public_project_user_one._id, ids)
assert_not_in(self.private_project_user_one._id, ids)
assert_not_in(self.public_project_user_two._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.deleted_folder._id, ids)
assert_not_in(self.deleted_project_user_one._id, ids)
def test_get_404_path_users_user_id_nodes_me_user_logged_in(self):
url = "/{}users/{}/nodes/me/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_404_path_users_user_id_nodes_me_unauthorized_user(self):
url = "/{}users/{}/nodes/me/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_404_path_users_user_id_nodes_me_no_user(self):
url = "/{}users/{}/nodes/me/".format(API_BASE, self.user_one._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_404_path_nodes_me_user_logged_in(self):
url = "/{}nodes/me/".format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_404_path_nodes_me_no_user(self):
url = "/{}nodes/me/".format(API_BASE)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_404_path_nodes_user_id_user_logged_in(self):
url = "/{}nodes/{}/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_404_path_nodes_user_id_unauthorized_user(self):
url = "/{}nodes/{}/".format(API_BASE, self.user_one._id)
res = self.app.get(url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_404_path_nodes_user_id_no_user(self):
url = "/{}nodes/{}/".format(API_BASE, self.user_one._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
class TestUserUpdate(ApiTestCase):
def setUp(self):
super(TestUserUpdate, self).setUp()
self.user_one = AuthUserFactory.build(
fullname='Martin Luther King Jr.',
given_name='Martin',
family_name='King',
suffix='Jr.',
social=dict(
github='userOneGithub',
scholar='userOneScholar',
personal='http://www.useronepersonalwebsite.com',
twitter='userOneTwitter',
linkedIn='userOneLinkedIn',
impactStory='userOneImpactStory',
orcid='userOneOrcid',
researcherId='userOneResearcherId'
)
)
self.user_one.save()
self.user_one_url = "/v2/users/{}/".format(self.user_one._id)
self.user_two = AuthUserFactory()
self.user_two.save()
self.new_user_one_data = {
'data': {
'type': 'users',
'id': self.user_one._id,
'attributes': {
'full_name': 'el-Hajj Malik el-Shabazz',
'given_name': 'Malcolm',
'middle_names': 'Malik el-Shabazz',
'family_name': 'X',
'suffix': 'Sr.',
'gitHub': 'newGitHub',
'scholar': 'newScholar',
'personal_website': 'http://www.newpersonalwebsite.com',
'twitter': 'http://www.newpersonalwebsite.com',
'linkedIn': 'newLinkedIn',
'impactStory': 'newImpactStory',
'orcid': 'newOrcid',
'researcherId': 'newResearcherId',
}
}
}
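# The payloads below deliberately omit or corrupt the JSON API 'id' and
# 'type' members so the tests can exercise the 400 (missing member) and
# 409 (conflicting member) error responses.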
self.missing_id = {
'data': {
'type': 'users',
'attributes': {
'full_name': 'el-Hajj Malik el-Shabazz',
'family_name': 'Z',
}
}
}
self.missing_type = {
'data': {
'id': self.user_one._id,
'attributes': {
'full_name': 'el-Hajj Malik el-Shabazz',
'family_name': 'Z',
}
}
}
self.incorrect_id = {
'data': {
'id': '12345',
'type': 'users',
'attributes': {
'full_name': 'el-Hajj Malik el-Shabazz',
'family_name': 'Z',
}
}
}
self.incorrect_type = {
'data': {
'id': self.user_one._id,
'type': 'Wrong type.',
'attributes': {
'full_name': 'el-Hajj Malik el-Shabazz',
'family_name': 'Z',
}
}
}
def tearDown(self):
super(TestUserUpdate, self).tearDown()
def test_patch_user_incorrect_type(self):
res = self.app.put_json_api(self.user_one_url, self.incorrect_type, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_patch_user_incorrect_id(self):
res = self.app.put_json_api(self.user_one_url, self.incorrect_id, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_patch_user_no_type(self):
res = self.app.put_json_api(self.user_one_url, self.missing_type, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
def test_patch_user_no_id(self):
res = self.app.put_json_api(self.user_one_url, self.missing_id, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
def test_partial_patch_user_incorrect_type(self):
res = self.app.patch_json_api(self.user_one_url, self.incorrect_type, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_partial_patch_user_incorrect_id(self):
res = self.app.patch_json_api(self.user_one_url, self.incorrect_id, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_partial_patch_user_no_type(self):
res = self.app.patch_json_api(self.user_one_url, self.missing_type, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_partial_patch_user_no_id(self):
res = self.app.patch_json_api(self.user_one_url, self.missing_id, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_patch_fields_not_nested(self):
res = self.app.put_json_api(self.user_one_url, {'data': {'id': self.user_one._id, 'type': 'users', 'full_name': 'New name'}}, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data/attributes.')
def test_partial_patch_fields_not_nested(self):
res = self.app.patch_json_api(self.user_one_url, {'data': {'id': self.user_one._id, 'type': 'users', 'full_name': 'New name'}}, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_patch_user_logged_out(self):
res = self.app.patch_json_api(self.user_one_url, {
'data': {
'id': self.user_one._id,
'type': 'users',
'attributes': {
'full_name': self.new_user_one_data['data']['attributes']['full_name'],
}
}
}, expect_errors=True)
assert_equal(res.status_code, 401)
def test_patch_user_without_required_field(self):
# PATCH does not require required fields
res = self.app.patch_json_api(self.user_one_url, {
'data': {
'id': self.user_one._id,
'type': 'users',
'attributes': {
'family_name': self.new_user_one_data['data']['attributes']['family_name'],
}
}
}, auth=self.user_one.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['family_name'], self.new_user_one_data['data']['attributes']['family_name'])
self.user_one.reload()
assert_equal(self.user_one.family_name, self.new_user_one_data['data']['attributes']['family_name'])
def test_put_user_without_required_field(self):
# PUT requires all required fields
res = self.app.put_json_api(self.user_one_url, {
'data': {
'id': self.user_one._id,
'type': 'users',
'attributes': {
'family_name': self.new_user_one_data['data']['attributes']['family_name'],
}
}
}, auth=self.user_one.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_partial_patch_user_logged_in(self):
# Test to make sure new fields are patched and old fields stay the same
res = self.app.patch_json_api(self.user_one_url, {
'data': {
'id': self.user_one._id,
'type': 'users',
'attributes': {
'full_name': 'new_fullname',
'gitHub': 'even_newer_github',
'suffix': 'The Millionth'
}
}
}, auth=self.user_one.auth)
self.user_one.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['full_name'], 'new_fullname')
assert_equal(res.json['data']['attributes']['suffix'], 'The Millionth')
assert_equal(res.json['data']['attributes']['gitHub'], 'even_newer_github')
assert_equal(res.json['data']['attributes']['given_name'], self.user_one.given_name)
assert_equal(res.json['data']['attributes']['middle_names'], self.user_one.middle_names)
assert_equal(res.json['data']['attributes']['family_name'], self.user_one.family_name)
assert_equal(res.json['data']['attributes']['personal_website'], self.user_one.social['personal'])
assert_equal(res.json['data']['attributes']['twitter'], self.user_one.social['twitter'])
assert_equal(res.json['data']['attributes']['linkedIn'], self.user_one.social['linkedIn'])
assert_equal(res.json['data']['attributes']['impactStory'], self.user_one.social['impactStory'])
assert_equal(res.json['data']['attributes']['orcid'], self.user_one.social['orcid'])
assert_equal(res.json['data']['attributes']['researcherId'], self.user_one.social['researcherId'])
assert_equal(self.user_one.fullname, 'new_fullname')
assert_equal(self.user_one.suffix, 'The Millionth')
assert_equal(self.user_one.social['github'], 'even_newer_github')
def test_partial_patch_user_logged_in_no_social_fields(self):
# Test to make sure new fields are patched and old fields stay the same
res = self.app.patch_json_api(self.user_one_url, {
'data': {
'id': self.user_one._id,
'type': 'users',
'attributes': {
'full_name': 'new_fullname',
'suffix': 'The Millionth',
}
}
}, auth=self.user_one.auth)
self.user_one.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['full_name'], 'new_fullname')
assert_equal(res.json['data']['attributes']['suffix'], 'The Millionth')
assert_equal(res.json['data']['attributes']['gitHub'], self.user_one.social['github'])
assert_equal(res.json['data']['attributes']['given_name'], self.user_one.given_name)
assert_equal(res.json['data']['attributes']['middle_names'], self.user_one.middle_names)
assert_equal(res.json['data']['attributes']['family_name'], self.user_one.family_name)
assert_equal(res.json['data']['attributes']['personal_website'], self.user_one.social['personal'])
assert_equal(res.json['data']['attributes']['twitter'], self.user_one.social['twitter'])
assert_equal(res.json['data']['attributes']['linkedIn'], self.user_one.social['linkedIn'])
assert_equal(res.json['data']['attributes']['impactStory'], self.user_one.social['impactStory'])
assert_equal(res.json['data']['attributes']['orcid'], self.user_one.social['orcid'])
assert_equal(res.json['data']['attributes']['researcherId'], self.user_one.social['researcherId'])
assert_equal(self.user_one.fullname, 'new_fullname')
assert_equal(self.user_one.suffix, 'The Millionth')
assert_equal(self.user_one.social['github'], 'userOneGithub')
def test_partial_put_user_logged_in(self):
# Test to make sure new fields are patched and old fields stay the same
res = self.app.put_json_api(self.user_one_url, {
'data': {
'id': self.user_one._id,
'type': 'users',
'attributes': {
'full_name': 'new_fullname',
'gitHub': 'even_newer_github',
'suffix': 'The Millionth'
}
}
}, auth=self.user_one.auth)
self.user_one.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['full_name'], 'new_fullname')
assert_equal(res.json['data']['attributes']['suffix'], 'The Millionth')
assert_equal(res.json['data']['attributes']['gitHub'], 'even_newer_github')
assert_equal(res.json['data']['attributes']['given_name'], self.user_one.given_name)
assert_equal(res.json['data']['attributes']['middle_names'], self.user_one.middle_names)
assert_equal(res.json['data']['attributes']['family_name'], self.user_one.family_name)
assert_equal(self.user_one.fullname, 'new_fullname')
assert_equal(self.user_one.suffix, 'The Millionth')
assert_equal(self.user_one.social['github'], 'even_newer_github')
def test_put_user_logged_in(self):
# Logged in user updates their user information via put
res = self.app.put_json_api(self.user_one_url, self.new_user_one_data, auth=self.user_one.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['full_name'], self.new_user_one_data['data']['attributes']['full_name'])
assert_equal(res.json['data']['attributes']['given_name'], self.new_user_one_data['data']['attributes']['given_name'])
assert_equal(res.json['data']['attributes']['middle_names'], self.new_user_one_data['data']['attributes']['middle_names'])
assert_equal(res.json['data']['attributes']['family_name'], self.new_user_one_data['data']['attributes']['family_name'])
assert_equal(res.json['data']['attributes']['suffix'], self.new_user_one_data['data']['attributes']['suffix'])
assert_equal(res.json['data']['attributes']['gitHub'], self.new_user_one_data['data']['attributes']['gitHub'])
assert_equal(res.json['data']['attributes']['personal_website'], self.new_user_one_data['data']['attributes']['personal_website'])
assert_equal(res.json['data']['attributes']['twitter'], self.new_user_one_data['data']['attributes']['twitter'])
assert_equal(res.json['data']['attributes']['linkedIn'], self.new_user_one_data['data']['attributes']['linkedIn'])
assert_equal(res.json['data']['attributes']['impactStory'], self.new_user_one_data['data']['attributes']['impactStory'])
assert_equal(res.json['data']['attributes']['orcid'], self.new_user_one_data['data']['attributes']['orcid'])
assert_equal(res.json['data']['attributes']['researcherId'], self.new_user_one_data['data']['attributes']['researcherId'])
self.user_one.reload()
assert_equal(self.user_one.fullname, self.new_user_one_data['data']['attributes']['full_name'])
assert_equal(self.user_one.given_name, self.new_user_one_data['data']['attributes']['given_name'])
assert_equal(self.user_one.middle_names, self.new_user_one_data['data']['attributes']['middle_names'])
assert_equal(self.user_one.family_name, self.new_user_one_data['data']['attributes']['family_name'])
assert_equal(self.user_one.suffix, self.new_user_one_data['data']['attributes']['suffix'])
assert_equal(self.user_one.social['github'], self.new_user_one_data['data']['attributes']['gitHub'])
assert_equal(self.user_one.social['personal'], self.new_user_one_data['data']['attributes']['personal_website'])
assert_equal(self.user_one.social['twitter'], self.new_user_one_data['data']['attributes']['twitter'])
assert_equal(self.user_one.social['linkedIn'], self.new_user_one_data['data']['attributes']['linkedIn'])
assert_equal(self.user_one.social['impactStory'], self.new_user_one_data['data']['attributes']['impactStory'])
assert_equal(self.user_one.social['orcid'], self.new_user_one_data['data']['attributes']['orcid'])
assert_equal(self.user_one.social['researcherId'], self.new_user_one_data['data']['attributes']['researcherId'])
def test_put_user_logged_out(self):
res = self.app.put_json_api(self.user_one_url, self.new_user_one_data, expect_errors=True)
assert_equal(res.status_code, 401)
def test_put_wrong_user(self):
# User tries to update someone else's user information via put
res = self.app.put_json_api(self.user_one_url, self.new_user_one_data, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_patch_wrong_user(self):
# User tries to update someone else's user information via patch
res = self.app.patch_json_api(self.user_one_url, {
'data': {
'id': self.user_one._id,
'type': 'users',
'attributes': {
'full_name': self.new_user_one_data['data']['attributes']['full_name'],
}
}
}, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.user_one.reload()
assert_not_equal(self.user_one.fullname, self.new_user_one_data['data']['attributes']['full_name'])
def test_update_user_sanitizes_html_properly(self):
"""Post request should update resource, and any HTML in fields should be stripped"""
bad_fullname = 'Malcolm <strong>X</strong>'
bad_family_name = 'X <script>alert("is")</script> a cool name'
res = self.app.patch_json_api(self.user_one_url, {
'data': {
'id': self.user_one._id,
'type': 'users',
'attributes': {
'full_name': bad_fullname,
'family_name': bad_family_name,
}
}
}, auth=self.user_one.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['full_name'], strip_html(bad_fullname))
assert_equal(res.json['data']['attributes']['family_name'], strip_html(bad_family_name))
class TestDeactivatedUser(ApiTestCase):
def setUp(self):
super(TestDeactivatedUser, self).setUp()
self.user = AuthUserFactory()
def test_requesting_as_deactivated_user_returns_400_response(self):
url = '/{}users/{}/'.format(API_BASE, self.user._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
self.user.is_disabled = True
self.user.save()
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_requesting_deactivated_user_returns_410_response(self):
url = '/{}users/{}/'.format(API_BASE, self.user._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
self.user.is_disabled = True
self.user.save()
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 410)
class TestExceptionFormatting(ApiTestCase):
def setUp(self):
super(TestExceptionFormatting, self).setUp()
self.user = AuthUserFactory.build(
fullname='Martin Luther King Jr.',
given_name='Martin',
family_name='King',
suffix='Jr.',
social=dict(
github='userOneGithub',
scholar='userOneScholar',
personal='http://www.useronepersonalwebsite.com',
twitter='userOneTwitter',
linkedIn='userOneLinkedIn',
impactStory='userOneImpactStory',
orcid='userOneOrcid',
researcherId='userOneResearcherId'
)
)
self.url = '/{}users/{}/'.format(API_BASE, self.user._id)
self.user_two = AuthUserFactory()
def test_updates_user_with_no_fullname(self):
res = self.app.put_json_api(self.url, {'data': {'id': self.user._id, 'type': 'users', 'attributes': {}}}, auth=self.user.auth, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(res.json['errors'][0]['source'], {'pointer': '/data/attributes/full_name'})
assert_equal(res.json['errors'][0]['detail'], 'This field is required.')
def test_updates_user_unauthorized(self):
res = self.app.put_json_api(self.url, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(errors[0], {'detail': "Authentication credentials were not provided."})
def test_updates_user_forbidden(self):
res = self.app.put_json_api(self.url, auth=self.user_two.auth, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(errors[0], {'detail': 'You do not have permission to perform this action.'})
def test_user_does_not_exist_formatting(self):
url = '/{}users/{}/'.format(API_BASE, '12345')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(errors[0], {'detail': 'Not found.'})
def test_basic_auth_me_wrong_password(self):
url = '/{}users/{}/'.format(API_BASE, 'me')
res = self.app.get(url, auth=(self.user.username, 'nottherightone'), expect_errors=True)
assert_equal(res.status_code, 401)
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Defines embedders for various cache objects."""
import abc
import torch
from torch import nn
def from_config(config):
"""Creates an embedder specified by the config.
Args:
config (cfg.Config): specifies embedder type and constructor args.
Returns:
Embedder: embedder specified by the config.
"""
embedder_type = config.get("type")
if embedder_type == "byte":
return ByteEmbedder(config.get("bytes_per_entry"), config.get("embed_dim"))
elif embedder_type == "dynamic-vocab":
return DynamicVocabEmbedder(
config.get("embed_dim"), config.get("max_vocab_size"))
elif embedder_type == "positional":
return PositionalEmbedder(config.get("embed_dim"))
else:
raise ValueError("{} not a supported embedding type.".format(embedder_type))
class Embedder(nn.Module):
"""Embeds a batch of objects into an embedding space.
Subclasses of Embedder should register with the from_config method.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, embed_dim):
"""Sets the output embedding dimension to be embed_dim.
Args:
embed_dim (int): dimension of output of forward call.
"""
super(Embedder, self).__init__()
self._embed_dim = embed_dim
@property
def embed_dim(self):
return self._embed_dim
class ByteEmbedder(Embedder):
"""Embeds each byte and concatenates."""
def __init__(self, bytes_per_entry, embed_dim):
"""Embeds entries that have bytes_per_entry many bytes.
Args:
bytes_per_entry (int): number of bytes per input.
embed_dim (int): see parent class.
"""
super(ByteEmbedder, self).__init__(embed_dim)
if embed_dim % bytes_per_entry != 0:
raise ValueError(
"Embed dim ({}) must be an even multiple of bytes per entry ({})"
.format(embed_dim, bytes_per_entry))
embed_dim_per_byte = embed_dim // bytes_per_entry
# 256 possible byte values
self._byte_embedding = nn.Embedding(256, embed_dim_per_byte)
self._bytes_per_entry = bytes_per_entry
self._final_layer = nn.Linear(embed_dim, embed_dim)
def forward(self, ints):
"""Returns embeddings for each int interpretted as a byte array.
Args:
ints (list[int]): batch of inputs of length batch_size.
Returns:
embeddings (torch.FloatTensor): batch of embeddings of shape
(batch_size, embed_dim). Each int is interpreted as bytes_per_entry
bytes and each byte is embedded separately.
"""
def int_to_byte_tensor(ints, num_bytes):
"""Converts ints to tensor of shape (num_bytes).
Args:
ints (list[int]): ints to convert.
num_bytes (int): number of bytes to convert to.
Returns:
byte_tensor (torch.LongTensor): shape (len(ints), num_bytes).
byte_tensor[i][j] = value of jth byte of ints[i].
"""
# Byte order doesn't matter as long as it's consistent.
# to_bytes() yields a bytes object; wrap it in list() so torch.tensor can
# infer an integer dtype from a list of ints.
return torch.tensor(
[list(int(x).to_bytes(num_bytes, byteorder="big")) for x in ints]).long()
# (batch_size, bytes_per_entry, embed_dim_per_byte)
byte_tensors = int_to_byte_tensor(ints, self._bytes_per_entry)
byte_embeddings = self._byte_embedding(byte_tensors)
return self._final_layer(byte_embeddings.view(-1, self.embed_dim))
class DynamicVocabEmbedder(Embedder):
"""Dynamically constructs a vocab, assigning embeddings to new inputs.
After max_vocab_size - 1 unique inputs are observed (index 0 is reserved
for UNK), any further new input is assigned the UNK embedding.
"""
def __init__(self, embed_dim, max_vocab_size):
super().__init__(embed_dim)
self._max_vocab_size = max_vocab_size
self._input_to_index = {}
# Reserve index 0 for UNK
self._vocab_size = 1
# Override default initialization of embeddings with Xavier
weight = torch.zeros(max_vocab_size, embed_dim)
nn.init.xavier_uniform_(weight)
self._embedding = nn.Embedding(max_vocab_size, embed_dim, _weight=weight)
def forward(self, inputs):
"""Returns embeddings for each int interpretted as a byte array.
Args:
inputs (list[Object]): batch of hashable inputs of length batch_size.
Returns:
embeddings (torch.FloatTensor): batch of embeddings of shape
(batch_size, embed_dim).
"""
def input_to_index(inp):
if (inp not in self._input_to_index and
self._max_vocab_size > self._vocab_size):
self._input_to_index[inp] = self._vocab_size
self._vocab_size += 1
# Return index 0 (UNK) if vocab is full and inp is not in vocab
return self._input_to_index.get(inp, 0)
indices = torch.tensor([input_to_index(inp) for inp in inputs]).long()
return self._embedding(indices)
def state_dict(self, destination=None, prefix="", keep_vars=False):
state_dict = super().state_dict(destination, prefix, keep_vars)
state_dict[prefix + "vocab_size"] = self._vocab_size
state_dict[prefix + "input_to_index"] = self._input_to_index
return state_dict
def _load_from_state_dict(self, state_dict, prefix, strict, missing_keys,
unexpected_keys, error_msgs):
self._vocab_size = state_dict.pop(prefix + "vocab_size")
self._input_to_index = state_dict.pop(prefix + "input_to_index")
super()._load_from_state_dict(
state_dict, prefix, strict, missing_keys, unexpected_keys, error_msgs)
class PositionalEmbedder(Embedder):
"""Takes position index and returns a simple fixed embedding."""
def forward(self, position_indices):
"""Returns a fixed embedding for each input index.
Embeds positions according to Vaswani et al., 2017:
embed_{2i} = sin(pos / 10000^(2i / embed_dim))
embed_{2i + 1} = cos(pos / 10000^(2i / embed_dim))
Args:
position_indices (list[int]): batch of positions of length batch_size
Returns:
embeddings (torch.FloatTensor): of shape (batch_size, embed_dim)
"""
batch_size = len(position_indices)
# i's in above equation
embed_indices = torch.arange(self.embed_dim).expand(batch_size, -1).float()
position_tensor = torch.tensor(position_indices).unsqueeze(-1).float()
embedding = position_tensor / 10000. ** (2 * embed_indices / self.embed_dim)
embedding = torch.where(
embed_indices % 2 == 0, torch.sin(embedding), torch.cos(embedding))
return embedding
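# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal demonstration of driving the embedders through from_config().
# ``SimpleConfig`` is a hypothetical stand-in that only mimics the ``.get()``
# accessor the factory relies on; the real cfg.Config is assumed to expose the
# same interface.
if __name__ == "__main__":
    class SimpleConfig(object):
        """Hypothetical minimal config exposing only .get(key)."""

        def __init__(self, values):
            self._values = values

        def get(self, key):
            return self._values[key]

    positional = from_config(SimpleConfig({"type": "positional", "embed_dim": 8}))
    print(positional([0, 1, 2]).shape)  # expected: torch.Size([3, 8])

    vocab = from_config(SimpleConfig(
        {"type": "dynamic-vocab", "embed_dim": 8, "max_vocab_size": 16}))
    print(vocab(["load", "store", "load"]).shape)  # expected: torch.Size([3, 8])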
|
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import itertools
import re
from oslo_log import log as logging
from oslo_utils import strutils
import six
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova import quota
from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
_STATE_MAP = {
vm_states.ACTIVE: {
'default': 'ACTIVE',
task_states.REBOOTING: 'REBOOT',
task_states.REBOOT_PENDING: 'REBOOT',
task_states.REBOOT_STARTED: 'REBOOT',
task_states.REBOOTING_HARD: 'HARD_REBOOT',
task_states.REBOOT_PENDING_HARD: 'HARD_REBOOT',
task_states.REBOOT_STARTED_HARD: 'HARD_REBOOT',
task_states.UPDATING_PASSWORD: 'PASSWORD',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
task_states.MIGRATING: 'MIGRATING',
task_states.RESIZE_PREP: 'RESIZE',
task_states.RESIZE_MIGRATING: 'RESIZE',
task_states.RESIZE_MIGRATED: 'RESIZE',
task_states.RESIZE_FINISH: 'RESIZE',
},
vm_states.BUILDING: {
'default': 'BUILD',
},
vm_states.STOPPED: {
'default': 'SHUTOFF',
task_states.RESIZE_PREP: 'RESIZE',
task_states.RESIZE_MIGRATING: 'RESIZE',
task_states.RESIZE_MIGRATED: 'RESIZE',
task_states.RESIZE_FINISH: 'RESIZE',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
},
vm_states.RESIZED: {
'default': 'VERIFY_RESIZE',
# Note(maoy): the OS API spec 1.1 doesn't have CONFIRMING_RESIZE
# state so we comment that out for future reference only.
# task_states.RESIZE_CONFIRMING: 'CONFIRMING_RESIZE',
task_states.RESIZE_REVERTING: 'REVERT_RESIZE',
},
vm_states.PAUSED: {
'default': 'PAUSED',
task_states.MIGRATING: 'MIGRATING',
},
vm_states.SUSPENDED: {
'default': 'SUSPENDED',
},
vm_states.RESCUED: {
'default': 'RESCUE',
},
vm_states.ERROR: {
'default': 'ERROR',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
},
vm_states.DELETED: {
'default': 'DELETED',
},
vm_states.SOFT_DELETED: {
'default': 'SOFT_DELETED',
},
vm_states.SHELVED: {
'default': 'SHELVED',
},
vm_states.SHELVED_OFFLOADED: {
'default': 'SHELVED_OFFLOADED',
},
}
def status_from_state(vm_state, task_state='default'):
"""Given vm_state and task_state, return a status string."""
task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN'))
status = task_map.get(task_state, task_map['default'])
if status == "UNKNOWN":
LOG.error("status is UNKNOWN from vm_state=%(vm_state)s "
"task_state=%(task_state)s. Bad upgrade or db "
"corrupted?",
{'vm_state': vm_state, 'task_state': task_state})
return status
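# Illustrative note (added): status_from_state(vm_states.ACTIVE,
# task_states.REBOOTING) resolves to 'REBOOT', while a task state with no
# entry in the map falls back to the vm_state's 'default' status.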
def task_and_vm_state_from_status(statuses):
"""Map the server's multiple status strings to list of vm states and
list of task states.
"""
vm_states = set()
task_states = set()
lower_statuses = [status.lower() for status in statuses]
for state, task_map in _STATE_MAP.items():
for task_state, mapped_state in task_map.items():
status_string = mapped_state
if status_string.lower() in lower_statuses:
vm_states.add(state)
task_states.add(task_state)
# Add sort to avoid different order on set in Python 3
return sorted(vm_states), sorted(task_states)
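# Illustrative note (added): task_and_vm_state_from_status(['RESIZE']) yields
# both the ACTIVE and STOPPED vm states plus the four RESIZE_* task states,
# since both vm states can surface the RESIZE status in _STATE_MAP above.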
def get_sort_params(input_params, default_key='created_at',
default_dir='desc'):
"""Retrieves sort keys/directions parameters.
Processes the parameters to create a list of sort keys and sort directions
that correspond to the 'sort_key' and 'sort_dir' parameter values. These
sorting parameters can be specified multiple times in order to generate
the list of sort keys and directions.
The input parameters are not modified.
:param input_params: webob.multidict of request parameters (from
nova.wsgi.Request.params)
:param default_key: default sort key value, added to the list if no
'sort_key' parameters are supplied
:param default_dir: default sort dir value, added to the list if no
'sort_dir' parameters are supplied
:returns: list of sort keys, list of sort dirs
"""
params = input_params.copy()
sort_keys = []
sort_dirs = []
while 'sort_key' in params:
sort_keys.append(params.pop('sort_key').strip())
while 'sort_dir' in params:
sort_dirs.append(params.pop('sort_dir').strip())
if len(sort_keys) == 0 and default_key:
sort_keys.append(default_key)
if len(sort_dirs) == 0 and default_dir:
sort_dirs.append(default_dir)
return sort_keys, sort_dirs
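# Illustrative note (added, assuming a webob multidict-style input): a request
# with ?sort_key=name&sort_key=uuid&sort_dir=asc would produce
# (['name', 'uuid'], ['asc']); with no parameters the defaults give
# (['created_at'], ['desc']).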
def get_pagination_params(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If 'limit' is not specified, 0, or
> max_limit, we default to max_limit. Negative values
for either marker or limit will cause
exc.HTTPBadRequest() exceptions to be raised.
"""
params = {}
if 'limit' in request.GET:
params['limit'] = _get_int_param(request, 'limit')
if 'page_size' in request.GET:
params['page_size'] = _get_int_param(request, 'page_size')
if 'marker' in request.GET:
params['marker'] = _get_marker_param(request)
if 'offset' in request.GET:
params['offset'] = _get_int_param(request, 'offset')
return params
def _get_int_param(request, param):
"""Extract integer param from request or fail."""
try:
int_param = utils.validate_integer(request.GET[param], param,
min_value=0)
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return int_param
def _get_marker_param(request):
"""Extract marker id from request or fail."""
return request.GET['marker']
def limited(items, request):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
"""
params = get_pagination_params(request)
offset = params.get('offset', 0)
limit = CONF.api.max_limit
limit = min(limit, params.get('limit') or limit)
return items[offset:(offset + limit)]
def get_limit_and_marker(request):
"""Get limited parameter from request."""
params = get_pagination_params(request)
limit = CONF.api.max_limit
limit = min(limit, params.get('limit', limit))
marker = params.get('marker', None)
return limit, marker
def get_id_from_href(href):
"""Return the id or uuid portion of a url.
Given: 'http://www.foo.com/bar/123?q=4'
Returns: '123'
Given: 'http://www.foo.com/bar/abc123?q=4'
Returns: 'abc123'
"""
return urlparse.urlsplit("%s" % href).path.split('/')[-1]
def remove_trailing_version_from_href(href):
"""Removes the api version from the href.
Given: 'http://www.nova.com/compute/v1.1'
Returns: 'http://www.nova.com/compute'
Given: 'http://www.nova.com/v1.1'
Returns: 'http://www.nova.com'
"""
parsed_url = urlparse.urlsplit(href)
url_parts = parsed_url.path.rsplit('/', 1)
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
if not expression.match(url_parts.pop()):
LOG.debug('href %s does not contain version', href)
raise ValueError(_('href %s does not contain version') % href)
new_path = url_join(*url_parts)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return urlparse.urlunsplit(parsed_url)
def check_img_metadata_properties_quota(context, metadata):
if not metadata:
return
try:
QUOTAS.limit_check(context, metadata_items=len(metadata))
except exception.OverQuota:
expl = _("Image metadata limit exceeded")
raise webob.exc.HTTPForbidden(explanation=expl)
def get_networks_for_instance_from_nw_info(nw_info):
networks = collections.OrderedDict()
for vif in nw_info:
ips = vif.fixed_ips()
floaters = vif.floating_ips()
label = vif['network']['label']
if label not in networks:
networks[label] = {'ips': [], 'floating_ips': []}
for ip in itertools.chain(ips, floaters):
ip['mac_address'] = vif['address']
networks[label]['ips'].extend(ips)
networks[label]['floating_ips'].extend(floaters)
return networks
def get_networks_for_instance(context, instance):
"""Returns a prepared nw_info list for passing into the view builders
We end up with a data structure like::
{'public': {'ips': [{'address': '10.0.0.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'},
{'address': '2001::1',
'version': 6,
'mac_address': 'aa:aa:aa:aa:aa:aa'}],
'floating_ips': [{'address': '172.16.0.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'},
{'address': '172.16.2.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'}]},
...}
"""
nw_info = compute_utils.get_nw_info_for_instance(instance)
return get_networks_for_instance_from_nw_info(nw_info)
def raise_http_conflict_for_instance_invalid_state(exc, action, server_id):
"""Raises a webob.exc.HTTPConflict instance containing a message
appropriate to return via the API based on the original
InstanceInvalidState exception.
"""
attr = exc.kwargs.get('attr')
state = exc.kwargs.get('state')
if attr is not None and state is not None:
msg = _("Cannot '%(action)s' instance %(server_id)s while it is in "
"%(attr)s %(state)s") % {'action': action, 'attr': attr,
'state': state,
'server_id': server_id}
else:
# At least give some meaningful message
msg = _("Instance %(server_id)s is in an invalid state for "
"'%(action)s'") % {'action': action, 'server_id': server_id}
raise webob.exc.HTTPConflict(explanation=msg)
def check_snapshots_enabled(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.api.allow_instance_snapshots:
LOG.warning('Rejecting snapshot request, snapshots currently'
' disabled')
msg = _("Instance snapshots are not permitted at this time.")
raise webob.exc.HTTPBadRequest(explanation=msg)
return f(*args, **kwargs)
return inner
def url_join(*parts):
"""Convenience method for joining parts of a URL
Any leading and trailing '/' characters are removed, and the parts joined
together with '/' as a separator. If last element of 'parts' is an empty
string, the returned URL will have a trailing slash.
"""
parts = parts or [""]
clean_parts = [part.strip("/") for part in parts if part]
if not parts[-1]:
# Empty last element should add a trailing slash
clean_parts.append("")
return "/".join(clean_parts)
class ViewBuilder(object):
"""Model API responses as dictionaries."""
def _get_project_id(self, request):
"""Get project id from request url if present or empty string
otherwise
"""
project_id = request.environ["nova.context"].project_id
if project_id in request.url:
return project_id
return ''
def _get_links(self, request, identifier, collection_name):
return [{
"rel": "self",
"href": self._get_href_link(request, identifier, collection_name),
},
{
"rel": "bookmark",
"href": self._get_bookmark_link(request,
identifier,
collection_name),
}]
def _get_next_link(self, request, identifier, collection_name):
"""Return href string with proper limit and marker params."""
params = collections.OrderedDict(sorted(request.params.items()))
params["marker"] = identifier
prefix = self._update_compute_link_prefix(request.application_url)
url = url_join(prefix,
self._get_project_id(request),
collection_name)
return "%s?%s" % (url, urlparse.urlencode(params))
def _get_href_link(self, request, identifier, collection_name):
"""Return an href string pointing to this object."""
prefix = self._update_compute_link_prefix(request.application_url)
return url_join(prefix,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier, collection_name):
"""Create a URL that refers to a specific resource."""
base_url = remove_trailing_version_from_href(request.application_url)
base_url = self._update_compute_link_prefix(base_url)
return url_join(base_url,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_collection_links(self,
request,
items,
collection_name,
id_key="uuid"):
"""Retrieve 'next' link, if applicable. This is included if:
1) 'limit' param is specified and equals the number of items.
2) 'limit' param is specified but it exceeds CONF.api.max_limit,
in this case the number of items is CONF.api.max_limit.
3) 'limit' param is NOT specified but the number of items is
CONF.api.max_limit.
"""
links = []
max_items = min(
int(request.params.get("limit", CONF.api.max_limit)),
CONF.api.max_limit)
if max_items and max_items == len(items):
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
elif 'id' in last_item:
last_item_id = last_item["id"]
else:
last_item_id = last_item["flavorid"]
links.append({
"rel": "next",
"href": self._get_next_link(request,
last_item_id,
collection_name),
})
return links
def _update_link_prefix(self, orig_url, prefix):
if not prefix:
return orig_url
url_parts = list(urlparse.urlsplit(orig_url))
prefix_parts = list(urlparse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
url_parts[2] = prefix_parts[2] + url_parts[2]
return urlparse.urlunsplit(url_parts).rstrip('/')
def _update_glance_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url, CONF.api.glance_link_prefix)
def _update_compute_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url, CONF.api.compute_link_prefix)
def get_instance(compute_api, context, instance_id, expected_attrs=None):
"""Fetch an instance from the compute API, handling error checking."""
try:
return compute_api.get(context, instance_id,
expected_attrs=expected_attrs)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
def normalize_name(name):
# NOTE(alex_xu): This method is used by the v2.1 legacy v2 compat mode.
# In the legacy v2 API some endpoints stripped whitespace from names and
# some did not. The v2.1 API disallows leading/trailing spaces, so for
# compatibility and consistency the compat mode accepts such names and
# strips the spaces here. Although a few legacy v2 APIs did not strip
# spaces, relying on leading/trailing whitespace to distinguish instances
# is a pointless use case, so stripping is safe.
return name.strip()
def raise_feature_not_supported(msg=None):
if msg is None:
msg = _("The requested functionality is not supported.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
def get_flavor(context, flavor_id):
try:
return objects.Flavor.get_by_flavor_id(context, flavor_id)
except exception.FlavorNotFound as error:
raise exc.HTTPNotFound(explanation=error.format_message())
def check_cells_enabled(function):
@functools.wraps(function)
def inner(*args, **kwargs):
if not CONF.cells.enable:
raise_feature_not_supported()
return function(*args, **kwargs)
return inner
def is_all_tenants(search_opts):
"""Checks to see if the all_tenants flag is in search_opts
:param dict search_opts: The search options for a request
:returns: boolean indicating if all_tenants are being requested or not
"""
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
all_tenants = strutils.bool_from_string(all_tenants, True)
except ValueError as err:
raise exception.InvalidInput(six.text_type(err))
else:
# The empty string is considered enabling all_tenants
all_tenants = 'all_tenants' in search_opts
return all_tenants
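# --- Usage sketch (added for illustration; exercises only the pure helpers) ---
# These checks mirror the docstring examples above and assume the module's
# imports (oslo, nova, webob) are available so the file itself can be imported.
if __name__ == "__main__":
    assert url_join("http://example.com", "v2.1", "servers") == \
        "http://example.com/v2.1/servers"
    assert url_join("a/", "/b", "") == "a/b/"  # empty last part keeps the slash
    assert get_id_from_href("http://www.foo.com/bar/abc123?q=4") == "abc123"
    assert remove_trailing_version_from_href(
        "http://www.nova.com/compute/v1.1") == "http://www.nova.com/compute"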
|
|
from sympy import (
symbols, log, ln, Float, nan, oo, zoo, I, pi, E, exp, Symbol,
LambertW, sqrt, Rational, expand_log, S, sign, conjugate,
sin, cos, sinh, cosh, tanh, exp_polar, re, Function, simplify)
def test_exp_values():
x, y = symbols('x,y')
k = Symbol('k', integer=True)
assert exp(nan) == nan
assert exp(oo) == oo
assert exp(-oo) == 0
assert exp(0) == 1
assert exp(1) == E
assert exp(-1 + x).as_base_exp() == (S.Exp1, x - 1)
assert exp(1 + x).as_base_exp() == (S.Exp1, x + 1)
assert exp(pi*I/2) == I
assert exp(pi*I) == -1
assert exp(3*pi*I/2) == -I
assert exp(2*pi*I) == 1
assert exp(pi*I*2*k) == 1
assert exp(pi*I*2*(k + Rational(1, 2))) == -1
assert exp(pi*I*2*(k + Rational(1, 4))) == I
assert exp(pi*I*2*(k + Rational(3, 4))) == -I
assert exp(log(x)) == x
assert exp(2*log(x)) == x**2
assert exp(pi*log(x)) == x**pi
assert exp(17*log(x) + E*log(y)) == x**17 * y**E
assert exp(x*log(x)) != x**x
assert exp(sin(x)*log(x)) != x
assert exp(3*log(x) + oo*x) == exp(oo*x) * x**3
assert exp(4*log(x)*log(y) + 3*log(x)) == x**3 * exp(4*log(x)*log(y))
def test_exp_log():
x = Symbol("x", real=True)
assert log(exp(x)) == x
assert exp(log(x)) == x
assert log(x).inverse() == exp
assert exp(x).inverse() == log
y = Symbol("y", polar=True)
z = Symbol("z")
assert log(exp_polar(z)) == z
assert exp(log(y)) == y
def test_exp_expand():
x = Symbol("x")
y = Symbol("y")
e = exp(log(Rational(2))*(1 + x) - log(Rational(2))*x)
assert e.expand() == 2
assert exp(x + y) != exp(x)*exp(y)
assert exp(x + y).expand() == exp(x)*exp(y)
def test_exp__as_base_exp():
x, y = symbols('x,y')
assert exp(x).as_base_exp() == (E, x)
assert exp(2*x).as_base_exp() == (E, 2*x)
assert exp(x*y).as_base_exp() == (E, x*y)
assert exp(-x).as_base_exp() == (E, -x)
# Pow( *expr.as_base_exp() ) == expr invariant should hold
assert E**x == exp(x)
assert E**(2*x) == exp(2*x)
assert E**(x*y) == exp(x*y)
assert exp(x).base is S.Exp1
assert exp(x).exp == x
def test_exp_infinity():
y = Symbol('y')
assert exp(I*y) != nan
assert exp(I*oo) == nan
assert exp(-I*oo) == nan
assert exp(y*I*oo) != nan
def test_exp_subs():
x, y = symbols('x,y')
e = (exp(3*log(x), evaluate=False)) # evaluates to x**3
assert e.subs(x**3, y**3) == e
assert e.subs(x**2, 5) == e
assert (x**3).subs(x**2, y) != y**(3/S(2))
assert exp(exp(x) + exp(x**2)).subs(exp(exp(x)), y) == y * exp(exp(x**2))
assert exp(x).subs(E, y) == y**x
x = symbols('x', real=True)
assert exp(5*x).subs(exp(7*x), y) == y**Rational(5, 7)
assert exp(2*x + 7).subs(exp(3*x), y) == y**Rational(2, 3) * exp(7)
x = symbols('x', positive=True)
assert exp(3*log(x)).subs(x**2, y) == y**Rational(3, 2)
# differentiate between E and exp
assert exp(exp(x + E)).subs(exp, 3) == 3**(3**(x + E))
assert exp(exp(x + E)).subs(E, 3) == 3**(3**(x + 3))
assert exp(3).subs(E, sin) == sin(3)
def test_exp_conjugate():
x = Symbol('x')
assert conjugate(exp(x)) == exp(conjugate(x))
def test_exp_rewrite():
x = symbols('x')
assert exp(x).rewrite(sin) == sinh(x) + cosh(x)
assert exp(x*I).rewrite(cos) == cos(x) + I*sin(x)
assert exp(1).rewrite(cos) == sinh(1) + cosh(1)
assert exp(1).rewrite(sin) == sinh(1) + cosh(1)
assert exp(x).rewrite(tanh) == (1 + tanh(x/2))/(1 - tanh(x/2))
def test_exp_leading_term():
x = symbols('x')
assert exp(x).as_leading_term(x) == 1
assert exp(1/x).as_leading_term(x) == exp(1/x)
assert exp(2 + x).as_leading_term(x) == exp(2)
def test_exp_taylor_term():
x = symbols('x')
assert exp(x).taylor_term(1, x) == x
assert exp(x).taylor_term(3, x) == x**3/6
def test_log_values():
assert log(nan) == nan
assert log(oo) == oo
assert log(-oo) == oo
assert log(zoo) == zoo
assert log(-zoo) == zoo
assert log(0) == zoo
assert log(1) == 0
assert log(-1) == I*pi
assert log(E) == 1
assert log(-E).expand() == 1 + I*pi
assert log(pi) == log(pi)
assert log(-pi).expand() == log(pi) + I*pi
assert log(17) == log(17)
assert log(-17) == log(17) + I*pi
assert log(I) == I*pi/2
assert log(-I) == -I*pi/2
assert log(17*I) == I*pi/2 + log(17)
assert log(-17*I).expand() == -I*pi/2 + log(17)
assert log(oo*I) == oo
assert log(-oo*I) == oo
assert exp(-log(3))**(-1) == 3
assert log(S.Half) == -log(2)
assert log(2*3).func is log
assert log(2*3**2).func is log
def test_log_base():
assert log(1, 2) == 0
assert log(2, 2) == 1
assert log(3, 2) == log(3)/log(2)
assert log(6, 2) == 1 + log(3)/log(2)
assert log(6, 3) == 1 + log(2)/log(3)
assert log(2**3, 2) == 3
assert log(3**3, 3) == 3
assert log(5, 1) == zoo
assert log(1, 1) == nan
assert log(Rational(2, 3), 10) == (-log(3) + log(2))/log(10)
assert log(Rational(2, 3), Rational(1, 3)) == -log(2)/log(3) + 1
assert log(Rational(2, 3), Rational(2, 5)) == \
(-log(3) + log(2))/(-log(5) + log(2))
def test_log_symbolic():
x, y = symbols('x,y')
assert log(x, exp(1)) == log(x)
assert log(exp(x)) != x
assert log(x, exp(1)) == log(x)
assert log(x*y) != log(x) + log(y)
assert log(x/y).expand() != log(x) - log(y)
assert log(x/y).expand(force=True) == log(x) - log(y)
assert log(x**y).expand() != y*log(x)
assert log(x**y).expand(force=True) == y*log(x)
assert log(x, 2) == log(x)/log(2)
assert log(E, 2) == 1/log(2)
p, q = symbols('p,q', positive=True)
r = Symbol('r', real=True)
assert log(p**2) != 2*log(p)
assert log(p**2).expand() == 2*log(p)
assert log(x**2).expand() != 2*log(x)
assert log(p**q) != q*log(p)
assert log(exp(p)) == p
assert log(p*q) != log(p) + log(q)
assert log(p*q).expand() == log(p) + log(q)
assert log(-sqrt(3)) == log(sqrt(3)) + I*pi
assert log(-exp(p)) != p + I*pi
assert log(-exp(x)).expand() != x + I*pi
assert log(-exp(r)).expand() == r + I*pi
assert log(x**y) != y*log(x)
assert (log(x**-5)**-1).expand() != -1/log(x)/5
assert (log(p**-5)**-1).expand() == -1/log(p)/5
assert log(-x).func is log and log(-x).args[0] == -x
assert log(-p).func is log and log(-p).args[0] == -p
def test_exp_assumptions():
x = Symbol('x')
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
for e in exp, exp_polar:
assert e(x).is_real is None
assert e(x).is_imaginary is None
assert e(i).is_real is None
assert e(i).is_imaginary is None
assert e(r).is_real is True
assert e(r).is_imaginary is False
assert e(re(x)).is_real is True
assert e(re(x)).is_imaginary is False
assert exp(0, evaluate=False).is_algebraic
a = Symbol('a', algebraic=True)
an = Symbol('an', algebraic=True, nonzero=True)
r = Symbol('r', rational=True)
rn = Symbol('rn', rational=True, nonzero=True)
assert exp(a).is_algebraic is None
assert exp(an).is_algebraic is False
assert exp(pi*r).is_algebraic is None
assert exp(pi*rn).is_algebraic is False
def test_log_assumptions():
p = symbols('p', positive=True)
n = symbols('n', negative=True)
z = symbols('z', zero=True)
x = symbols('x', infinite=True, positive=True)
assert log(z).is_positive is False
assert log(x).is_positive is True
assert log(2) > 0
assert log(1, evaluate=False).is_zero
assert log(1 + z).is_zero
assert log(p).is_zero is None
assert log(n).is_zero is False
assert log(0.5).is_negative is True
assert log(exp(p) + 1).is_positive
assert log(1, evaluate=False).is_algebraic
assert log(42, evaluate=False).is_algebraic is False
assert log(1 + z).is_rational
def test_log_hashing():
x = Symbol("y")
assert x != log(log(x))
assert hash(x) != hash(log(log(x)))
assert log(x) != log(log(log(x)))
e = 1/log(log(x) + log(log(x)))
assert e.base.func is log
e = 1/log(log(x) + log(log(log(x))))
assert e.base.func is log
x = Symbol("x")
e = log(log(x))
assert e.func is log
assert x.func is not log
assert hash(log(log(x))) != hash(x)
assert e != x
def test_log_sign():
assert sign(log(2)) == 1
def test_log_expand_complex():
assert log(1 + I).expand(complex=True) == log(2)/2 + I*pi/4
assert log(1 - sqrt(2)).expand(complex=True) == log(sqrt(2) - 1) + I*pi
def test_log_apply_evalf():
value = (log(3)/log(2) - 1).evalf()
assert value.epsilon_eq(Float("0.58496250072115618145373"))
def test_log_expand():
w = Symbol("w", positive=True)
e = log(w**(log(5)/log(3)))
assert e.expand() == log(5)/log(3) * log(w)
x, y, z = symbols('x,y,z', positive=True)
assert log(x*(y + z)).expand(mul=False) == log(x) + log(y + z)
assert log(log(x**2)*log(y*z)).expand() in [log(2*log(x)*log(y) +
2*log(x)*log(z)), log(log(x)*log(z) + log(y)*log(x)) + log(2),
log((log(y) + log(z))*log(x)) + log(2)]
assert log(x**log(x**2)).expand(deep=False) == log(x)*log(x**2)
assert log(x**log(x**2)).expand() == 2*log(x)**2
assert (log(x*(y + z))*(x + y)).expand(mul=True, log=True) == x*log(
x) + x*log(y + z) + y*log(x) + y*log(y + z)
x, y = symbols('x,y')
assert log(x*y).expand(force=True) == log(x) + log(y)
assert log(x**y).expand(force=True) == y*log(x)
assert log(exp(x)).expand(force=True) == x
# there's generally no need to expand out logs since this requires
# factoring and if simplification is sought, it's cheaper to put
# logs together than it is to take them apart.
assert log(2*3**2).expand() != 2*log(3) + log(2)
def test_log_simplify():
x = Symbol("x", positive=True)
assert log(x**2).expand() == 2*log(x)
assert expand_log(log(x**(2 + log(2)))) == (2 + log(2))*log(x)
def test_lambertw():
x = Symbol('x')
k = Symbol('k')
assert LambertW(x, 0) == LambertW(x)
assert LambertW(x, 0, evaluate=False) != LambertW(x)
assert LambertW(0) == 0
assert LambertW(E) == 1
assert LambertW(-1/E) == -1
assert LambertW(-log(2)/2) == -log(2)
assert LambertW(oo) == oo
assert LambertW(0, 1) == -oo
assert LambertW(0, 42) == -oo
assert LambertW(-pi/2, -1) == -I*pi/2
assert LambertW(-1/E, -1) == -1
assert LambertW(-2*exp(-2), -1) == -2
assert LambertW(x**2).diff(x) == 2*LambertW(x**2)/x/(1 + LambertW(x**2))
assert LambertW(x, k).diff(x) == LambertW(x, k)/x/(1 + LambertW(x, k))
assert LambertW(sqrt(2)).evalf(30).epsilon_eq(
Float("0.701338383413663009202120278965", 30), 1e-29)
assert re(LambertW(2, -1)).evalf().epsilon_eq(Float("-0.834310366631110"))
assert LambertW(-1).is_real is False # issue 5215
assert LambertW(2, evaluate=False).is_real
p = Symbol('p', positive=True)
assert LambertW(p, evaluate=False).is_real
assert LambertW(p - 1, evaluate=False).is_real is None
assert LambertW(-p - 2/S.Exp1, evaluate=False).is_real is False
assert LambertW(S.Half, -1, evaluate=False).is_real is False
assert LambertW(-S.One/10, -1, evaluate=False).is_real
assert LambertW(-10, -1, evaluate=False).is_real is False
assert LambertW(-2, 2, evaluate=False).is_real is False
assert LambertW(0, evaluate=False).is_algebraic
na = Symbol('na', nonzero=True, algebraic=True)
assert LambertW(na).is_algebraic is False
def test_issue_5673():
e = LambertW(-1)
assert e.is_comparable is False
assert e.is_positive is not True
e2 = 1 - 1/(1 - exp(-1000))
assert e2.is_positive is not True
e3 = -2 + exp(exp(LambertW(log(2)))*LambertW(log(2)))
assert e3.is_nonzero is not True
def test_exp_expand_NC():
A, B, C = symbols('A,B,C', commutative=False)
x, y, z = symbols('x,y,z')
assert exp(A + B).expand() == exp(A + B)
assert exp(A + B + C).expand() == exp(A + B + C)
assert exp(x + y).expand() == exp(x)*exp(y)
assert exp(x + y + z).expand() == exp(x)*exp(y)*exp(z)
def test_as_numer_denom():
from sympy.abc import x
n = symbols('n', negative=True)
assert exp(x).as_numer_denom() == (exp(x), 1)
assert exp(-x).as_numer_denom() == (1, exp(x))
assert exp(-2*x).as_numer_denom() == (1, exp(2*x))
assert exp(-2).as_numer_denom() == (1, exp(2))
assert exp(n).as_numer_denom() == (1, exp(-n))
assert exp(-n).as_numer_denom() == (exp(-n), 1)
assert exp(-I*x).as_numer_denom() == (1, exp(I*x))
assert exp(-I*n).as_numer_denom() == (1, exp(I*n))
assert exp(-n).as_numer_denom() == (exp(-n), 1)
def test_polar():
x, y = symbols('x y', polar=True)
z = Symbol('z')
assert abs(exp_polar(I*4)) == 1
assert exp_polar(I*10).n() == exp_polar(I*10)
assert log(exp_polar(z)) == z
assert log(x*y).expand() == log(x) + log(y)
assert log(x**z).expand() == z*log(x)
assert exp_polar(3).exp == 3
# Compare exp(1.0*pi*I).
assert (exp_polar(1.0*pi*I).n(n=5)).as_real_imag()[1] >= 0
assert exp_polar(0).is_rational is True # issue 8008
def test_log_product():
from sympy.abc import n, m
i, j = symbols('i,j', positive=True, integer=True)
x, y = symbols('x,y', positive=True)
from sympy.concrete import Product, Sum
f, g = Function('f'), Function('g')
assert simplify(log(Product(x**i, (i, 1, n)))) == Sum(i*log(x), (i, 1, n))
assert simplify(log(Product(x**i*y**j, (i, 1, n), (j, 1, m)))) == \
log(Product(x**i*y**j, (i, 1, n), (j, 1, m)))
expr = log(Product(-2, (n, 0, 4)))
assert simplify(expr) == expr
def test_issue_8866():
x = Symbol('x')
assert simplify(log(x, 10, evaluate=False)) == simplify(log(x, 10))
assert expand_log(log(x, 10, evaluate=False)) == expand_log(log(x, 10))
y = Symbol('y', positive=True)
l1 = log(exp(y), exp(10))
b1 = log(exp(y), exp(5))
l2 = log(exp(y), exp(10), evaluate=False)
b2 = log(exp(y), exp(5), evaluate=False)
assert simplify(log(l1, b1)) == simplify(log(l2, b2))
assert expand_log(log(l1, b1)) == expand_log(log(l2, b2))
def test_issue_9116():
n = Symbol('n', positive=True, integer=True)
assert ln(n).is_nonnegative is True
assert log(n).is_nonnegative is True
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
# @author: Sridar Kandaswamy, skandasw@cisco.com, Cisco Systems, Inc.
# @author: Dan Florea, dflorea@cisco.com, Cisco Systems, Inc.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.common import topics
from neutron import context
from neutron.extensions import firewall as fw_ext
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.firewall.agents import firewall_agent_api as api
LOG = logging.getLogger(__name__)
class FWaaSL3PluginApi(api.FWaaSPluginApiMixin):
"""Agent side of the FWaaS agent to FWaaS Plugin RPC API."""
def __init__(self, topic, host):
super(FWaaSL3PluginApi, self).__init__(topic, host)
def get_firewalls_for_tenant(self, context, **kwargs):
"""Get the Firewalls with rules from the Plugin to send to driver."""
LOG.debug(_("Retrieve Firewall with rules from Plugin"))
return self.call(context,
self.make_msg('get_firewalls_for_tenant',
host=self.host),
topic=self.topic)
def get_tenants_with_firewalls(self, context, **kwargs):
"""Get all Tenants that have Firewalls configured from plugin."""
LOG.debug(_("Retrieve Tenants with Firewalls configured from Plugin"))
return self.call(context,
self.make_msg('get_tenants_with_firewalls',
host=self.host),
topic=self.topic)
class FWaaSL3AgentRpcCallback(api.FWaaSAgentRpcCallbackMixin):
"""FWaaS Agent support to be used by Neutron L3 agent."""
def __init__(self, conf):
LOG.debug(_("Initializing firewall agent"))
self.conf = conf
fwaas_driver_class_path = cfg.CONF.fwaas.driver
self.fwaas_enabled = cfg.CONF.fwaas.enabled
try:
self.fwaas_driver = importutils.import_object(
fwaas_driver_class_path)
LOG.debug(_("FWaaS Driver Loaded: '%s'"), fwaas_driver_class_path)
except ImportError:
msg = _('Error importing FWaaS device driver: %s')
raise ImportError(msg % fwaas_driver_class_path)
self.services_sync = False
self.root_helper = config.get_root_helper(conf)
# setup RPC to msg fwaas plugin
self.fwplugin_rpc = FWaaSL3PluginApi(topics.FIREWALL_PLUGIN,
conf.host)
super(FWaaSL3AgentRpcCallback, self).__init__(host=conf.host)
def _get_router_info_list_for_tenant(self, routers, tenant_id):
"""Returns the list of router info objects on which to apply the fw."""
root_ip = ip_lib.IPWrapper(self.root_helper)
# Get the routers for the tenant
router_ids = [
router['id']
for router in routers
if router['tenant_id'] == tenant_id]
local_ns_list = root_ip.get_namespaces(
self.root_helper) if self.conf.use_namespaces else []
router_info_list = []
# Pick up namespaces for Tenant Routers
for rid in router_ids:
if self.router_info[rid].use_namespaces:
router_ns = self.router_info[rid].ns_name()
if router_ns in local_ns_list:
router_info_list.append(self.router_info[rid])
else:
router_info_list.append(self.router_info[rid])
return router_info_list
def _invoke_driver_for_plugin_api(self, context, fw, func_name):
"""Invoke driver method for plugin API and provide status back."""
LOG.debug(_("%(func_name)s from agent for fw: %(fwid)s"),
{'func_name': func_name, 'fwid': fw['id']})
try:
routers = self.plugin_rpc.get_routers(context)
router_info_list = self._get_router_info_list_for_tenant(
routers,
fw['tenant_id'])
if not router_info_list:
LOG.debug(_('No Routers on tenant: %s'), fw['tenant_id'])
# fw was created before any routers were added, and if a
# delete is sent then we need to ack so that plugin can
# cleanup.
if func_name == 'delete_firewall':
self.fwplugin_rpc.firewall_deleted(context, fw['id'])
return
LOG.debug(_("Apply fw on Router List: '%s'"),
[ri.router['id'] for ri in router_info_list])
# call into the driver
try:
self.fwaas_driver.__getattribute__(func_name)(
router_info_list,
fw)
status = constants.ACTIVE
except fw_ext.FirewallInternalDriverError:
LOG.error(_("Firewall Driver Error for %(func_name)s "
"for fw: %(fwid)s"),
{'func_name': func_name, 'fwid': fw['id']})
status = constants.ERROR
# delete needs different handling
if func_name == 'delete_firewall':
if status == constants.ACTIVE:
self.fwplugin_rpc.firewall_deleted(context, fw['id'])
else:
self.fwplugin_rpc.set_firewall_status(
context,
fw['id'],
status)
except Exception:
LOG.exception(
_("FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"),
{'func_name': func_name, 'fwid': fw['id']})
self.services_sync = True
return
def _invoke_driver_for_sync_from_plugin(self, ctx, router_info_list, fw):
"""Invoke the delete driver method for status of PENDING_DELETE and
update method for all other status to (re)apply on driver which is
Idempotent.
"""
if fw['status'] == constants.PENDING_DELETE:
try:
self.fwaas_driver.delete_firewall(router_info_list, fw)
self.fwplugin_rpc.firewall_deleted(
ctx,
fw['id'])
except fw_ext.FirewallInternalDriverError:
LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s "
"for fw: %(fwid)s"),
{'fwmsg': fw['status'], 'fwid': fw['id']})
self.fwplugin_rpc.set_firewall_status(
ctx,
fw['id'],
constants.ERROR)
else:
# PENDING_UPDATE, PENDING_CREATE, ...
try:
self.fwaas_driver.update_firewall(router_info_list, fw)
status = constants.ACTIVE
except fw_ext.FirewallInternalDriverError:
LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s "
"for fw: %(fwid)s"),
{'fwmsg': fw['status'], 'fwid': fw['id']})
status = constants.ERROR
self.fwplugin_rpc.set_firewall_status(
ctx,
fw['id'],
status)
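# Summary (added comment): PENDING_DELETE firewalls are removed via the
# driver's delete_firewall() and acknowledged with firewall_deleted();
# every other pending state is (re)applied with update_firewall(), and the
# plugin is told ACTIVE on success or ERROR on FirewallInternalDriverError.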
def _process_router_add(self, ri):
"""On router add, get fw with rules from plugin and update driver."""
LOG.debug(_("Process router add, router_id: '%s'"), ri.router['id'])
routers = []
routers.append(ri.router)
router_info_list = self._get_router_info_list_for_tenant(
routers,
ri.router['tenant_id'])
if router_info_list:
# Get the firewall with rules
# for the tenant the router is on.
ctx = context.Context('', ri.router['tenant_id'])
fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx)
LOG.debug(_("Process router add, fw_list: '%s'"),
[fw['id'] for fw in fw_list])
for fw in fw_list:
self._invoke_driver_for_sync_from_plugin(
ctx,
router_info_list,
fw)
def process_router_add(self, ri):
"""On router add, get fw with rules from plugin and update driver."""
# avoid msg to plugin when fwaas is not configured
if not self.fwaas_enabled:
return
try:
self._process_router_add(ri)
except Exception:
LOG.exception(
_("FWaaS RPC info call failed for '%s'."),
ri.router['id'])
self.services_sync = True
def process_services_sync(self, ctx):
"""On RPC issues sync with plugin and apply the sync data."""
try:
# get all routers
routers = self.plugin_rpc.get_routers(ctx)
# get the list of tenants with firewalls configured
# from the plugin
tenant_ids = self.fwplugin_rpc.get_tenants_with_firewalls(ctx)
LOG.debug(_("Tenants with Firewalls: '%s'"), tenant_ids)
for tenant_id in tenant_ids:
ctx = context.Context('', tenant_id)
fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx)
if fw_list:
# if fw present on tenant
router_info_list = self._get_router_info_list_for_tenant(
routers,
tenant_id)
if router_info_list:
LOG.debug(_("Router List: '%s'"),
[ri.router['id'] for ri in router_info_list])
LOG.debug(_("fw_list: '%s'"),
[fw['id'] for fw in fw_list])
# apply sync data on fw for this tenant
for fw in fw_list:
# fw, routers present on this host for tenant
# install
LOG.debug(_("Apply fw on Router List: '%s'"),
[ri.router['id']
for ri in router_info_list])
# no need to apply sync data for ACTIVE fw
if fw['status'] != constants.ACTIVE:
self._invoke_driver_for_sync_from_plugin(
ctx,
router_info_list,
fw)
self.services_sync = False
except Exception:
LOG.exception(_("Failed fwaas process services sync"))
self.services_sync = True
def create_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to create a firewall."""
return self._invoke_driver_for_plugin_api(
context,
firewall,
'create_firewall')
def update_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to update a firewall."""
return self._invoke_driver_for_plugin_api(
context,
firewall,
'update_firewall')
def delete_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to delete a firewall."""
return self._invoke_driver_for_plugin_api(
context,
firewall,
'delete_firewall')
|
|
from django import forms
from django.core.exceptions import PermissionDenied
from django.db import router
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.template.response import TemplateResponse
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ungettext
from django.utils.text import capfirst
from xadmin.sites import site
from xadmin.util import model_format_dict, get_deleted_objects, model_ngettext
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.views.base import filter_hook, ModelAdminView
ACTION_CHECKBOX_NAME = '_selected_action'
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
def action_checkbox(obj):
return checkbox.render(ACTION_CHECKBOX_NAME, force_unicode(obj.pk))
action_checkbox.short_description = mark_safe(
'<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
action_checkbox.allow_export = False
action_checkbox.is_column = False
class BaseActionView(ModelAdminView):
action_name = None
description = None
icon = 'tasks'
model_perm = 'change'
@classmethod
def has_perm(cls, list_view):
return list_view.get_model_perms()[cls.model_perm]
def init_action(self, list_view):
self.list_view = list_view
self.admin_site = list_view.admin_site
@filter_hook
def do_action(self, queryset):
pass
class DeleteSelectedAction(BaseActionView):
action_name = "delete_selected"
description = _(u'Delete selected %(verbose_name_plural)s')
delete_confirmation_template = None
delete_selected_confirmation_template = None
model_perm = 'delete'
icon = 'remove'
@filter_hook
def delete_models(self, queryset):
n = queryset.count()
if n:
queryset.delete()
self.message_user(_("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(self.opts, n)
}, 'success')
@filter_hook
def do_action(self, queryset):
# Check that the user has delete permission for the actual model
if not self.has_delete_permission():
raise PermissionDenied
using = router.db_for_write(self.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, perms_needed, protected = get_deleted_objects(
queryset, self.opts, self.user, self.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if self.request.POST.get('post'):
if perms_needed:
raise PermissionDenied
self.delete_models(queryset)
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_unicode(self.opts.verbose_name)
else:
objects_name = force_unicode(self.opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = self.get_context()
context.update({
"title": title,
"objects_name": objects_name,
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": self.opts,
"app_label": self.app_label,
'action_checkbox_name': ACTION_CHECKBOX_NAME,
})
# Display the confirmation page
return TemplateResponse(self.request, self.delete_selected_confirmation_template or
self.get_template_list('views/model_delete_selected_confirm.html'), context, current_app=self.admin_site.name)
class ActionPlugin(BaseAdminPlugin):
# Actions
actions = []
actions_selection_counter = True
global_actions = [DeleteSelectedAction]
def init_request(self, *args, **kwargs):
self.actions = self.get_actions()
return bool(self.actions)
def get_list_display(self, list_display):
if self.actions:
list_display.insert(0, 'action_checkbox')
self.admin_view.action_checkbox = action_checkbox
return list_display
def get_list_display_links(self, list_display_links):
if self.actions:
if len(list_display_links) == 1 and list_display_links[0] == 'action_checkbox':
return list(self.admin_view.list_display[1:2])
return list_display_links
def get_context(self, context):
if self.actions and self.admin_view.result_count:
av = self.admin_view
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', av.result_count)
new_context = {
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(av.result_list)},
'selection_note_all': selection_note_all % {'total_count': av.result_count},
'action_choices': self.get_action_choices(),
'actions_selection_counter': self.actions_selection_counter,
}
context.update(new_context)
return context
def post_response(self, response, *args, **kwargs):
request = self.admin_view.request
av = self.admin_view
# Actions with no confirmation
if self.actions and 'action' in request.POST:
action = request.POST['action']
if action not in self.actions:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
av.message_user(msg)
else:
ac, name, description, icon = self.actions[action]
select_across = request.POST.get('select_across', False) == '1'
selected = request.POST.getlist(ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
av.message_user(msg)
else:
queryset = av.list_queryset._clone()
if not select_across:
# Perform the action only on the selected objects
queryset = av.list_queryset.filter(pk__in=selected)
response = self.response_action(ac, queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(request.get_full_path())
return response
def response_action(self, ac, queryset):
if isinstance(ac, type) and issubclass(ac, BaseActionView):
action_view = self.get_model_view(ac, self.admin_view.model)
action_view.init_action(self.admin_view)
return action_view.do_action(queryset)
else:
return ac(self.admin_view, self.request, queryset)
def get_actions(self):
if self.actions is None:
return SortedDict()
actions = [self.get_action(action) for action in self.global_actions]
for klass in self.admin_view.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
if not class_actions:
continue
actions.extend(
[self.get_action(action) for action in class_actions])
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into a SortedDict keyed by name.
actions = SortedDict([
(name, (ac, name, desc, icon))
for ac, name, desc, icon in actions
])
return actions
def get_action_choices(self):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description, icon).
"""
choices = []
for ac, name, description, icon in self.actions.itervalues():
choice = (name, description % model_format_dict(self.opts), icon)
choices.append(choice)
return choices
def get_action(self, action):
if isinstance(action, type) and issubclass(action, BaseActionView):
if not action.has_perm(self.admin_view):
return None
return action, getattr(action, 'action_name'), getattr(action, 'description'), getattr(action, 'icon')
elif callable(action):
func = action
action = action.__name__
elif hasattr(self.admin_view.__class__, action):
func = getattr(self.admin_view.__class__, action)
else:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description, getattr(func, 'icon', 'tasks')
# View Methods
def result_header(self, item, field_name, row):
if item.attr and field_name == 'action_checkbox':
item.classes.append("action-checkbox-column")
return item
def result_item(self, item, obj, field_name, row):
if item.field is None and field_name == u'action_checkbox':
item.classes.append("action-checkbox")
return item
# Media
def get_media(self, media):
if self.actions and self.admin_view.result_count:
media = media + self.vendor('xadmin.plugin.actions.js', 'xadmin.plugins.css')
return media
# Block Views
def block_results_bottom(self, context, nodes):
if self.actions and self.admin_view.result_count:
nodes.append(loader.render_to_string('xadmin/blocks/model_list.results_bottom.actions.html', context_instance=context))
site.register_plugin(ActionPlugin, ListAdminView)
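# --- Usage sketch (added for illustration; kept as comments because it relies
# --- on a hypothetical ``Article`` model that is not part of this module) ---
# A custom action only needs to subclass BaseActionView and be listed in the
# admin class's ``actions`` attribute; ActionPlugin.get_actions() collects it
# from the view's MRO and has_perm() gates it on ``model_perm``.
#
# class PublishSelectedAction(BaseActionView):
#     action_name = 'publish_selected'
#     description = _(u'Publish selected %(verbose_name_plural)s')
#     model_perm = 'change'
#     icon = 'ok'
#
#     def do_action(self, queryset):
#         queryset.update(published=True)  # 'published' is a hypothetical field
#         self.message_user(_("Published %(count)d items.") %
#                           {'count': queryset.count()}, 'success')
#
# class ArticleAdmin(object):
#     actions = [PublishSelectedAction]
#
# site.register(Article, ArticleAdmin)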
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.videointelligence_v1p3beta1.types import video_intelligence
from .transports.base import VideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import VideoIntelligenceServiceGrpcTransport
from .transports.grpc_asyncio import VideoIntelligenceServiceGrpcAsyncIOTransport
class VideoIntelligenceServiceClientMeta(type):
"""Metaclass for the VideoIntelligenceService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[VideoIntelligenceServiceTransport]]
_transport_registry["grpc"] = VideoIntelligenceServiceGrpcTransport
_transport_registry["grpc_asyncio"] = VideoIntelligenceServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[VideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class VideoIntelligenceServiceClient(metaclass=VideoIntelligenceServiceClientMeta):
"""Service that implements the Video Intelligence API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "videointelligence.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> VideoIntelligenceServiceTransport:
"""Returns the transport used by the client instance.
Returns:
VideoIntelligenceServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse an organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint;
        otherwise use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
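    # Worked example of the precedence above (illustrative; values are
    # placeholders): with GOOGLE_API_USE_CLIENT_CERTIFICATE unset (treated as
    # "false") and GOOGLE_API_USE_MTLS_ENDPOINT at its default of "auto", no
    # client cert source is picked up, so this method returns
    # (DEFAULT_ENDPOINT, None). Exporting GOOGLE_API_USE_MTLS_ENDPOINT=always
    # instead yields (DEFAULT_MTLS_ENDPOINT, None), and an explicit
    # client_options.api_endpoint always takes precedence over both.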
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, VideoIntelligenceServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, VideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, VideoIntelligenceServiceTransport):
# transport is a VideoIntelligenceServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def annotate_video(
self,
request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
*,
input_uri: str = None,
features: Sequence[video_intelligence.Feature] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Performs asynchronous video annotation. Progress and results can
be retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains
``AnnotateVideoProgress`` (progress). ``Operation.response``
contains ``AnnotateVideoResponse`` (results).
.. code-block:: python
from google.cloud import videointelligence_v1p3beta1
def sample_annotate_video():
# Create a client
client = videointelligence_v1p3beta1.VideoIntelligenceServiceClient()
# Initialize request argument(s)
request = videointelligence_v1p3beta1.AnnotateVideoRequest(
features="PERSON_DETECTION",
)
# Make the request
operation = client.annotate_video(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.videointelligence_v1p3beta1.types.AnnotateVideoRequest, dict]):
The request object. Video annotation request.
input_uri (str):
Input video location. Currently, only `Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are
supported. URIs must be specified in the following
format: ``gs://bucket-id/object-id`` (other URI formats
return
[google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
For more information, see `Request
URIs <https://cloud.google.com/storage/docs/request-endpoints>`__.
To identify multiple videos, a video URI may include
wildcards in the ``object-id``. Supported wildcards: '*'
to match 0 or more characters; '?' to match 1 character.
If unset, the input video should be embedded in the
request as ``input_content``. If set, ``input_content``
must be unset.
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
features (Sequence[google.cloud.videointelligence_v1p3beta1.types.Feature]):
Required. Requested video annotation
features.
This corresponds to the ``features`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.videointelligence_v1p3beta1.types.AnnotateVideoResponse` Video annotation response. Included in the response
field of the Operation returned by the GetOperation
call of the google::longrunning::Operations service.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([input_uri, features])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a video_intelligence.AnnotateVideoRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, video_intelligence.AnnotateVideoRequest):
request = video_intelligence.AnnotateVideoRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if input_uri is not None:
request.input_uri = input_uri
if features is not None:
request.features = features
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.annotate_video]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
video_intelligence.AnnotateVideoResponse,
metadata_type=video_intelligence.AnnotateVideoProgress,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-videointelligence",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("VideoIntelligenceServiceClient",)
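# A minimal usage sketch of the resource-path helpers defined on the client
# above (illustrative only; "example-project" and "us-east1" are placeholder
# values, and this function is never called by the library itself).
def _resource_path_roundtrip_example():
    path = VideoIntelligenceServiceClient.common_location_path(
        "example-project", "us-east1"
    )
    assert path == "projects/example-project/locations/us-east1"
    parsed = VideoIntelligenceServiceClient.parse_common_location_path(path)
    assert parsed == {"project": "example-project", "location": "us-east1"}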
|
|
from __future__ import absolute_import
import unittest
import caasp_nodes
from caasp_nodes import (get_expr_affected_by, get_from_args_or_with_expr,
get_replacement_for, get_with_prio)
from _utils import caasp_log
from . import Utils
try:
from mock import patch, MagicMock
except ImportError:
_mocking_lib_available = False
else:
_mocking_lib_available = True
caasp_nodes.__utils__ = Utils()
class TestGetFromArgsOrWithExpr(unittest.TestCase):
'''
Some basic tests for get_from_args_or_with_expr()
'''
def test_get_from_args_or_with_expr(self):
def do_test(**kwargs):
with patch('caasp_nodes.get_with_expr', MagicMock(return_value=[4, 5, 6])):
return get_from_args_or_with_expr(
'etcd_members', kwargs, 'G@roles:etcd')
res = do_test(etcd_members=[1, 2, 3])
self.assertEqual(res, [1, 2, 3],
'did not get the etcd members from the kwargs: {}'.format(res))
res = do_test(masters=[1, 2, 3])
self.assertEqual(res, [4, 5, 6],
                         'did not get the masters with the expression: {}'.format(res))
class TestGetWithPrio(unittest.TestCase):
'''
Some basic tests for get_with_prio()
'''
def setUp(self):
self.unassigned_node_1 = 'unassigned_node_1'
self.unassigned_node_2 = 'unassigned_node_2'
self.unassigned_node_3 = 'unassigned_node_3'
@unittest.skipIf(not _mocking_lib_available,
"no mocking library available (install rpm:python-mock)")
def test_get_with_prio_for_etcd(self):
'''
Check get_with_prio() tries to get as many nodes as
we requested.
'''
from caasp_nodes import _get_prio_etcd
etcd_prio = _get_prio_etcd()
counter = {'value': 0}
def mocked_get_with_expr(expr, **kwargs):
counter['value'] += 1
return [self.unassigned_node_1]
        # we always return unassigned_node_1, so we should
        # exhaust the priorities list looking for more nodes
with patch('caasp_nodes.get_with_expr', mocked_get_with_expr):
nodes = get_with_prio(2, 'etcd', etcd_prio)
self.assertIn(self.unassigned_node_1, nodes,
'unassigned_node_1 not found in list')
self.assertEqual(counter['value'], len(etcd_prio),
                         'priority list was not exhausted')
counter = {'value': 0}
def mocked_get_with_expr(expr, **kwargs):
counter['value'] += 1
return [self.unassigned_node_1, self.unassigned_node_2]
# now we will get all the nodes required in just one call
with patch('caasp_nodes.get_with_expr', mocked_get_with_expr):
nodes = get_with_prio(2, 'etcd', etcd_prio)
self.assertIn(self.unassigned_node_1, nodes,
'unassigned_node_1 not found in list')
self.assertIn(self.unassigned_node_2, nodes,
'unassigned_node_2 not found in list')
self.assertEqual(counter['value'], 1,
'unexpected number of calls ({}) to get_with_expr()'.format(counter['value']))
counter = {'value': 0}
def mocked_get_with_expr(expr, **kwargs):
counter['value'] += 1
return {1: [self.unassigned_node_1],
2: [self.unassigned_node_1, self.unassigned_node_2],
3: [self.unassigned_node_1, self.unassigned_node_2, self.unassigned_node_3]}[counter['value']]
# now we will return one more node every time we
# invoke get_with_expr()
with patch('caasp_nodes.get_with_expr',
mocked_get_with_expr):
nodes = get_with_prio(3, 'etcd', etcd_prio)
self.assertIn(self.unassigned_node_1, nodes,
'unassigned_node_1 not found in list')
self.assertIn(self.unassigned_node_2, nodes,
'unassigned_node_2 not found in list')
self.assertIn(self.unassigned_node_3, nodes,
'unassigned_node_3 not found in list')
self.assertEqual(counter['value'], 3,
'unexpected number of calls ({}) to get_with_expr()'.format(counter['value']))
class TestGetReplacementFor(unittest.TestCase):
'''
Some basic tests for get_replacement_for()
'''
def setUp(self):
self.ca = 'ca'
self.master_1 = 'master_1'
self.master_2 = 'master_2'
self.master_3 = 'master_3'
self.minion_1 = 'minion_1'
self.minion_2 = 'minion_2'
self.minion_3 = 'minion_3'
self.other_node = 'other_node'
self.masters = [self.master_1, self.master_2, self.master_3]
self.etcd_members = [self.master_1, self.master_2, self.master_3]
self.minions = [self.minion_1, self.minion_2, self.minion_3]
self.get_replacement_for_kwargs = {
'forbidden': [self.ca],
'etcd_members': self.etcd_members,
'masters': self.masters,
'minions': self.minions,
'booted_etcd_members': self.etcd_members,
'booted_masters': self.masters,
'booted_minions': self.minions
}
    def test_user_provided_for_etcd_master(self):
'''
Check the user-provided etcd & master replacement is valid,
at least for some roles
'''
replacement, roles = get_replacement_for(self.master_2,
replacement=self.other_node,
**self.get_replacement_for_kwargs)
self.assertEqual(replacement, self.other_node,
'unexpected replacement')
self.assertIn('etcd', roles,
'etcd role not found in replacement')
self.assertIn('kube-master', roles,
'kube-master role not found in replacement')
self.assertNotIn('kube-minion', roles,
'kube-minion role found in replacement')
def test_user_provided_for_etcd_minion(self):
'''
Check the user-provided etcd & minion replacement is valid,
at least for some roles
'''
kwargs = self.get_replacement_for_kwargs
# add one of the minions to the etcd cluster
etcd_members = [self.master_1, self.master_2, self.minion_1]
kwargs.update({
'etcd_members': etcd_members,
'booted_etcd_members': etcd_members,
})
replacement, roles = get_replacement_for(self.minion_1,
replacement=self.other_node,
**kwargs)
# when removing minion_1 (with roles minion and etcd), we can migrate
# both roles to a free node
self.assertEqual(replacement, self.other_node,
'unexpected replacement')
self.assertIn('etcd', roles,
'etcd role not found in replacement')
self.assertNotIn('kube-master', roles,
'kube-master role found in replacement')
self.assertIn('kube-minion', roles,
'kube-minion role not found in replacement')
# however, we can migrate only the etcd role to another minion
# (as it is a user provided replacement, it will raise an exception)
with self.assertRaises(caasp_log.ExecutionAborted):
replacement, roles = get_replacement_for(self.minion_1,
replacement=self.minion_3,
**kwargs)
def test_user_provided_for_minion(self):
'''
Check the user-provided minion replacement is valid,
at least for some roles
'''
replacement, roles = get_replacement_for(self.minion_3,
replacement=self.other_node,
**self.get_replacement_for_kwargs)
# the minion role should be migrated to other_node
self.assertEqual(replacement, self.other_node,
'unexpected replacement')
self.assertNotIn('etcd', roles,
'etcd role found in replacement')
self.assertNotIn('kube-master', roles,
'kube-master role found in replacement')
self.assertIn('kube-minion', roles,
'kube-minion role not found in replacement')
# check we cannot use an excluded node
with self.assertRaises(caasp_log.ExecutionAborted):
replacement, roles = get_replacement_for(self.minion_1,
replacement=self.minion_3,
excluded=[self.minion_3],
**self.get_replacement_for_kwargs)
def test_invalid_etcd_replacement(self):
'''
Check get_replacement_for() realizes a minion
is not a valid replacement for a master & etcd.
'''
# the master role cannot be migrated to a minion
with self.assertRaises(caasp_log.ExecutionAborted):
replacement, roles = get_replacement_for(self.master_2,
replacement=self.minion_3,
**self.get_replacement_for_kwargs)
def test_forbidden_replacement(self):
'''
Check get_replacement_for() realizes the CA
is not a valid replacement.
'''
# the master role cannot be migrated to a CA
with self.assertRaises(caasp_log.ExecutionAborted):
replacement, roles = get_replacement_for(self.master_2,
replacement=self.ca,
**self.get_replacement_for_kwargs)
def test_forbidden_target(self):
'''
Check get_replacement_for() realizes the CA
cannot be removed
'''
with self.assertRaises(caasp_log.ExecutionAborted):
replacement, roles = get_replacement_for(self.ca,
replacement=self.minion_3,
**self.get_replacement_for_kwargs)
@unittest.skipIf(not _mocking_lib_available,
"no mocking library available (install rpm:python-mock)")
def test_auto_etcd_replacement(self):
'''
Check we can get a replacement for a master, migrating all the
roles we can to that replacement...
'''
with patch('caasp_nodes._get_one_for_role', MagicMock(return_value=self.other_node)):
replacement, roles = get_replacement_for(self.master_2,
**self.get_replacement_for_kwargs)
        # we can migrate both the master and the etcd role to an empty node
self.assertEqual(replacement, self.other_node,
'unexpected replacement')
self.assertIn('etcd', roles,
'etcd role not found in replacement')
self.assertIn('kube-master', roles,
'kube-master role not found in replacement')
self.assertNotIn('kube-minion', roles,
'kube-minion role found in replacement')
with patch('caasp_nodes._get_one_for_role', MagicMock(return_value=self.minion_1)):
replacement, roles = get_replacement_for(self.master_2,
**self.get_replacement_for_kwargs)
# we can only migrate the etcd role (and not the master role) to a minion
self.assertEqual(replacement, self.minion_1,
'unexpected replacement')
self.assertIn('etcd', roles,
'etcd role not found in replacement')
self.assertNotIn('kube-master', roles,
'kube-master role not found in replacement')
with patch('caasp_nodes._get_one_for_role', MagicMock(return_value=self.master_3)):
replacement, roles = get_replacement_for(self.master_2,
**self.get_replacement_for_kwargs)
        # we cannot migrate master_2 to master_3: it is already a master and an etcd server
self.assertEqual(replacement, '',
'unexpected replacement ' + replacement)
class TestGetExprAffectedBy(unittest.TestCase):
'''
Some basic tests for get_expr_affected_by()
'''
def setUp(self):
self.ca = 'ca'
self.master_1 = 'master_1'
self.master_2 = 'master_2'
self.master_3 = 'master_3'
self.minion_1 = 'minion_1'
self.minion_2 = 'minion_2'
self.minion_3 = 'minion_3'
self.other_node = 'other_node'
self.only_etcd_1 = 'only_etcd_1'
self.masters = [self.master_1, self.master_2, self.master_3]
self.etcd_members = [self.master_1, self.master_2,
self.master_3, self.only_etcd_1]
self.minions = [self.minion_1, self.minion_2, self.minion_3]
self.common_expected_affected_matches = [
'G@bootstrap_complete:true',
'not G@bootstrap_in_progress:true',
'not G@update_in_progress:true',
'not G@node_removal_in_progress:true',
'not G@node_addition_in_progress:true'
]
def test_get_expr_affected_by_master_removal(self):
'''
        Calculate the expression for matching nodes affected by
a master (k8s master & etcd) node removal
'''
affected_expr = get_expr_affected_by(self.master_1,
masters=self.masters,
minions=self.minions,
etcd_members=self.etcd_members)
affected_items = affected_expr.split(' and ')
expected_matches = self.common_expected_affected_matches + [
'P@roles:(admin|etcd|kube-master|kube-minion)',
'not L@master_1',
]
for expr in expected_matches:
self.assertIn(expr, affected_items,
                          '{} is not in the affected expr: {}'.format(expr, affected_expr))
def test_get_expr_affected_by_etcd_removal(self):
'''
Calculate the expression for matching nodes affected by
        an etcd-only node removal
'''
affected_expr = get_expr_affected_by(self.only_etcd_1,
masters=self.masters,
minions=self.minions,
etcd_members=self.etcd_members)
affected_items = affected_expr.split(' and ')
expected_matches = self.common_expected_affected_matches + [
'P@roles:(etcd|kube-master)',
'not L@only_etcd_1']
for expr in expected_matches:
self.assertIn(expr, affected_items,
                          '{} is not in the affected expr: {}'.format(expr, affected_expr))
def test_get_expr_affected_by_etcd_removal_with_excluded(self):
'''
Same test, but with some excluded node
'''
affected_expr = get_expr_affected_by(self.only_etcd_1,
excluded=[self.master_2],
masters=self.masters,
minions=self.minions,
etcd_members=self.etcd_members)
affected_items = affected_expr.split(' and ')
expected_matches = self.common_expected_affected_matches + [
'P@roles:(etcd|kube-master)',
'not L@master_2,only_etcd_1']
for expr in expected_matches:
self.assertIn(expr, affected_items,
                          '{} is not in the affected expr: {}'.format(expr, affected_expr))
if __name__ == '__main__':
unittest.main()
|
|
"""
****
GEXF
****
Read and write graphs in GEXF format.
GEXF (Graph Exchange XML Format) is a language for describing complex
network structures, their associated data and dynamics.
This implementation does not support mixed graphs (directed and
undirected edges together).
Format
------
GEXF is an XML format. See http://gexf.net/format/schema.html for the
specification and http://gexf.net/format/basic.html for examples.
"""
# Based on the NetworkX GraphML reader
import itertools
import networkx as nx
from networkx.utils import open_file, make_str
try:
from xml.etree.cElementTree import Element, ElementTree, tostring
except ImportError:
try:
from xml.etree.ElementTree import Element, ElementTree, tostring
except ImportError:
pass
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)'])
__all__ = ['write_gexf', 'read_gexf', 'relabel_gexf_graph', 'generate_gexf']
@open_file(1,mode='wb')
def write_gexf(G, path, encoding='utf-8',prettyprint=True,version='1.1draft'):
"""Write G in GEXF format to path.
"GEXF (Graph Exchange XML Format) is a language for describing
    complex network structures, their associated data and dynamics" [1]_.
Parameters
----------
G : graph
A NetworkX graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_gexf(G, "test.gexf")
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
    If you want to specify an id, set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
version=version)
writer.add_graph(G)
writer.write(path)
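# A small usage sketch for the writer above (illustrative; the per-node 'viz'
# keys follow GEXFWriter.add_viz() further below, 'example.gexf' is just a
# placeholder filename, and this function is not called on import).
def _write_gexf_example():
    G = nx.path_graph(4)
    # optional visualisation data understood by the writer
    G.node[0]['viz'] = {'color': {'r': 255, 'g': 0, 'b': 0},
                        'size': 5.0,
                        'position': {'x': 1.0, 'y': 2.0, 'z': 0.0}}
    write_gexf(G, 'example.gexf')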
def generate_gexf(G, encoding='utf-8',prettyprint=True,version='1.1draft'):
    """Generate lines of GEXF format representation of G.
"GEXF (Graph Exchange XML Format) is a language for describing
    complex network structures, their associated data and dynamics" [1]_.
Parameters
----------
G : graph
A NetworkX graph
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> linefeed=chr(10) # linefeed=\n
>>> s=linefeed.join(nx.generate_gexf(G)) # doctest: +SKIP
>>> for line in nx.generate_gexf(G): # doctest: +SKIP
... print line
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
    If you want to specify an id, set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
version=version)
writer.add_graph(G)
for line in str(writer).splitlines():
yield line
@open_file(0,mode='rb')
def read_gexf(path,node_type=None,relabel=False,version='1.1draft'):
"""Read graph in GEXF format from path.
"GEXF (Graph Exchange XML Format) is a language for describing
    complex network structures, their associated data and dynamics" [1]_.
Parameters
----------
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
node_type: Python type (default: None)
Convert node ids to this type if not None.
relabel : bool (default: False)
If True relabel the nodes to use the GEXF node "label" attribute
instead of the node "id" attribute as the NetworkX node label.
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
edges together).
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
reader = GEXFReader(node_type=node_type,version=version)
if relabel:
G=relabel_gexf_graph(reader(path))
else:
G=reader(path)
return G
class GEXF(object):
# global register_namespace
versions={}
d={'NS_GEXF':"http://www.gexf.net/1.1draft",
'NS_VIZ':"http://www.gexf.net/1.1draft/viz",
'NS_XSI':"http://www.w3.org/2001/XMLSchema-instance",
'SCHEMALOCATION':' '.join(['http://www.gexf.net/1.1draft',
'http://www.gexf.net/1.1draft/gexf.xsd'
]),
'VERSION':'1.1'
}
versions['1.1draft']=d
d={'NS_GEXF':"http://www.gexf.net/1.2draft",
'NS_VIZ':"http://www.gexf.net/1.2draft/viz",
'NS_XSI':"http://www.w3.org/2001/XMLSchema-instance",
'SCHEMALOCATION':' '.join(['http://www.gexf.net/1.2draft',
'http://www.gexf.net/1.2draft/gexf.xsd'
]),
'VERSION':'1.2'
}
versions['1.2draft']=d
types=[(int,"integer"),
(float,"float"),
(float,"double"),
(bool,"boolean"),
(list,"string"),
(dict,"string"),
]
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
types.extend([
(str,"liststring"),
(str,"anyURI"),
(str,"string")])
except ValueError: # Python 2.6+
types.extend([
(str,"liststring"),
(str,"anyURI"),
(str,"string"),
(unicode,"liststring"),
(unicode,"anyURI"),
(unicode,"string")])
xml_type = dict(types)
python_type = dict(reversed(a) for a in types)
convert_bool={'true':True,'false':False}
# try:
# register_namespace = ET.register_namespace
# except AttributeError:
# def register_namespace(prefix, uri):
# ET._namespace_map[uri] = prefix
def set_version(self,version):
d=self.versions.get(version)
if d is None:
raise nx.NetworkXError('Unknown GEXF version %s'%version)
self.NS_GEXF = d['NS_GEXF']
self.NS_VIZ = d['NS_VIZ']
self.NS_XSI = d['NS_XSI']
        self.SCHEMALOCATION = d['SCHEMALOCATION']
self.VERSION=d['VERSION']
self.version=version
# register_namespace('viz', d['NS_VIZ'])
class GEXFWriter(GEXF):
# class for writing GEXF format files
# use write_gexf() function
def __init__(self, graph=None, encoding="utf-8",
mode='static',prettyprint=True,
version='1.1draft'):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GEXF writer requires '
                              'xml.etree.ElementTree')
self.prettyprint=prettyprint
self.mode=mode
self.encoding = encoding
self.set_version(version)
self.xml = Element("gexf",
{'xmlns':self.NS_GEXF,
'xmlns:xsi':self.NS_XSI,
'xmlns:viz':self.NS_VIZ,
'xsi:schemaLocation':self.SCHEMALOCATION,
'version':self.VERSION})
# counters for edge and attribute identifiers
self.edge_id=itertools.count()
self.attr_id=itertools.count()
# default attributes are stored in dictionaries
self.attr={}
self.attr['node']={}
self.attr['edge']={}
self.attr['node']['dynamic']={}
self.attr['node']['static']={}
self.attr['edge']['dynamic']={}
self.attr['edge']['static']={}
if graph is not None:
self.add_graph(graph)
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
s=tostring(self.xml).decode(self.encoding)
return s
def add_graph(self, G):
# Add a graph element to the XML
if G.is_directed():
default='directed'
else:
default='undirected'
graph_element = Element("graph",defaultedgetype=default,mode=self.mode)
self.graph_element=graph_element
self.add_nodes(G,graph_element)
self.add_edges(G,graph_element)
self.xml.append(graph_element)
def add_nodes(self, G, graph_element):
nodes_element = Element('nodes')
for node,data in G.nodes_iter(data=True):
node_data=data.copy()
node_id = make_str(node_data.pop('id', node))
kw={'id':node_id}
label = make_str(node_data.pop('label', node))
kw['label']=label
try:
pid=node_data.pop('pid')
kw['pid'] = make_str(pid)
except KeyError:
pass
# add node element with attributes
node_element = Element("node", **kw)
# add node element and attr subelements
default=G.graph.get('node_default',{})
node_data=self.add_parents(node_element, node_data)
if self.version=='1.1':
node_data=self.add_slices(node_element, node_data)
else:
node_data=self.add_spells(node_element, node_data)
node_data=self.add_viz(node_element,node_data)
node_data=self.add_attributes("node", node_element,
node_data, default)
nodes_element.append(node_element)
graph_element.append(nodes_element)
def add_edges(self, G, graph_element):
def edge_key_data(G):
# helper function to unify multigraph and graph edge iterator
if G.is_multigraph():
for u,v,key,data in G.edges_iter(data=True,keys=True):
edge_data=data.copy()
edge_data.update(key=key)
edge_id=edge_data.pop('id',None)
if edge_id is None:
edge_id=next(self.edge_id)
yield u,v,edge_id,edge_data
else:
for u,v,data in G.edges_iter(data=True):
edge_data=data.copy()
edge_id=edge_data.pop('id',None)
if edge_id is None:
edge_id=next(self.edge_id)
yield u,v,edge_id,edge_data
edges_element = Element('edges')
for u,v,key,edge_data in edge_key_data(G):
kw={'id':make_str(key)}
try:
edge_weight=edge_data.pop('weight')
kw['weight']=make_str(edge_weight)
except KeyError:
pass
try:
edge_type=edge_data.pop('type')
kw['type']=make_str(edge_type)
except KeyError:
pass
edge_element = Element("edge",
source=make_str(u),target=make_str(v),
**kw)
default=G.graph.get('edge_default',{})
edge_data=self.add_viz(edge_element,edge_data)
edge_data=self.add_attributes("edge", edge_element,
edge_data, default)
edges_element.append(edge_element)
graph_element.append(edges_element)
def add_attributes(self, node_or_edge, xml_obj, data, default):
# Add attrvalues to node or edge
attvalues=Element('attvalues')
if len(data)==0:
return data
if 'start' in data or 'end' in data:
mode='dynamic'
else:
mode='static'
for k,v in data.items():
# rename generic multigraph key to avoid any name conflict
if k == 'key':
k='networkx_key'
attr_id = self.get_attr_id(make_str(k), self.xml_type[type(v)],
node_or_edge, default, mode)
if type(v)==list:
# dynamic data
for val,start,end in v:
e=Element("attvalue")
e.attrib['for']=attr_id
e.attrib['value']=make_str(val)
if start is not None:
e.attrib['start']=make_str(start)
if end is not None:
e.attrib['end']=make_str(end)
attvalues.append(e)
else:
# static data
e=Element("attvalue")
e.attrib['for']=attr_id
e.attrib['value']=make_str(v)
attvalues.append(e)
xml_obj.append(attvalues)
return data
def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
# find the id of the attribute or generate a new id
try:
return self.attr[edge_or_node][mode][title]
except KeyError:
# generate new id
new_id=str(next(self.attr_id))
self.attr[edge_or_node][mode][title] = new_id
attr_kwargs = {"id":new_id, "title":title, "type":attr_type}
attribute=Element("attribute",**attr_kwargs)
# add subelement for data default value if present
default_title=default.get(title)
if default_title is not None:
default_element=Element("default")
default_element.text=make_str(default_title)
attribute.append(default_element)
            # now insert it into the XML
attributes_element=None
for a in self.graph_element.findall("attributes"):
# find existing attributes element by class and mode
a_class=a.get('class')
a_mode=a.get('mode','static') # default mode is static
if a_class==edge_or_node and a_mode==mode:
attributes_element=a
if attributes_element is None:
# create new attributes element
attr_kwargs = {"mode":mode,"class":edge_or_node}
attributes_element=Element('attributes', **attr_kwargs)
self.graph_element.insert(0,attributes_element)
attributes_element.append(attribute)
return new_id
def add_viz(self,element,node_data):
viz=node_data.pop('viz',False)
if viz:
color=viz.get('color')
if color is not None:
if self.VERSION=='1.1':
e=Element("{%s}color"%self.NS_VIZ,
r=str(color.get('r')),
g=str(color.get('g')),
b=str(color.get('b')),
)
else:
e=Element("{%s}color"%self.NS_VIZ,
r=str(color.get('r')),
g=str(color.get('g')),
b=str(color.get('b')),
a=str(color.get('a')),
)
element.append(e)
size=viz.get('size')
if size is not None:
e=Element("{%s}size"%self.NS_VIZ,value=str(size))
element.append(e)
thickness=viz.get('thickness')
if thickness is not None:
e=Element("{%s}thickness"%self.NS_VIZ,value=str(thickness))
element.append(e)
shape=viz.get('shape')
if shape is not None:
if shape.startswith('http'):
e=Element("{%s}shape"%self.NS_VIZ,
value='image',uri=str(shape))
else:
                    e=Element("{%s}shape"%self.NS_VIZ,value=str(shape))
element.append(e)
position=viz.get('position')
if position is not None:
e=Element("{%s}position"%self.NS_VIZ,
x=str(position.get('x')),
y=str(position.get('y')),
z=str(position.get('z')),
)
element.append(e)
return node_data
def add_parents(self,node_element,node_data):
parents=node_data.pop('parents',False)
if parents:
parents_element=Element('parents')
for p in parents:
e=Element('parent')
e.attrib['for']=str(p)
parents_element.append(e)
node_element.append(parents_element)
return node_data
def add_slices(self,node_element,node_data):
slices=node_data.pop('slices',False)
if slices:
slices_element=Element('slices')
for start,end in slices:
e=Element('slice',start=str(start),end=str(end))
slices_element.append(e)
node_element.append(slices_element)
return node_data
def add_spells(self,node_element,node_data):
spells=node_data.pop('spells',False)
if spells:
spells_element=Element('spells')
for start,end in spells:
e=Element('spell')
if start is not None:
e.attrib['start']=make_str(start)
if end is not None:
e.attrib['end']=make_str(end)
spells_element.append(e)
node_element.append(spells_element)
return node_data
def write(self, fh):
# Serialize graph G in GEXF to the open fh
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
header='<?xml version="1.0" encoding="%s"?>'%self.encoding
fh.write(header.encode(self.encoding))
document.write(fh, encoding=self.encoding)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class GEXFReader(GEXF):
# Class to read GEXF format files
# use read_gexf() function
def __init__(self, node_type=None,version='1.1draft'):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GEXF reader requires '
                              'xml.etree.ElementTree')
self.node_type=node_type
# assume simple graph and test for multigraph on read
self.simple_graph=True
self.set_version(version)
def __call__(self, stream):
self.xml = ElementTree(file=stream)
g=self.xml.find("{%s}graph" % self.NS_GEXF)
if g is not None:
return self.make_graph(g)
# try all the versions
for version in self.versions:
self.set_version(version)
g=self.xml.find("{%s}graph" % self.NS_GEXF)
if g is not None:
return self.make_graph(g)
raise nx.NetworkXError("No <graph> element in GEXF file")
def make_graph(self, graph_xml):
# mode is "static" or "dynamic"
graph_mode = graph_xml.get("mode", "")
self.dynamic=(graph_mode=='dynamic')
        # start with an empty MultiDiGraph or MultiGraph
edgedefault = graph_xml.get("defaultedgetype", None)
if edgedefault=='directed':
G=nx.MultiDiGraph()
else:
G=nx.MultiGraph()
# graph attributes
graph_start=graph_xml.get('start')
if graph_start is not None:
G.graph['start']=graph_start
graph_end=graph_xml.get('end')
if graph_end is not None:
G.graph['end']=graph_end
# node and edge attributes
attributes_elements=graph_xml.findall("{%s}attributes"%self.NS_GEXF)
# dictionaries to hold attributes and attribute defaults
node_attr={}
node_default={}
edge_attr={}
edge_default={}
for a in attributes_elements:
attr_class = a.get("class")
if attr_class=='node':
na,nd = self.find_gexf_attributes(a)
node_attr.update(na)
node_default.update(nd)
G.graph['node_default']=node_default
elif attr_class=='edge':
ea,ed = self.find_gexf_attributes(a)
edge_attr.update(ea)
edge_default.update(ed)
G.graph['edge_default']=edge_default
else:
                raise nx.NetworkXError("Unknown attribute class: %s" % attr_class)
# Hack to handle Gephi0.7beta bug
# add weight attribute
ea={'weight':{'type': 'double', 'mode': 'static', 'title': 'weight'}}
ed={}
edge_attr.update(ea)
edge_default.update(ed)
G.graph['edge_default']=edge_default
# add nodes
nodes_element=graph_xml.find("{%s}nodes" % self.NS_GEXF)
if nodes_element is not None:
for node_xml in nodes_element.findall("{%s}node" % self.NS_GEXF):
self.add_node(G, node_xml, node_attr)
# add edges
edges_element=graph_xml.find("{%s}edges" % self.NS_GEXF)
if edges_element is not None:
for edge_xml in edges_element.findall("{%s}edge" % self.NS_GEXF):
self.add_edge(G, edge_xml, edge_attr)
# switch to Graph or DiGraph if no parallel edges were found.
if self.simple_graph:
if G.is_directed():
G=nx.DiGraph(G)
else:
G=nx.Graph(G)
return G
def add_node(self, G, node_xml, node_attr, node_pid=None):
# add a single node with attributes to the graph
        # get attributes and subattributes for node
data = self.decode_attr_elements(node_attr, node_xml)
data = self.add_parents(data, node_xml) # add any parents
if self.version=='1.1':
data = self.add_slices(data, node_xml) # add slices
else:
data = self.add_spells(data, node_xml) # add spells
data = self.add_viz(data, node_xml) # add viz
data = self.add_start_end(data, node_xml) # add start/end
# find the node id and cast it to the appropriate type
node_id = node_xml.get("id")
if self.node_type is not None:
node_id=self.node_type(node_id)
# every node should have a label
node_label = node_xml.get("label")
data['label']=node_label
# parent node id
node_pid = node_xml.get("pid", node_pid)
if node_pid is not None:
data['pid']=node_pid
# check for subnodes, recursive
subnodes=node_xml.find("{%s}nodes" % self.NS_GEXF)
if subnodes is not None:
for node_xml in subnodes.findall("{%s}node" % self.NS_GEXF):
self.add_node(G, node_xml, node_attr, node_pid=node_id)
G.add_node(node_id, data)
def add_start_end(self, data, xml):
# start and end times
node_start = xml.get("start")
if node_start is not None:
data['start']=node_start
node_end = xml.get("end")
if node_end is not None:
data['end']=node_end
return data
def add_viz(self, data, node_xml):
# add viz element for node
viz={}
color=node_xml.find("{%s}color"%self.NS_VIZ)
if color is not None:
if self.VERSION=='1.1':
viz['color']={'r':int(color.get('r')),
'g':int(color.get('g')),
'b':int(color.get('b'))}
else:
viz['color']={'r':int(color.get('r')),
'g':int(color.get('g')),
'b':int(color.get('b')),
'a':float(color.get('a', 1)),
}
size=node_xml.find("{%s}size"%self.NS_VIZ)
if size is not None:
viz['size']=float(size.get('value'))
thickness=node_xml.find("{%s}thickness"%self.NS_VIZ)
if thickness is not None:
viz['thickness']=float(thickness.get('value'))
shape=node_xml.find("{%s}shape"%self.NS_VIZ)
if shape is not None:
viz['shape']=shape.get('shape')
if viz['shape']=='image':
viz['shape']=shape.get('uri')
position=node_xml.find("{%s}position"%self.NS_VIZ)
if position is not None:
viz['position']={'x':float(position.get('x',0)),
'y':float(position.get('y',0)),
'z':float(position.get('z',0))}
if len(viz)>0:
data['viz']=viz
return data
def add_parents(self, data, node_xml):
parents_element=node_xml.find("{%s}parents"%self.NS_GEXF)
if parents_element is not None:
data['parents']=[]
for p in parents_element.findall("{%s}parent"%self.NS_GEXF):
parent=p.get('for')
data['parents'].append(parent)
return data
def add_slices(self, data, node_xml):
slices_element=node_xml.find("{%s}slices"%self.NS_GEXF)
if slices_element is not None:
data['slices']=[]
for s in slices_element.findall("{%s}slice"%self.NS_GEXF):
start=s.get('start')
end=s.get('end')
data['slices'].append((start,end))
return data
def add_spells(self, data, node_xml):
spells_element=node_xml.find("{%s}spells"%self.NS_GEXF)
if spells_element is not None:
data['spells']=[]
for s in spells_element.findall("{%s}spell"%self.NS_GEXF):
start=s.get('start')
end=s.get('end')
data['spells'].append((start,end))
return data
def add_edge(self, G, edge_element, edge_attr):
# add an edge to the graph
# raise error if we find mixed directed and undirected edges
edge_direction = edge_element.get("type")
if G.is_directed() and edge_direction=='undirected':
raise nx.NetworkXError(\
"Undirected edge found in directed graph.")
if (not G.is_directed()) and edge_direction=='directed':
raise nx.NetworkXError(\
"Directed edge found in undirected graph.")
# Get source and target and recast type if required
source = edge_element.get("source")
target = edge_element.get("target")
if self.node_type is not None:
source=self.node_type(source)
target=self.node_type(target)
data = self.decode_attr_elements(edge_attr, edge_element)
data = self.add_start_end(data,edge_element)
# GEXF stores edge ids as an attribute
# NetworkX uses them as keys in multigraphs
# if networkx_key is not specified as an attribute
edge_id = edge_element.get("id")
if edge_id is not None:
data["id"] = edge_id
        # check if there is a 'networkx_key' attribute and, if so, use it as the edge key
multigraph_key = data.pop('networkx_key',None)
if multigraph_key is not None:
edge_id=multigraph_key
weight = edge_element.get('weight')
if weight is not None:
data['weight']=float(weight)
edge_label = edge_element.get("label")
if edge_label is not None:
data['label']=edge_label
if G.has_edge(source,target):
# seen this edge before - this is a multigraph
self.simple_graph=False
G.add_edge(source, target, key=edge_id, **data)
if edge_direction=='mutual':
G.add_edge(target, source, key=edge_id, **data)
def decode_attr_elements(self, gexf_keys, obj_xml):
# Use the key information to decode the attr XML
attr = {}
# look for outer "<attvalues>" element
attr_element=obj_xml.find("{%s}attvalues" % self.NS_GEXF)
if attr_element is not None:
# loop over <attvalue> elements
for a in attr_element.findall("{%s}attvalue" % self.NS_GEXF):
key = a.get('for') # for is required
try: # should be in our gexf_keys dictionary
title=gexf_keys[key]['title']
except KeyError:
raise nx.NetworkXError("No attribute defined for=%s"%key)
atype=gexf_keys[key]['type']
value=a.get('value')
if atype=='boolean':
value=self.convert_bool[value]
else:
value=self.python_type[atype](value)
if gexf_keys[key]['mode']=='dynamic':
# for dynamic graphs use list of three-tuples
# [(value1,start1,end1), (value2,start2,end2), etc]
start=a.get('start')
end=a.get('end')
if title in attr:
attr[title].append((value,start,end))
else:
attr[title]=[(value,start,end)]
else:
# for static graphs just assign the value
attr[title] = value
return attr
def find_gexf_attributes(self, attributes_element):
# Extract all the attributes and defaults
attrs = {}
defaults = {}
mode=attributes_element.get('mode')
for k in attributes_element.findall("{%s}attribute" % self.NS_GEXF):
attr_id = k.get("id")
title=k.get('title')
atype=k.get('type')
attrs[attr_id]={'title':title,'type':atype,'mode':mode}
# check for the "default" subelement of key element and add
default=k.find("{%s}default" % self.NS_GEXF)
if default is not None:
if atype=='boolean':
value=self.convert_bool[default.text]
else:
value=self.python_type[atype](default.text)
defaults[title]=value
return attrs,defaults
def relabel_gexf_graph(G):
"""Relabel graph using "label" node keyword for node label.
Parameters
----------
G : graph
A NetworkX graph read from GEXF data
Returns
-------
H : graph
      A NetworkX graph with relabeled nodes
Notes
-----
This function relabels the nodes in a NetworkX graph with the
"label" attribute. It also handles relabeling the specific GEXF
node attributes "parents", and "pid".
"""
# build mapping of node labels, do some error checking
try:
mapping=[(u,G.node[u]['label']) for u in G]
except KeyError:
raise nx.NetworkXError('Failed to relabel nodes: '
'missing node labels found. '
'Use relabel=False.')
x,y=zip(*mapping)
if len(set(y))!=len(G):
raise nx.NetworkXError('Failed to relabel nodes: '
'duplicate node labels found. '
'Use relabel=False.')
mapping=dict(mapping)
H=nx.relabel_nodes(G,mapping)
# relabel attributes
for n in G:
m=mapping[n]
H.node[m]['id']=n
if 'pid' in H.node[m]:
H.node[m]['pid']=mapping[G.node[n]['pid']]
if 'parents' in H.node[m]:
H.node[m]['parents']=[mapping[p] for p in G.node[n]['parents']]
return H
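# Illustrative equivalence (not additional API): read_gexf(path, relabel=True)
# simply applies relabel_gexf_graph() to the freshly read graph, so
#   H = read_gexf('example.gexf', relabel=True)
# behaves like
#   H = relabel_gexf_graph(read_gexf('example.gexf'))
# provided every node carries a unique, non-empty "label" attribute.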
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import xml.etree.cElementTree
    except ImportError:
raise SkipTest("xml.etree.cElementTree not available")
# fixture for nose tests
def teardown_module(module):
import os
try:
os.unlink('test.gexf')
    except OSError:
pass
|
|
#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "2.37"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
if not hasattr(__builtins__, 'True'): True, False = 1, 0
import re, sys, urllib, htmlentitydefs, codecs, StringIO, types
import sgmllib
import urlparse
sgmllib.charref = re.compile('&#([xX]?[0-9a-fA-F]+)[^0-9a-fA-F]')
try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ASCII pseudo-replacements
UNICODE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = False
### Entity Nonsense ###
def name2cp(k):
if k == 'apos': return ord("'")
if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
else:
k = htmlentitydefs.entitydefs[k]
if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
return ord(codecs.latin_1_decode(k)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u'}
unifiable_n = {}
for k in unifiable.keys():
unifiable_n[name2cp(k)] = unifiable[k]
def charref(name):
if name[0] in ['x','X']:
c = int(name[1:], 16)
else:
c = int(name)
if not UNICODE_SNOB and c in unifiable_n.keys():
return unifiable_n[c]
else:
return unichr(c)
def entityref(c):
if not UNICODE_SNOB and c in unifiable.keys():
return unifiable[c]
else:
try: name2cp(c)
except KeyError: return "&" + c
else: return unichr(name2cp(c))
def replaceEntities(s):
s = s.group(1)
if s[0] == "#":
return charref(s[1:])
else: return entityref(s)
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(s):
return r_unescape.sub(replaceEntities, s)
def fixattrs(attrs):
# Fix bug in sgmllib.py
if not attrs: return attrs
newattrs = []
for attr in attrs:
newattrs.append((attr[0], unescape(attr[1])))
return newattrs
### End Entity Nonsense ###
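# Quick illustration of the entity handling above (assuming the default
# UNICODE_SNOB = 0, so "unifiable" entities collapse to ASCII lookalikes):
#   unescape("it&rsquo;s &mdash; &#8220;quoted&#8221;")
#     -> 'it\'s -- "quoted"'
# With UNICODE_SNOB set to a true value, the real Unicode characters are
# returned instead.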
def onlywhite(line):
    """Return True if the line consists only of whitespace characters."""
    for c in line:
        if c != ' ' and c != '\t':
            return False
    return line
def optwrap(text):
"""Wrap all paragraphs in the provided text."""
if not BODY_WIDTH:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if para[0] is not ' ' and para[0] is not '-' and para[0] is not '*':
for line in wrap(para, BODY_WIDTH):
result += line + "\n"
result += "\n"
newlines = 2
else:
if not onlywhite(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result
def hn(tag):
if tag[0] == 'h' and len(tag) == 2:
try:
n = int(tag[1])
if n in range(1, 10): return n
except ValueError: return 0
class _html2text(sgmllib.SGMLParser):
def __init__(self, out=None, baseurl=''):
sgmllib.SGMLParser.__init__(self)
if out is None: self.out = self.outtextf
else: self.out = out
self.outtext = u''
self.quiet = 0
self.p_p = 0
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.lastWasNL = 0
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
def outtextf(self, s):
self.outtext += s
def close(self):
sgmllib.SGMLParser.close(self)
self.pbr()
self.o('', 0, 'end')
return self.outtext
def handle_charref(self, c):
self.o(charref(c))
def handle_entityref(self, c):
self.o(entityref(c))
def unknown_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def unknown_endtag(self, tag):
self.handle_tag(tag, None, 0)
    def previousIndex(self, attrs):
        """ returns the index of a certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None
"""
if not attrs.has_key('href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if a.has_key('href') and a['href'] == attrs['href']:
if a.has_key('title') or attrs.has_key('title'):
if (a.has_key('title') and attrs.has_key('title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i
def handle_tag(self, tag, attrs, start):
attrs = fixattrs(attrs)
if hn(tag):
self.p()
if start: self.o(hn(tag)*"#" + ' ')
if tag in ['p', 'div']: self.p()
if tag == "br" and start: self.o(" \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start: self.quiet += 1
else: self.quiet -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p(); self.o('> ', 0, 1); self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u']: self.o("_")
if tag in ['strong', 'b']: self.o("**")
if tag == "code" and not self.pre: self.o('`') #TODO: `` `this` ``
if tag == "abbr":
if start:
attrsD = {}
for (x, y) in attrs: attrsD[x] = y
attrs = attrsD
self.abbr_title = None
self.abbr_data = ''
if attrs.has_key('title'):
self.abbr_title = attrs['title']
else:
if self.abbr_title != None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a":
if start:
attrsD = {}
for (x, y) in attrs: attrsD[x] = y
attrs = attrsD
if attrs.has_key('href') and not (SKIP_INTERNAL_LINKS and attrs['href'].startswith('#')):
self.astack.append(attrs)
self.o("[")
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if a:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + `a['count']` + "]")
if tag == "img" and start:
attrsD = {}
for (x, y) in attrs: attrsD[x] = y
attrs = attrsD
if attrs.has_key('src'):
attrs['href'] = attrs['src']
alt = attrs.get('alt', '')
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("![")
self.o(alt)
self.o("]["+`attrs['count']`+"]")
if tag == 'dl' and start: self.p()
if tag == 'dt' and not start: self.pbr()
if tag == 'dd' and start: self.o(' ')
if tag == 'dd' and not start: self.pbr()
if tag in ["ol", "ul"]:
if start:
self.list.append({'name':tag, 'num':0})
else:
if self.list: self.list.pop()
self.p()
if tag == 'li':
if start:
self.pbr()
if self.list: li = self.list[-1]
else: li = {'name':'ul', 'num':0}
self.o(" "*len(self.list)) #TODO: line up <ol><li>s > 9 correctly.
if li['name'] == "ul": self.o("* ")
elif li['name'] == "ol":
li['num'] += 1
self.o(`li['num']`+". ")
self.start = 1
else:
self.pbr()
if tag in ["table", "tr"] and start: self.p()
if tag == 'td': self.pbr()
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
self.p()
def pbr(self):
if self.p_p == 0: self.p_p = 1
def p(self): self.p_p = 2
def o(self, data, puredata=0, force=0):
if self.abbr_data is not None: self.abbr_data += data
if not self.quiet:
if puredata and not self.pre:
data = re.sub('\s+', ' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force: return
if self.startpre:
#self.out(" :") #TODO: not output when already one there
self.startpre = 0
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote: bq += " "
if self.pre:
bq += " "
data = data.replace("\n", "\n"+bq)
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
# It's the end.
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out(('\n'+bq)*self.p_p)
self.space = 0
if self.space:
if not self.lastWasNL: self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and LINKS_EACH_PARAGRAPH) or force == "end"):
if force == "end": self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(" ["+`link['count']`+"]: " + urlparse.urljoin(self.baseurl, link['href']))
if link.has_key('title'): self.out(" ("+link['title']+")")
self.out("\n")
else:
newa.append(link)
if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
self.out(" *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.lastWasNL = data and data[-1] == '\n'
self.outcount += 1
def handle_data(self, data):
if r'\/script>' in data: self.quiet -= 1
self.o(data, 1)
def unknown_decl(self, data): pass
def wrapwrite(text): sys.stdout.write(text.encode('utf8'))
def html2text_file(html, out=wrapwrite, baseurl=''):
h = _html2text(out, baseurl)
h.feed(html)
h.feed("")
return h.close()
def html2text(html, baseurl=''):
return optwrap(html2text_file(html, None, baseurl))
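# Illustrative usage sketch (not part of the original source). The markup below
# is made up; the output shown is the rough Markdown rendering.
#
#   >>> print html2text('<h1>Title</h1><p>Some <b>bold</b> text.</p>')
#   # Title
#
#   Some **bold** text.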
if __name__ == "__main__":
baseurl = ''
if sys.argv[1:]:
arg = sys.argv[1]
if arg.startswith('http://'):
baseurl = arg
j = urllib.urlopen(baseurl)
try:
from feedparser import _getCharacterEncoding as enc
except ImportError:
enc = lambda x, y: ('utf-8', 1)
text = j.read()
encoding = enc(j.headers, text)[0]
if encoding == 'us-ascii': encoding = 'utf-8'
data = text.decode(encoding)
else:
encoding = 'utf8'
if len(sys.argv) > 2:
encoding = sys.argv[2]
data = open(arg, 'r').read().decode(encoding)
else:
data = sys.stdin.read().decode('utf8')
wrapwrite(html2text(data, baseurl))
|
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle
import os
import re
import subprocess
import sys
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import time
import urllib2
from signal import SIGTERM
from error import GitError, UploadError
from trace import Trace
from git_command import GitCommand
from git_command import ssh_sock
from git_command import terminate_ssh_clients
R_HEADS = 'refs/heads/'
R_TAGS = 'refs/tags/'
ID_RE = re.compile('^[0-9a-f]{40}$')
REVIEW_CACHE = dict()
def IsId(rev):
return ID_RE.match(rev)
def _key(name):
parts = name.split('.')
if len(parts) < 2:
return name.lower()
parts[ 0] = parts[ 0].lower()
parts[-1] = parts[-1].lower()
return '.'.join(parts)
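# Illustrative sketch (not part of the original source): _key() lowercases the
# section and the variable name but preserves the case of any subsection,
# matching git's own config-key semantics.
#
#   >>> _key('Remote.origin.URL')
#   'remote.origin.url'
#   >>> _key('branch.MyTopic.Merge')
#   'branch.MyTopic.merge'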
class GitConfig(object):
_ForUser = None
@classmethod
def ForUser(cls):
if cls._ForUser is None:
cls._ForUser = cls(file = os.path.expanduser('~/.gitconfig'))
return cls._ForUser
@classmethod
def ForRepository(cls, gitdir, defaults=None):
return cls(file = os.path.join(gitdir, 'config'),
defaults = defaults)
def __init__(self, file, defaults=None, pickleFile=None):
self.file = file
self.defaults = defaults
self._cache_dict = None
self._section_dict = None
self._remotes = {}
self._branches = {}
if pickleFile is None:
self._pickle = os.path.join(
os.path.dirname(self.file),
'.repopickle_' + os.path.basename(self.file))
else:
self._pickle = pickleFile
def Has(self, name, include_defaults = True):
"""Return true if this configuration file has the key.
"""
if _key(name) in self._cache:
return True
if include_defaults and self.defaults:
return self.defaults.Has(name, include_defaults = True)
return False
def GetBoolean(self, name):
"""Returns a boolean from the configuration file.
None : The value was not defined, or is not a boolean.
True : The value was set to true or yes.
False: The value was set to false or no.
"""
v = self.GetString(name)
if v is None:
return None
v = v.lower()
if v in ('true', 'yes'):
return True
if v in ('false', 'no'):
return False
return None
def GetString(self, name, all=False):
"""Get the first value for a key, or None if it is not defined.
        This configuration file is used first; if the key is not
        defined, or all = True, then the defaults are also searched.
"""
try:
v = self._cache[_key(name)]
except KeyError:
if self.defaults:
return self.defaults.GetString(name, all = all)
v = []
if not all:
if v:
return v[0]
return None
r = []
r.extend(v)
if self.defaults:
r.extend(self.defaults.GetString(name, all = True))
return r
def SetString(self, name, value):
"""Set the value(s) for a key.
Only this configuration file is modified.
The supplied value should be either a string,
or a list of strings (to store multiple values).
"""
key = _key(name)
try:
old = self._cache[key]
except KeyError:
old = []
if value is None:
if old:
del self._cache[key]
self._do('--unset-all', name)
elif isinstance(value, list):
if len(value) == 0:
self.SetString(name, None)
elif len(value) == 1:
self.SetString(name, value[0])
elif old != value:
self._cache[key] = list(value)
self._do('--replace-all', name, value[0])
for i in xrange(1, len(value)):
self._do('--add', name, value[i])
elif len(old) != 1 or old[0] != value:
self._cache[key] = [value]
self._do('--replace-all', name, value)
def GetRemote(self, name):
"""Get the remote.$name.* configuration values as an object.
"""
try:
r = self._remotes[name]
except KeyError:
r = Remote(self, name)
self._remotes[r.name] = r
return r
def GetBranch(self, name):
"""Get the branch.$name.* configuration values as an object.
"""
try:
b = self._branches[name]
except KeyError:
b = Branch(self, name)
self._branches[b.name] = b
return b
def GetSubSections(self, section):
"""List all subsection names matching $section.*.*
"""
return self._sections.get(section, set())
def HasSection(self, section, subsection = ''):
"""Does at least one key in section.subsection exist?
"""
try:
return subsection in self._sections[section]
except KeyError:
return False
def UrlInsteadOf(self, url):
"""Resolve any url.*.insteadof references.
"""
for new_url in self.GetSubSections('url'):
old_url = self.GetString('url.%s.insteadof' % new_url)
if old_url is not None and url.startswith(old_url):
return new_url + url[len(old_url):]
return url
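    # Illustrative sketch (not part of the original source): with a git config
    # section such as (hostnames made up)
    #
    #   [url "git://mirror.example.com/"]
    #       insteadof = https://upstream.example.com/
    #
    # UrlInsteadOf('https://upstream.example.com/tools/repo.git') would return
    # 'git://mirror.example.com/tools/repo.git'.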
@property
def _sections(self):
d = self._section_dict
if d is None:
d = {}
for name in self._cache.keys():
p = name.split('.')
if 2 == len(p):
section = p[0]
subsect = ''
else:
section = p[0]
subsect = '.'.join(p[1:-1])
if section not in d:
d[section] = set()
d[section].add(subsect)
self._section_dict = d
return d
@property
def _cache(self):
if self._cache_dict is None:
self._cache_dict = self._Read()
return self._cache_dict
def _Read(self):
d = self._ReadPickle()
if d is None:
d = self._ReadGit()
self._SavePickle(d)
return d
def _ReadPickle(self):
try:
if os.path.getmtime(self._pickle) \
<= os.path.getmtime(self.file):
os.remove(self._pickle)
return None
except OSError:
return None
try:
Trace(': unpickle %s', self.file)
fd = open(self._pickle, 'rb')
try:
return cPickle.load(fd)
finally:
fd.close()
except EOFError:
os.remove(self._pickle)
return None
except IOError:
os.remove(self._pickle)
return None
except cPickle.PickleError:
os.remove(self._pickle)
return None
def _SavePickle(self, cache):
try:
fd = open(self._pickle, 'wb')
try:
cPickle.dump(cache, fd, cPickle.HIGHEST_PROTOCOL)
finally:
fd.close()
except IOError:
if os.path.exists(self._pickle):
os.remove(self._pickle)
except cPickle.PickleError:
if os.path.exists(self._pickle):
os.remove(self._pickle)
def _ReadGit(self):
"""
Read configuration data from git.
This internal method populates the GitConfig cache.
"""
c = {}
d = self._do('--null', '--list')
if d is None:
return c
for line in d.rstrip('\0').split('\0'):
if '\n' in line:
key, val = line.split('\n', 1)
else:
key = line
val = None
if key in c:
c[key].append(val)
else:
c[key] = [val]
return c
def _do(self, *args):
command = ['config', '--file', self.file]
command.extend(args)
p = GitCommand(None,
command,
capture_stdout = True,
capture_stderr = True)
if p.Wait() == 0:
return p.stdout
else:
            raise GitError('git config %s: %s' % (str(args), p.stderr))
class RefSpec(object):
"""A Git refspec line, split into its components:
forced: True if the line starts with '+'
src: Left side of the line
dst: Right side of the line
"""
@classmethod
def FromString(cls, rs):
lhs, rhs = rs.split(':', 2)
if lhs.startswith('+'):
lhs = lhs[1:]
forced = True
else:
forced = False
return cls(forced, lhs, rhs)
def __init__(self, forced, lhs, rhs):
self.forced = forced
self.src = lhs
self.dst = rhs
def SourceMatches(self, rev):
if self.src:
if rev == self.src:
return True
if self.src.endswith('/*') and rev.startswith(self.src[:-1]):
return True
return False
def DestMatches(self, ref):
if self.dst:
if ref == self.dst:
return True
if self.dst.endswith('/*') and ref.startswith(self.dst[:-1]):
return True
return False
def MapSource(self, rev):
if self.src.endswith('/*'):
return self.dst[:-1] + rev[len(self.src) - 1:]
return self.dst
def __str__(self):
s = ''
if self.forced:
s += '+'
if self.src:
s += self.src
if self.dst:
s += ':'
s += self.dst
return s
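# Illustrative sketch (not part of the original source): parsing and mapping a
# typical fetch refspec.
#
#   >>> spec = RefSpec.FromString('+refs/heads/*:refs/remotes/origin/*')
#   >>> spec.forced
#   True
#   >>> spec.SourceMatches('refs/heads/master')
#   True
#   >>> spec.MapSource('refs/heads/master')
#   'refs/remotes/origin/master'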
_master_processes = []
_master_keys = set()
_ssh_master = True
_master_keys_lock = None
def init_ssh():
"""Should be called once at the start of repo to init ssh master handling.
At the moment, all we do is to create our lock.
"""
global _master_keys_lock
assert _master_keys_lock is None, "Should only call init_ssh once"
_master_keys_lock = _threading.Lock()
def _open_ssh(host, port=None):
global _ssh_master
# Acquire the lock. This is needed to prevent opening multiple masters for
# the same host when we're running "repo sync -jN" (for N > 1) _and_ the
# manifest <remote fetch="ssh://xyz"> specifies a different host from the
# one that was passed to repo init.
_master_keys_lock.acquire()
try:
# Check to see whether we already think that the master is running; if we
# think it's already running, return right away.
if port is not None:
key = '%s:%s' % (host, port)
else:
key = host
if key in _master_keys:
return True
if not _ssh_master \
or 'GIT_SSH' in os.environ \
or sys.platform in ('win32', 'cygwin'):
# failed earlier, or cygwin ssh can't do this
#
return False
# We will make two calls to ssh; this is the common part of both calls.
command_base = ['ssh',
'-o','ControlPath %s' % ssh_sock(),
host]
if port is not None:
command_base[1:1] = ['-p',str(port)]
# Since the key wasn't in _master_keys, we think that master isn't running.
# ...but before actually starting a master, we'll double-check. This can
# be important because we can't tell that that 'git@myhost.com' is the same
# as 'myhost.com' where "User git" is setup in the user's ~/.ssh/config file.
check_command = command_base + ['-O','check']
try:
Trace(': %s', ' '.join(check_command))
check_process = subprocess.Popen(check_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
check_process.communicate() # read output, but ignore it...
isnt_running = check_process.wait()
if not isnt_running:
                # Our double-check found that the master _was_ in fact running. Add to
# the list of keys.
_master_keys.add(key)
return True
except Exception:
            # Ignore exceptions. We will fall back to the normal command and print
# to the log there.
pass
command = command_base[:1] + \
['-M', '-N'] + \
command_base[1:]
try:
Trace(': %s', ' '.join(command))
p = subprocess.Popen(command)
except Exception, e:
_ssh_master = False
print >>sys.stderr, \
'\nwarn: cannot enable ssh control master for %s:%s\n%s' \
% (host,port, str(e))
return False
_master_processes.append(p)
_master_keys.add(key)
time.sleep(1)
return True
finally:
_master_keys_lock.release()
def close_ssh():
global _master_keys_lock
terminate_ssh_clients()
for p in _master_processes:
try:
os.kill(p.pid, SIGTERM)
p.wait()
except OSError:
pass
del _master_processes[:]
_master_keys.clear()
d = ssh_sock(create=False)
if d:
try:
os.rmdir(os.path.dirname(d))
except OSError:
pass
# We're done with the lock, so we can delete it.
_master_keys_lock = None
URI_SCP = re.compile(r'^([^@:]*@?[^:/]{1,}):')
URI_ALL = re.compile(r'^([a-z][a-z+]*)://([^@/]*@?[^/]*)/')
def GetSchemeFromUrl(url):
m = URI_ALL.match(url)
if m:
return m.group(1)
return None
def _preconnect(url):
m = URI_ALL.match(url)
if m:
scheme = m.group(1)
host = m.group(2)
if ':' in host:
host, port = host.split(':')
else:
port = None
if scheme in ('ssh', 'git+ssh', 'ssh+git'):
return _open_ssh(host, port)
return False
m = URI_SCP.match(url)
if m:
host = m.group(1)
return _open_ssh(host)
return False
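# Illustrative sketch (not part of the original source): the two URL shapes
# recognised above (hostnames made up).
#
#   >>> GetSchemeFromUrl('ssh://review.example.com:29418/platform/build')
#   'ssh'
#   >>> GetSchemeFromUrl('git@example.com:tools/repo.git') is None   # scp-style
#   True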
class Remote(object):
"""Configuration options related to a remote.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.url = self._Get('url')
self.review = self._Get('review')
self.projectname = self._Get('projectname')
self.fetch = map(lambda x: RefSpec.FromString(x),
self._Get('fetch', all=True))
self._review_protocol = None
def _InsteadOf(self):
globCfg = GitConfig.ForUser()
urlList = globCfg.GetSubSections('url')
longest = ""
longestUrl = ""
for url in urlList:
key = "url." + url + ".insteadOf"
insteadOfList = globCfg.GetString(key, all=True)
for insteadOf in insteadOfList:
if self.url.startswith(insteadOf) \
and len(insteadOf) > len(longest):
longest = insteadOf
longestUrl = url
if len(longest) == 0:
return self.url
return self.url.replace(longest, longestUrl, 1)
def PreConnectFetch(self):
connectionUrl = self._InsteadOf()
return _preconnect(connectionUrl)
@property
def ReviewProtocol(self):
if self._review_protocol is None:
if self.review is None:
return None
u = self.review
if not u.startswith('http:') and not u.startswith('https:'):
u = 'http://%s' % u
if u.endswith('/Gerrit'):
u = u[:len(u) - len('/Gerrit')]
if not u.endswith('/ssh_info'):
if not u.endswith('/'):
u += '/'
u += 'ssh_info'
if u in REVIEW_CACHE:
info = REVIEW_CACHE[u]
self._review_protocol = info[0]
self._review_host = info[1]
self._review_port = info[2]
elif 'REPO_HOST_PORT_INFO' in os.environ:
info = os.environ['REPO_HOST_PORT_INFO']
self._review_protocol = 'ssh'
self._review_host = info.split(" ")[0]
self._review_port = info.split(" ")[1]
REVIEW_CACHE[u] = (
self._review_protocol,
self._review_host,
self._review_port)
else:
try:
info = urllib2.urlopen(u).read()
if info == 'NOT_AVAILABLE':
raise UploadError('%s: SSH disabled' % self.review)
if '<' in info:
# Assume the server gave us some sort of HTML
# response back, like maybe a login page.
#
raise UploadError('%s: Cannot parse response' % u)
self._review_protocol = 'ssh'
self._review_host = info.split(" ")[0]
self._review_port = info.split(" ")[1]
except urllib2.HTTPError, e:
if e.code == 404:
self._review_protocol = 'http-post'
self._review_host = None
self._review_port = None
else:
raise UploadError('Upload over SSH unavailable')
except urllib2.URLError, e:
raise UploadError('%s: %s' % (self.review, str(e)))
REVIEW_CACHE[u] = (
self._review_protocol,
self._review_host,
self._review_port)
return self._review_protocol
def SshReviewUrl(self, userEmail):
if self.ReviewProtocol != 'ssh':
return None
username = self._config.GetString('review.%s.username' % self.review)
if username is None:
username = userEmail.split("@")[0]
return 'ssh://%s@%s:%s/%s' % (
username,
self._review_host,
self._review_port,
self.projectname)
def ToLocal(self, rev):
"""Convert a remote revision string to something we have locally.
"""
if IsId(rev):
return rev
if rev.startswith(R_TAGS):
return rev
if not rev.startswith('refs/'):
rev = R_HEADS + rev
for spec in self.fetch:
if spec.SourceMatches(rev):
return spec.MapSource(rev)
raise GitError('remote %s does not have %s' % (self.name, rev))
def WritesTo(self, ref):
"""True if the remote stores to the tracking ref.
"""
for spec in self.fetch:
if spec.DestMatches(ref):
return True
return False
def ResetFetch(self, mirror=False):
"""Set the fetch refspec to its default value.
"""
if mirror:
dst = 'refs/heads/*'
else:
dst = 'refs/remotes/%s/*' % self.name
self.fetch = [RefSpec(True, 'refs/heads/*', dst)]
def Save(self):
"""Save this remote to the configuration.
"""
self._Set('url', self.url)
self._Set('review', self.review)
self._Set('projectname', self.projectname)
self._Set('fetch', map(lambda x: str(x), self.fetch))
def _Set(self, key, value):
key = 'remote.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all=False):
key = 'remote.%s.%s' % (self.name, key)
return self._config.GetString(key, all = all)
class Branch(object):
"""Configuration options related to a single branch.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.merge = self._Get('merge')
r = self._Get('remote')
if r:
self.remote = self._config.GetRemote(r)
else:
self.remote = None
@property
def LocalMerge(self):
"""Convert the merge spec to a local name.
"""
if self.remote and self.merge:
return self.remote.ToLocal(self.merge)
return None
def Save(self):
"""Save this branch back into the configuration.
"""
if self._config.HasSection('branch', self.name):
if self.remote:
self._Set('remote', self.remote.name)
else:
self._Set('remote', None)
self._Set('merge', self.merge)
else:
fd = open(self._config.file, 'ab')
try:
fd.write('[branch "%s"]\n' % self.name)
if self.remote:
fd.write('\tremote = %s\n' % self.remote.name)
if self.merge:
fd.write('\tmerge = %s\n' % self.merge)
finally:
fd.close()
def _Set(self, key, value):
key = 'branch.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all=False):
key = 'branch.%s.%s' % (self.name, key)
return self._config.GetString(key, all = all)
|
|
#!/usr/bin/env python
"""
Copyright 2015 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import syndicate.util.config as conf
import syndicate.util.crypto as crypto
import syndicate.util.objects as object_stub
import syndicate.ms.jsonrpc as jsonrpc
import syndicate.ms.msconfig as msconfig
from syndicate.util.objects import MissingKeyException
import os
import urlparse
from Crypto.Hash import SHA256 as HashAlg
from Crypto.PublicKey import RSA as CryptoKey
from Crypto import Random
from Crypto.Signature import PKCS1_PSS as CryptoSigner
log = conf.log
def warn_key_change( config ):
"""
    Return a warning string telling the user that the MS's public key has changed.
"""
return """
SECURE VERIFICATION FAILURE!
It's possible that someone is impersonating your Syndicate, to get you to leak sensitive data!
If you are certain this is not the case, you should remove the offending public key.
Offending public key path: %s
""" % conf.object_key_path( config, "syndicate", make_ms_url( config['syndicate_host'], config['syndicate_port'], config['no_tls'] ).strip("https://"), public=True )
def parse_url( url ):
"""
Given a URL, find its host and port,
and determine based on the protocol scheme
whether or not it uses TLS.
Return (host, port, tls) on success
Raise exception if invalid.
"""
if "://" not in url:
log.warning("No scheme for %s. Assuming https://" % url)
url = "https://" + url
parsed = urlparse.urlparse( url )
scheme = parsed.scheme
netloc = parsed.netloc
host = None
port = None
no_tls = None
if ":" in netloc:
host, port = netloc.split(":")
try:
port = int(port)
        except ValueError:
            raise Exception("Invalid URL %s" % url)
else:
host = netloc
if scheme.lower() == 'http':
if port is None:
port = 80
no_tls = True
    elif scheme.lower() == 'https':
        if port is None:
            port = 443
        no_tls = False
else:
raise Exception("Unrecognized scheme in URL %s" % config['url'] )
return (host, port, no_tls)
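# Illustrative sketch (not part of the original source): host/port/TLS
# extraction for a couple of made-up URLs.
#
#   >>> parse_url('https://ms.example.com')
#   ('ms.example.com', 443, False)
#   >>> parse_url('http://localhost:8080')
#   ('localhost', 8080, True)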
def make_ms_url( syndicate_host, syndicate_port, no_tls, urlpath="" ):
"""
Make a URL to the MS.
Return the URL.
"""
scheme = "https://"
    default_port = 443
    if no_tls:
        default_port = 80
        scheme = "http://"
if syndicate_port != default_port:
return scheme + os.path.join( syndicate_host.strip("/") + ":%s" % syndicate_port, urlpath )
else:
return scheme + os.path.join( syndicate_host.strip("/"), urlpath )
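# Illustrative sketch (not part of the original source): the port is included
# in the URL only when it differs from the scheme's default. Host and port
# below are made up.
#
#   >>> make_ms_url('ms.example.com', 8080, False, urlpath="API")
#   'https://ms.example.com:8080/API'
#   >>> make_ms_url('localhost', 8080, True)
#   'http://localhost:8080/'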
def api_call_signer( signing_pkey, method_name, data ):
"""
Sign an RPC call with the user's private key
"""
# sign the data
h = HashAlg.new( data )
signer = CryptoSigner.new( signing_pkey )
signature = signer.sign( h )
return signature
def api_call_verifier( config, pubkey, method_name, data, syndicate_data, rpc_result ):
"""
Verify an RPC call.
"""
# sanity check
if not 'signature' in syndicate_data:
log.error("No signature in server reply")
return False
sig = syndicate_data['signature']
# verify object ID and type
ret = False
if pubkey is not None:
# verify this data
h = HashAlg.new( data )
verifier = CryptoSigner.new(pubkey)
ret = verifier.verify( h, sig )
if not ret:
# verification key has changed on the MS
print warn_key_change( config )
return False
else:
return True
else:
raise Exception("No public key given. Unable to verify result.")
def make_rpc_client( config, caller_username=None ):
"""
Create an RPC client for calling MS methods.
Requires a config dictionary with:
* syndicate_host
* syndicate_port
* no_tls
* syndicate_public_key
* user_pkey
"""
import storage
ms_url = make_ms_url( config['syndicate_host'], config['syndicate_port'], config['no_tls'] ) + "/API"
username = caller_username
if username is None:
username = config['username']
user_private_key = storage.load_private_key( config, "user", username )
if user_private_key is None:
raise MissingKeyException("No private key for '%s'" % username)
syndicate_public_key = config['syndicate_public_key']
if not ms_url.lower().startswith("https://"):
log.warning("MS URL %s is NOT confidential!" % ms_url )
signer = lambda method_name, data: api_call_signer( user_private_key, method_name, data )
verifier = lambda method_name, args, kw, data, syndicate_data, rpc_result: api_call_verifier( config, syndicate_public_key, method_name, data, syndicate_data, rpc_result )
json_client = jsonrpc.Client( ms_url, jsonrpc.VERSION, signer=signer, verifier=verifier, username=username )
json_client.config = config
json_client.caller_username = username
return json_client
def json_stable_serialize( json_data ):
"""
Convert a dict or list into json, ensuring that key-values are serialized in a stable order.
    Lifted verbatim from the MS, to remove the dependency.
"""
if isinstance( json_data, list ) or isinstance( json_data, tuple ):
json_serialized_list = []
for json_element in json_data:
json_serialized_list.append( json_stable_serialize( json_element ) )
# json_serialized_list.sort()
return "[" + ", ".join( json_serialized_list ) + "]"
elif isinstance( json_data, dict ):
json_serialized_dict = {}
for key in json_data.keys():
json_serialized_dict[key] = json_stable_serialize( json_data[key] )
key_order = [k for k in json_serialized_dict.keys()]
key_order.sort()
return "{" + ", ".join( ['"%s": %s' % (k, json_serialized_dict[k]) for k in key_order] ) + "}"
elif isinstance( json_data, str ) or isinstance( json_data, unicode ):
return '"' + json_data + '"'
return '"' + str(json_data) + '"'
def ms_rpc( proxy, method_name, *args, **kw ):
"""
Call a method on the MS (method_name).
Take the argument vector *args and dictionary **kw (both taken from sys.argv),
look up the method's parser, parse the arguments, and then issue the
RPC call.
"""
verify_reply = True
config = proxy.config
if config.has_key('verify_reply'):
# do not verify the reply (i.e. we might not know the Syndicate public key)
verify_reply = config['verify_reply']
# parse arguments.
# use lib to load and store temporary data for the argument parsers.
lib = conf.ArgLib()
lib.config = config
try:
args, kw, extras = conf.parse_args( config, method_name, args, kw, lib )
except Exception, e:
log.error("Failed to parse arguments for '%s'" % method_name)
raise e
# make sure we got the right number of arguments
valid = conf.validate_args( method_name, args, kw )
if not valid:
raise Exception("Invalid arguments for %s" % method_name)
method_callable = getattr( proxy, method_name )
# NOTE: might cause an exception
log.debug("As %s, call %s(%s %s)" % (proxy.caller_username, method_name, ", ".join([str(a) for a in args]), ", ".join( ["%s=%s" % (str(k), str(kw[k])) for k in kw.keys()] )))
ret = method_callable( *args, **kw )
# process object-specific extra information, based on the returned value of this method.
for object_cls in object_stub.object_classes:
object_cls.PostProcessResult( extras, config, method_name, args, kw, ret )
return ret
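# Illustrative call-flow sketch (not part of the original source). The method
# name and argument below are hypothetical; the real method set is defined by
# the MS API and its argument parsers in syndicate.util.config.
#
#   >>> client = make_rpc_client(config)
#   >>> user = ms_rpc(client, "read_user", "alice@example.com")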
|
|
#!/usr/bin/env python
#
# Copyright 2017 Robot Garden, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# UAV State Model:
# Encapsulates UAV state and abstracts communication
# States:
# - Setpoint pose
# - local_position
# - MAV mode
# - arm
"""This module encapsulates UAV state and abstracts communication."""
#
from datetime import datetime
# import ROS libraries
import rospy
from std_msgs.msg import String
import mavros
#from mavros.utils import *
from mavros import setpoint as SP
import mavros_msgs.msg
import mavros_msgs.srv
from mavros_msgs.msg import Mavlink
from sensor_msgs.msg import BatteryState
from auto_number import AutoNumber
class MODE(AutoNumber):
"""MAV MODE"""
MANUAL = ()
LEARNING = ()
STEERING = ()
HOLD = ()
AUTO = ()
RTL = ()
GUIDED = ()
INITIALISING = ()
class ARM(AutoNumber):
"""MAV ARM STATE"""
ARMED = ()
DISARMED = ()
class _coord:
"""Pose coordinate"""
def __init__(self):
self.xpos = 0
self.ypos = 0
self.zpos = 0
#
class UAV_State:
"""State of UAV"""
def __init__(self):
self.current_pose = _coord()
self.setpoint_pose = _coord()
self.mode = "None"
self.arm = "None"
self.guided = "None"
self.timestamp = float(datetime.utcnow().strftime('%S.%f'))
self.connection_delay = 0.0
self.voltage = 0
self.current = 0
self.wp_reached = None
self.wp_reached_when = None
self.param_count = 0
mavros.set_namespace("/mavros")
# Service Proxies
rospy.wait_for_service('/mavros/cmd/arming')
self.svc_arming = rospy.ServiceProxy(
'/mavros/cmd/arming', mavros_msgs.srv.CommandBool)
rospy.wait_for_service('/mavros/set_mode')
self.svc_set_mode = rospy.ServiceProxy(
'/mavros/set_mode', mavros_msgs.srv.SetMode)
# # Publishers
self.pub_diagnostic = rospy.Publisher(
'/vicky/diagnostic', String, queue_size=10)
# Subscribers
self.local_position_sub = rospy.Subscriber(
mavros.get_topic('local_position', 'pose'),
SP.PoseStamped, self.__local_position_cb)
self.setpoint_local_sub = rospy.Subscriber(
mavros.get_topic('setpoint_raw', 'target_local'),
mavros_msgs.msg.PositionTarget, self.__setpoint_position_cb)
self.state_sub = rospy.Subscriber(
mavros.get_topic('state'),
mavros_msgs.msg.State, self.__state_cb)
self.battery_sub = rospy.Subscriber(
"/mavros/battery", BatteryState, self.__battery_cb)
self.mavlink_sub = rospy.Subscriber(
'/mavlink/from', Mavlink, self.__mavlink_cb)
self._mavlink_observers = []
def __local_position_cb(self, topic):
"""Local position subscriber"""
# rospy.loginfo('__local_position_cb')
self.current_pose.xpos = topic.pose.position.x
self.current_pose.ypos = topic.pose.position.y
self.current_pose.zpos = topic.pose.position.z
def __setpoint_position_cb(self, topic):
"""Pose subscriber"""
# rospy.loginfo('__setpoint_position_cb')
self.setpoint_pose.xpos = topic.position.x
self.setpoint_pose.ypos = topic.position.y
self.setpoint_pose.zpos = topic.position.z
def __state_cb(self, topic):
"""MAV state subscriber"""
# rospy.loginfo('__state_cb')
self.__calculate_delay()
self.mode = topic.mode
self.guided = topic.guided
self.arm = topic.armed
def __battery_cb(self, data):
"""Battery subscriber"""
# rospy.loginfo('__battery_cb')
self.current = round(data.current, 2)
self.voltage = round(data.voltage, 2)
def __mavlink_cb(self, data):
"""Mavlink subscriber"""
unix_time = (int)(data.header.stamp.to_sec())
# ~ Switch statement
while True:
# # Heart beat
# if data.msgid == 0:
# rospy.loginfo(
# '%s msgid %s detected (heart beat)',
# rospy.get_caller_id(),
# data.msgid)
# break
# Camera feedback
# See github.com/mavlink/c_library_v1/ardupilotmega/mavlink_msg_camera_feedback.h
if data.msgid == 180:
rospy.loginfo(
'%s msgid %s detected',
rospy.get_caller_id(),
data.msgid)
## Call callbacks
break
if data.msgid == 22:
self.param_count += 1
break
# Mission item reached #46
# # See github.com/mavlink/c_library_v1/common/mavlink_msg_mission_item_reached.h
if data.msgid == 46:
self.wp_reached = data.payload64[0] & 0xFFFF
self.wp_reached_when = unix_time
self.pubdiag_loginfo(
"{} MsgId {} detected. WP item # {}".
format(
rospy.get_caller_id(),
data.msgid,
str(self.wp_reached)
))
break
else:
break
# Notify observers
for id_observer in self._mavlink_observers:
if data.msgid == id_observer[0]:
id_observer[1](data)
#
#
#
def add_mavlink_observer(self, observer, msgid):
"""Add a mavlink observer"""
self._mavlink_observers.append([msgid, observer])
    def clear_mavlink_observers(self):
        """Clear all mavlink observers"""
        self._mavlink_observers = []
def __calculate_delay(self):
"""Calculate time delay"""
tmp = float(datetime.utcnow().strftime('%S.%f'))
if tmp < self.timestamp:
# over a minute
self.connection_delay = 60.0 - self.timestamp + tmp
else:
self.connection_delay = tmp - self.timestamp
self.timestamp = tmp
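    # Worked example (not part of the original source): the seconds field wraps
    # every minute, so a previous stamp of 59.80 followed by a current stamp of
    # 0.30 gives a delay of 60.0 - 59.80 + 0.30 = 0.50 seconds.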
def get_mode(self):
"""Get pixhawk MAV state"""
return self.mode
def set_mode(self, new_mode):
"""Set pixhawk MAV state"""
rospy.loginfo('/mavros/set_mode: '+new_mode)
        is_mode_changed = False
        if self.mode == new_mode:
            return is_mode_changed
        try:
            is_mode_changed = self.svc_set_mode(custom_mode=new_mode)
except rospy.ServiceException, err:
rospy.loginfo(
"Service set_mode call failed: %s. Mode %s could not be set. "
"Check that GPS is enabled.",
err, new_mode)
return is_mode_changed
def get_arm(self):
"""Get pixhawk arm state"""
return self.arm
def set_arm(self, new_arm):
"""Arm pixhawk"""
rospy.loginfo('/mavros/cmd/arming: '+str(new_arm))
if self.arm == new_arm:
return
try:
resp = self.svc_arming(new_arm)
self.arm = new_arm
rospy.loginfo(resp)
return resp
except rospy.ServiceException, err:
rospy.loginfo("Service arm call failed: %s. "
"Attempted to set %s",
err, new_arm)
def get_current_pose(self):
"""Get setpoint pose"""
return self.current_pose
def get_setpoint_pose(self):
"""Get setpoint pose"""
return self.setpoint_pose
def get_guided(self):
"""Get guided"""
return self.guided
def get_delay(self):
"""Get delay"""
return self.connection_delay
def get_current(self):
"""Get battery current"""
return self.current
def get_voltage(self):
"""Get battery voltage"""
return self.voltage
def pubdiag_loginfo(self, astr):
""" Publish to /vicky/diagnostic/ and log to info"""
self.pub_diagnostic.publish(astr)
rospy.loginfo(astr)
|
|
from textwrap import dedent
from .. import base
from ..exceptions import NotEnoughSpace
from ..utils import FormattersTestCase
from . import (CallFormatterWithLinebreakingFallback, LinebreakingAttributeFormatter,
ListOfExpressionsWithSingleLineContinuationsFormatter,
UnbreakableTupleFormatter)
class ListOfExpressionsWithSingleLineContinuationsFormatterTestCase(FormattersTestCase):
formatters_register = base.formatters.copy()
formatters_register.register(ListOfExpressionsWithSingleLineContinuationsFormatter)
def test_line_breaking_can_occure_only_on(self):
code = dedent("""\
(var1 +
var2, var3,
var4)""")
expected = dedent("""\
(var1 + var2,
var3, var4)""")
self.assertFormats(code, expected)
def test_nested_list_wrapping(self):
# REGRESSION
code = dedent("""\
[['/m', 'm'], ['/s',
's']]""")
self.assertFormats(code, code)
class CustomCallFormatterMixedWithListOfExpressionsWithSingleLineContinuationsFormatterTestCase(FormattersTestCase):
formatters_register = base.formatters.copy()
formatters_register.register(ListOfExpressionsWithSingleLineContinuationsFormatter,
LinebreakingAttributeFormatter)
def test_line_continuation_formatter_mixed_with_line_breaking_attribute_formatter(self):
code = dedent("""\
instance.attribute_instance.method(key1=value1, key2=value2, key3=value3,
list_param=['element 1', 'element 2',
'element 3'], key4=v4)""")
        # we expect a mixed effect: attribute line breaking + line continuations
expected = dedent("""\
(instance.attribute_instance
.method(key1=value1, key2=value2, key3=value3,
list_param=['element 1', 'element 2',
'element 3'],
key4=v4))""")
self.assertFormats(code, expected)
def test_wrapping_of_simple_function_call(self):
# REGRESSION
code = dedent("""\
r.m(_register=register,
parent=parent,
func_formatter=fun)""")
self.assertFormats(code, code, width=37)
class UnbreakableTupleFormatterTestCase(FormattersTestCase):
formatters_register = base.formatters.copy()
formatters_register.register(UnbreakableTupleFormatter)
def test_alignment(self):
code = '( 1, 2, 3)'
expected = '1, 2, 3'
self.assertFormats(code, expected, width=3, force=True)
def test_wrapping(self):
code = dedent("""\
[('Alternative', 'Alternative'),
('Blues', 'Blues'),
('Classical', 'Classical'),
('Country', 'Country'),
('Decades', 'Decades')]""")
self.assertFormats(code, code, width=3, force=True)
class CallFormatterWithLinebreakingFallbackTestCase(FormattersTestCase):
formatters_register = base.formatters.copy()
formatters_register.register(CallFormatterWithLinebreakingFallback)
def test_wrapping(self):
code = dedent("""\
function(
1, 2)""")
self.assertFormats(code, code)
def test_formats_line_continuation_if_there_is_enough_space(self):
code = 'function(1, 2)'
self.assertFormats(code, code)
def test_empty_argument_list_doesnt_break(self):
code = 'function()'
not_expected = dedent("""\
function(
)""")
self.assertRaises(
NotEnoughSpace, lambda: self.assertFormats(
code, not_expected))
def test_indent_is_counted_from_last_attribute_ref_subexpression(self):
code = 'instance.attr.attr_method(1, 2)'
expected = dedent("""\
instance.attr.attr_method(
1, 2)""")
self.assertFormats(code, expected)
def test_breaking_occur_only_when_there_are_at_least_to_columns_profit(self):
code = dedent("""\
test(
1)""")
self.assertRaises(NotEnoughSpace, lambda: self.assertFormats(code, code))
class LinebreakingAttributeFormatterTestCase(FormattersTestCase):
"""
In general primary expression can produce attributeref expression
primary ::= atom | attributeref ...
    According to this, here are all the cases where attributeref can occur as a subexpression:
[5.3.1]
+ attributeref ::= primary "." identifier
[5.3.2]
+ subscription ::= primary "[" expression_list "]"
[5.3.3]
+ slicing ::= simple_slicing | extended_slicing
+ simple_slicing ::= primary "[" short_slice "]"
+ extended_slicing ::= primary "[" slice_list "]"
[5.3.4]
+ call ::= primary "(" [argument_list [","]
"""
formatters_register = base.formatters.copy()
formatters_register.register(LinebreakingAttributeFormatter)
def test_identifiers_wrapping(self):
code = 'fun().identifier1.identifier2'
expected = dedent("""\
(fun().identifier1
.identifier2)""")
self.assertFormats(code, expected)
def test_call_wrapping(self):
code = 'fun().method1().method2().method3()'
expected = dedent("""\
(fun().method1()
.method2()
.method3())""")
self.assertFormats(code, expected)
def test_wrapping_skips_parentheses_inside_function_call(self):
code = 'fun(instance.method1().method2())'
expected = dedent("""\
fun(instance.method1()
.method2())""")
self.assertFormats(code, expected)
def test_wrapping_skips_parentheses_inside_list(self):
code = '[instance.method1().method2()]'
expected = dedent("""\
[instance.method1()
.method2()]""")
self.assertFormats(code, expected)
def test_wrapping_skips_parentheses_inside_nested_binary_operation(self):
code = 'fun(8 + instance.method1().method2())'
expected = dedent("""\
fun(8 + instance.method1()
.method2())""")
self.assertFormats(code, expected)
def test_wrapping_uses_parentheses_inside_binary_operation_when_necessary(self):
code = '8 + instance.method1().method2()'
expected = dedent("""\
8 + (instance.method1()
.method2())""")
self.assertFormats(code, expected)
def test_suffix_passing_for_single_element_chain(self):
code = 'method1(instance2.method2)'
self.assertFormats(code, code)
def test_wrapping_is_done_only_when_necessary(self):
code = 'fun().method1().method2().method3()'
expected = dedent("""\
(fun().method1().method2()
.method3())""")
self.assertFormats(code, expected)
def test_wrapping_uses_parentheses_only_when_necessary(self):
code = 'instance.method1().method2()'
self.assertFormats(code, code)
def test_correct_width_is_used_when_wrapping_subexpressions(self):
code = dedent("""\
super(Collector.BroadcasterInfo,
cls).__new__(cls,
source=cls.source,
**kwargs)""")
expected = dedent("""\
super(Collector.BroadcasterInfo,
cls).__new__(cls, source=cls.source, **kwargs)""")
self.assertFormats(code, expected)
def test_subscription_wrapping(self):
code = 'identifier1[value1].identifier2[value2].identifier3[value3]'
expected = dedent("""\
(identifier1[value1].identifier2[value2]
.identifier3[value3])""")
self.assertFormats(code, expected)
def test_slicing_wrapping(self):
code = 'identifier1[lower1:upper1:step1].identifier2[lower2:upper2:step2].identifier3[lower3:]'
expected = dedent("""\
(identifier1[lower1:upper1:step1].identifier2[lower2:upper2:step2]
.identifier3[lower3:])""")
self.assertFormats(code, expected)
def test_attr_ref_value_wrapping_when_required(self):
code = '[1,2,3].__len__()'
expected = dedent("""\
[1, 2,
3].__len__()""")
self.assertFormats(code, expected)
def test_formatting_preserves_parentheses_of_not_wrapped_subexpression(self):
code = '(x or y).get()'
self.assertFormats(code, code)
class FuzzyTestCase(FormattersTestCase):
def assertFormats(self, code, expected, formatters_register, *args, **kwargs):
super(FuzzyTestCase, self).assertFormats(code, expected,
formatters_register,
*args, **kwargs)
def test_nested_function_call_wrapping(self):
# REGRESSION
formatters_register = base.formatters.copy()
for Formatter in [LinebreakingAttributeFormatter,
ListOfExpressionsWithSingleLineContinuationsFormatter,
UnbreakableTupleFormatter]:
formatters_register.register_formatter(Formatter)
code = dedent("""\
db_session.add(Recipient(ip=ip, country_code=country_code,
region_code=region_code, city=city))""")
expected = dedent("""\
db_session.add(Recipient(ip=ip,
country_code=country_code,
region_code=region_code,
city=city))""")
self.assertFormats(code, expected, width=20, force=True,
formatters_register=formatters_register)
def test_argument_wrapping_in_complex_function_definition_statement(self):
# FIXME: No line breaking at width = 80??
# def test_string_field_processing(self, Form=containers.Dict.of(scalars.String
# .named('test-argument'))):
# REGRESSION
formatters_register = base.formatters.copy()
for Formatter in [LinebreakingAttributeFormatter,
ListOfExpressionsWithSingleLineContinuationsFormatter,
UnbreakableTupleFormatter]:
formatters_register.register_formatter(Formatter)
code = dedent("""\
db_session.add(Recipient(ip=ip, country_code=country_code,
region_code=region_code, city=city))""")
expected = dedent("""\
db_session.add(Recipient(ip=ip,
country_code=country_code,
region_code=region_code,
city=city))""")
self.assertFormats(code, expected, width=20, force=True,
formatters_register=formatters_register)
|
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import os
import subprocess
import tempfile
import logging
import errno
git_logger = logging.getLogger('git')
hg_logger = logging.getLogger('hg')
class VCSError(Exception):
def __init__(self, message, returncode=None, command=None):
super(VCSError, self).__init__(message)
self.returncode = returncode
self.command = command
class VCSNotInstalled(VCSError):
pass
class VCS(object):
@classmethod
def cloneToTemporaryDir(cls, remote):
raise NotImplementedError()
@classmethod
def cloneToDirectory(cls, remote, directory, tag=None):
raise NotImplementedError()
def commit(self, message, tag=None):
raise NotImplementedError()
def isClean(self):
raise NotImplementedError()
def tags(self):
raise NotImplementedError()
def markForCommit(self, path):
pass
def remove(self):
raise NotImplementedError()
def getCommitId(self):
raise NotImplementedError()
def getDescription(self):
raise NotImplementedError()
def __nonzero__(self):
raise NotImplementedError()
# python 3 truthiness
def __bool__(self):
return self.__nonzero__()
class Git(VCS):
def __init__(self, path):
self.worktree = path
self.gitdir = os.path.join(path, '.git')
@classmethod
def cloneToTemporaryDir(cls, remote):
return cls.cloneToDirectory(remote, tempfile.mkdtemp())
@classmethod
def cloneToDirectory(cls, remote, directory, tag=None):
commands = [
['git', 'clone', remote, directory]
]
cls._execCommands(commands)
r = Git(directory)
if tag is not None:
r.updateToTag(tag)
return r
def fetchAllBranches(self):
remote_branches = []
local_branches = []
# list remote branches
out, err = self._execCommands([self._gitCmd('branch', '-r')])
for line in out.split(b'\n'):
branch_info = line.split(b' -> ')
# skip HEAD:
if len(branch_info) > 1:
continue
remote_branch = branch_info[0].strip()
branch = b'/'.join(remote_branch.split(b'/')[1:])
remote_branches.append((remote_branch, branch))
# list already-existing local branches
out, err = self._execCommands([self._gitCmd('branch')])
for line in out.split(b'\n'):
local_branches.append(line.strip(b' *'))
for remote, branchname in remote_branches:
# don't try to replace existing local branches
if branchname in local_branches:
continue
try:
out, err = self._execCommands([
self._gitCmd('checkout', '-b', branchname, remote)
])
except VCSError as e:
git_logger.error('failed to fetch remote branch %s %s' % (remote, branchname))
raise
def remove(self):
# fsutils, , misc filesystem utils, internal
from yotta.lib import fsutils
fsutils.rmRf(self.worktree)
def getCommitId(self):
out, err = self._execCommands([self._gitCmd('rev-parse', 'HEAD')])
return out.strip()
def getDescription(self):
out, err = self._execCommands([self._gitCmd('describe', '--always', '--tags')])
return out.strip()
def workingDirectory(self):
return self.worktree
def _gitCmd(self, *args):
        return ['git', '--work-tree=%s' % self.worktree, '--git-dir=%s' % self.gitdir.replace('\\', '/')] + list(args)
@classmethod
def _execCommands(cls, commands):
out, err = None, None
for cmd in commands:
try:
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ)
except OSError as e:
if e.errno == errno.ENOENT:
if cmd[0] == 'git':
raise VCSNotInstalled(
'git is not installed, or not in your path. Please follow the installation instructions at http://docs.yottabuild.org/#installing'
)
else:
raise VCSNotInstalled('%s is not installed' % (cmd[0]))
else:
raise VCSError('command failed', command=cmd)
out, err = child.communicate()
returncode = child.returncode
if returncode:
raise VCSError("command failed: %s" % (err or out), returncode=returncode, command=cmd)
return out, err
def isClean(self):
commands = [
self._gitCmd('diff', '--quiet', '--exit-code'),
self._gitCmd('diff', '--cached', '--quiet', '--exit-code'),
]
try:
out, err = self._execCommands(commands)
except VCSError as e:
if e.returncode:
return False
else:
raise
return True
def markForCommit(self, relative_path):
commands = [
self._gitCmd('add', os.path.join(self.worktree, relative_path)),
]
self._execCommands(commands)
def updateToTag(self, tag):
commands = [
self._gitCmd('checkout', tag),
]
self._execCommands(commands)
def tags(self):
commands = [
self._gitCmd('tag', '-l')
]
out, err = self._execCommands(commands)
# I think utf-8 is the right encoding? commit messages are utf-8
# encoded, couldn't find any documentation on tag names.
return out.decode('utf-8').split(u'\n')
def branches(self):
commands = [
self._gitCmd('branch', '--list')
]
out, err = self._execCommands(commands)
return [x.lstrip(' *') for x in out.decode('utf-8').split('\n')]
def commit(self, message, tag=None):
commands = [
self._gitCmd('commit', '-m', message),
]
if tag:
commands.append(
self._gitCmd('tag', tag, '-a', '-m', tag),
)
self._execCommands(commands)
def __nonzero__(self):
return True
# FIXME: hgapi will throw HgException when something goes wrong, it may be worth trying
# to catch that in some methods
class HG(VCS):
hgapi = None
def __init__(self, path):
self._loadHGApi()
self.worktree = path
self.repo = self.hgapi.Repo(path)
@classmethod
def _loadHGApi(cls):
# only import hgapi on demand, since it is rarely needed
if cls.hgapi is None:
import hgapi
cls.hgapi = hgapi
@classmethod
def cloneToTemporaryDir(cls, remote):
return cls.cloneToDirectory(remote, tempfile.mkdtemp())
@classmethod
def cloneToDirectory(cls, remote, directory, tag=None):
cls._loadHGApi()
        # hg doesn't automatically create the directories needed for the destination
        try:
            os.makedirs(directory)
        except OSError:
            pass
hg_logger.debug('will clone %s into %s', remote, directory)
cls.hgapi.Repo.hg_clone(remote, directory)
r = HG(directory)
if tag is not None:
r.updateToTag(tag)
return r
def remove(self):
# fsutils, , misc filesystem utils, internal
from yotta.lib import fsutils
fsutils.rmRf(self.worktree)
def getCommitId(self):
return self.repo.hg_node()
def getDescription(self):
try:
return self.repo.hg_command('log', '--rev', '.', '--template', "{latesttag}{sub('^-0-.*', '', '-{latesttagdistance}-m{node|short}')}")
except self.hgapi.HgException: # old mercurial doesn't support above command, output short hash, m-prefixed
return "m" + self.getCommitId()[:12]
def workingDirectory(self):
return self.worktree
def isClean(self):
return not bool(self.repo.hg_status(empty=True))
def markForCommit(self, relative_path):
self.repo.hg_add(os.path.join(self.worktree, relative_path))
def updateToTag(self, tag):
self.repo.hg_update(tag)
def tags(self):
l = list(self.repo.hg_tags().keys())
l.remove('tip')
return l
def commit(self, message, tag=None):
self.repo.hg_commit(message)
if tag:
self.repo.hg_tag(tag)
def __nonzero__(self):
return True
def getVCS(path):
# crude heuristic, does the job...
if os.path.exists(os.path.join(path, '.git')):
return Git(path)
if os.path.isdir(os.path.join(path, '.hg')):
return HG(path)
return None
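# Illustrative usage sketch (not part of the original source); the path and
# file names below are made up.
#
#   vcs = getVCS('/path/to/some/module')
#   if vcs is not None and not vcs.isClean():
#       vcs.markForCommit('module.json')
#       vcs.commit('update description', tag='v0.0.1')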
|
|
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.http import HttpResponseBadRequest
from django.urls import reverse
from django.views.decorators.csrf import ensure_csrf_cookie
from django.shortcuts import render
from lab01_authserver_app.models import *
import json
import logging
log = logging.getLogger('lab01_authserver_app')
def auth_controller(request):
log_request(request)
if request.method == 'GET':
oauth_params = oauth_params_auth(request)
response = render(request, 'auth.html', {'oauth': oauth_params})
log.debug('response:\n' + str(response.serialize()))
return response
else:
return HttpResponseBadRequest('400 Malformed request.')
def login_controller(request):
log_request(request)
if request.method == 'POST':
uname = request.POST.get('uname', None)
password = request.POST.get('password', None)
if 'register' in request.POST:
operation = 'register'
elif 'login' in request.POST:
operation = 'login'
else:
return HttpResponseBadRequest('400 Malformed request')
log.debug('request to {}, uname = {} password = {}'.format(operation, uname, password))
if operation == 'login':
user = authenticate(uname, password)
elif operation == 'register':
user = register_user(uname, password)
else:
return HttpResponseBadRequest("400 Malformed request.")
if user:
# logged in OK, let's handle oauth case
oauth = oauth_params_auth(request)
if oauth and oauth['response_type'] == 'code':
grant = issue_grantcode(user)
redirect_uri = oauth['redirect_uri']
redirect_uri += '?code=' + grant.code
response = HttpResponseRedirect(redirect_uri)
else:
response = HttpResponseRedirect(reverse('users'))
response.set_cookie('uname', uname)
response.set_cookie('password', password)
if operation == 'register':
                response.status_code = 201
log.debug('response:\n' + str(response.serialize()))
return response
else:
return HttpResponse('401 Unauthorized', status=401)
else:
return HttpResponseBadRequest('400 Malformed request.')
def users_controller(request):
log_request(request)
user = authorize_request(request)
if not user:
return HttpResponse('401 Unauthorized', status=401)
# now check if we perform some post operations
if request.method == 'POST':
delete_uname = request.POST['delete']
log.debug('request to delete user ' + delete_uname)
delete_user(delete_uname)
return HttpResponseRedirect(reverse('users'))
users = list_users()
return render(request, 'users.html', {'users': users})
def users_controller_me(request):
log_request(request)
try:
user = authorize_request(request)
except AccessTokenExpired:
return HttpResponse('440 Token expired', status=440)
if not user:
return HttpResponse('401 Unauthorized', status=401)
else:
content = {'uname': user.uname}
response = HttpResponse(content = json.dumps(content),
content_type='application/json', status=200)
log.debug('response:\n' + str(response.serialize()))
return response
def users_controller_list(request):
log_request(request)
try:
user = authorize_request(request)
except AccessTokenExpired:
return HttpResponse('440 Token expired', status=440)
if not user:
return HttpResponse('401 Unauthorized', status=401)
else:
users = list_users()
        content = [u.uname for u in users]
response = HttpResponse(content = json.dumps(content),
content_type='application/json', status=200)
log.debug('response:\n' + str(response.serialize()))
return response
def token_controller_issue(request):
# here we need to issue access and refresh tokens via grant code
log_request(request)
oauth = oauth_params_token_issue(request)
if oauth and oauth['grant_type'] == 'authorization_code':
try:
user = authenticate_authcode(oauth['code'])
except AuthGrantCodeExpired:
return HttpResponse('440 grant code expired', status=440)
if user:
tokens = issue_tokens(user, oauth['client_id'])
# now let's form the response
content = {}
content['token_type'] = 'mytokentype'
content['expires_in'] = atoken_lifetime
content['access_token'] = tokens['atoken'].token
content['refresh_token'] = tokens['rtoken'].rtoken
response = HttpResponse(content = json.dumps(content),
content_type='application/json', status=200)
log.debug('response:\n' + str(response.serialize()))
return response
return HttpResponse('401 Unauthorized', status=401)
return HttpResponseBadRequest('400 Malformed request.')
def token_controller_verify(request):
log_request(request)
atoken = request.POST.get('access_token', None)
if atoken:
try:
user = authenticate_token(atoken)
except AccessTokenExpired:
return HttpResponse('Token expired', status=440)
if user:
return HttpResponse('OK', status=200)
else:
return HttpResponse('404 Not found', status=404)
return HttpResponseBadRequest('400 Malformed request.')
def token_controller_refresh(request):
# here we need to issue access and refresh tokens via grant code
log_request(request)
oauth = oauth_params_token_refresh(request)
if oauth and oauth['grant_type'] == 'refresh_token':
user = authenticate_rtoken(oauth['refresh_token'])
if user:
tokens = issue_tokens(user, oauth['client_id'])
# now let's form the response
content = {}
content['token_type'] = 'mytokentype'
content['expires_in'] = atoken_lifetime
content['access_token'] = tokens['atoken'].token
content['refresh_token'] = tokens['rtoken'].rtoken
response = HttpResponse(content = json.dumps(content),
content_type='application/json', status=200)
log.debug('response:\n' + str(response.serialize()))
return response
return HttpResponse('401 Unauthorized', status=401)
return HttpResponseBadRequest('400 Malformed request.')
def token_controller_grant(request):
# this is debug endpoint to issue grant codes without redirect stuff
log_request(request)
uname = request.POST.get('uname', None)
password = request.POST.get('password', None)
user = authenticate(uname, password)
if user:
# logged in OK, let's handle oauth case
grant = issue_grantcode(user)
response = HttpResponse(grant.code, status=200)
log.debug('response:\n' + str(response.serialize()))
return response
else:
return HttpResponse('401 Unauthorized', status=401)
def query_string(params):
    qs = '?'
    for k in params:
        if len(qs) > 1:
            qs = qs + '&'
        qs = qs + k + '=' + params[k]
    return qs
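# Illustrative sketch (not part of the original source): values are concatenated
# as-is (no URL-encoding), and additional keys are joined with '&'.
#
#   >>> query_string({'code': 'abc123'})
#   '?code=abc123'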
def log_request(request):
log.debug(str(request))
log.debug('GET: ' + str(request.GET))
log.debug('POST: ' + str(request.POST))
log.debug('Cookies:\n' + repr(request.COOKIES))
log.debug('Meta:\n' + repr(request.META))
def authorize_request(request):
    # first, try cookies
uname = request.COOKIES.get('uname', None)
password = request.COOKIES.get('password', None)
user = authenticate(uname, password)
if user:
return user
else:
# now let's try AccessToken
access_token = request.META.get('HTTP_ACCESSTOKEN', None)
log.debug('header HTTP_ACCESSTOKEN: ' + repr(access_token))
if access_token:
user = authenticate_token(access_token)
return user
return None
def oauth_params_auth(request):
d = rdict(request)
oauth = {}
try:
oauth['response_type'] = d['response_type']
oauth['client_id'] = d['client_id']
oauth['redirect_uri'] = d['redirect_uri']
return oauth
except:
return None
def oauth_params_token_issue(request):
d = rdict(request)
oauth = {}
try:
oauth['grant_type'] = d['grant_type']
oauth['client_id'] = d['client_id']
oauth['client_secret'] = d['client_secret']
oauth['redirect_uri'] = d['redirect_uri']
oauth['code'] = d['code']
return oauth
except:
return None
def oauth_params_token_refresh(request):
d = rdict(request)
oauth = {}
try:
oauth['grant_type'] = d['grant_type']
oauth['client_id'] = d['client_id']
oauth['client_secret'] = d['client_secret']
oauth['refresh_token'] = d['refresh_token']
return oauth
except:
return None
def rdict(request):
if request.method == 'GET':
return request.GET
elif request.method == 'POST':
return request.POST
return None
|
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Utilties to deal with introspecting GGRC models for publishing, creation,
and update from resource format representations, such as JSON."""
from sqlalchemy.sql.schema import UniqueConstraint
from ggrc.utils import get_mapping_rules, get_unmapping_rules
from ggrc.utils import title_from_camelcase
from ggrc.utils import underscore_from_camelcase
ATTRIBUTE_ORDER = (
"slug",
"assessment_template",
"audit",
"control",
"program",
"task_group",
"workflow",
"title",
"description",
"notes",
"test_plan",
"owners",
"related_assessors",
"related_creators",
"related_assignees",
"related_verifiers",
"program_owner",
"program_editor",
"program_reader",
"workflow_owner",
"workflow_member",
"task_type",
"due_on",
"start_date",
"end_date",
"report_start_date",
"report_end_date",
"relative_start_date",
"relative_end_date",
"finished_date",
"verified_date",
"status",
'os_state',
"assertions",
"categories",
"contact",
"design",
"directive",
"fraud_related",
"key_control",
"kind",
"link",
"means",
"network_zone",
"operationally",
"principal_assessor",
"secondary_assessor",
"secondary_contact",
"url",
"reference_url",
"verify_frequency",
"name",
"email",
"is_enabled",
"company",
"user_role",
"recipients",
"send_by_default",
"document_url",
"document_evidence",
"delete",
)
EXCLUDE_CUSTOM_ATTRIBUTES = set([
"AssessmentTemplate",
])
EXCLUDE_MAPPINGS = set([
"AssessmentTemplate",
])
def is_filter_only(alias_properties):
"""Determine if alias is for filter use only.
Prevents alias filters from being exportable.
Args:
alias_properties: Alias properties.
Returns:
True if the alias is marked as filter-only, False otherwise.
"""
if isinstance(alias_properties, dict):
if alias_properties.get("filter_only"):
return True
return False
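# Illustrative values (added for clarity, not part of the original module): a
# plain string alias or a dict without the "filter_only" flag stays exportable:
#   is_filter_only("Title")                                        -> False
#   is_filter_only({"display_name": "Title", "filter_only": True}) -> True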
class PublishOnly(object):
"""Attributes wrapped by ``PublishOnly`` instances should not be considered
to be a part of an inherited list. For example, ``_update_attrs`` can be
inherited from ``_publish_attrs`` if left unspecified. This class provides
a mechanism to use that inheritance while excluding some elements from the
resultant inherited list. For example, this:
.. sourcecode::
_publish_attrs = [
'inherited_attr',
PublishOnly('not_inherited_attr'),
]
is equivalent to this:
.. sourcecode::
_publish_attrs = [
'inherited_attr',
'not_inherited_attr',
]
_update_attrs = [
'inherited_attr',
]
"""
# pylint: disable=too-few-public-methods
def __init__(self, attr_name):
self.attr_name = attr_name
class AttributeInfo(object):
"""Gather model CRUD information by reflecting on model classes. Builds and
caches a list of the publishing properties for a class by walking the
class inheritance tree.
"""
MAPPING_PREFIX = "__mapping__:"
UNMAPPING_PREFIX = "__unmapping__:"
CUSTOM_ATTR_PREFIX = "__custom__:"
OBJECT_CUSTOM_ATTR_PREFIX = "__object_custom__:"
SNAPSHOT_MAPPING_PREFIX = "__snapshot_mapping__:"
class Type(object):
"""Types of model attributes."""
# TODO: change to enum.
# pylint: disable=too-few-public-methods
PROPERTY = "property"
MAPPING = "mapping"
SPECIAL_MAPPING = "special_mapping"
CUSTOM = "custom" # normal custom attribute
OBJECT_CUSTOM = "object_custom" # object level custom attribute
USER_ROLE = "user_role"
def __init__(self, tgt_class):
self._publish_attrs = AttributeInfo.gather_publish_attrs(tgt_class)
self._update_attrs = AttributeInfo.gather_update_attrs(tgt_class)
self._create_attrs = AttributeInfo.gather_create_attrs(tgt_class)
self._include_links = AttributeInfo.gather_include_links(tgt_class)
self._aliases = AttributeInfo.gather_aliases(tgt_class)
@classmethod
def gather_attr_dicts(cls, tgt_class, src_attr):
""" Gather dictionaries from target class parets """
result = {}
for base_class in tgt_class.__bases__:
base_result = cls.gather_attr_dicts(base_class, src_attr)
result.update(base_result)
attrs = getattr(tgt_class, src_attr, {})
result.update(attrs)
return result
@classmethod
def gather_attrs(cls, tgt_class, src_attrs, accumulator=None,
main_class=None):
"""Gathers the attrs to be included in a list for publishing, update, or
some other purpose. Supports inheritance by iterating the list of
``src_attrs`` until a list is found.
Inheritance of some attributes can be circumvented through use of the
``DontPropoagate`` decorator class.
"""
if main_class is None:
main_class = tgt_class
src_attrs = src_attrs if isinstance(src_attrs, list) else [src_attrs]
accumulator = accumulator if accumulator is not None else set()
ignore_publishonly = True
for attr in src_attrs:
attrs = None
# Only get the attribute if it is defined on the target class, but
# get it via `getattr`, to handle `@declared_attr`
if attr in tgt_class.__dict__:
attrs = getattr(tgt_class, attr, None)
if callable(attrs):
attrs = attrs(main_class)
if attrs is not None:
if not ignore_publishonly:
attrs = [a for a in attrs if not isinstance(a, PublishOnly)]
else:
attrs = [a if not isinstance(a, PublishOnly) else a.attr_name for
a in attrs]
accumulator.update(attrs)
break
else:
ignore_publishonly = False
for base in tgt_class.__bases__:
cls.gather_attrs(base, src_attrs, accumulator, main_class=main_class)
return accumulator
@classmethod
def gather_publish_attrs(cls, tgt_class):
return cls.gather_attrs(tgt_class, '_publish_attrs')
@classmethod
def gather_aliases(cls, tgt_class):
return cls.gather_attr_dicts(tgt_class, '_aliases')
@classmethod
def gather_update_attrs(cls, tgt_class):
attrs = cls.gather_attrs(tgt_class, ['_update_attrs', '_publish_attrs'])
return attrs
@classmethod
def gather_create_attrs(cls, tgt_class):
return cls.gather_attrs(tgt_class, [
'_create_attrs', '_update_attrs', '_publish_attrs'])
@classmethod
def gather_include_links(cls, tgt_class):
return cls.gather_attrs(tgt_class, ['_include_links'])
@classmethod
def _generate_mapping_definition(cls, rules, prefix, display_name_tmpl):
"Generate definition from template"
definitions = {}
from ggrc.snapshotter.rules import Types
read_only = Types.parents | Types.scoped
read_only_text = "Read only column and will be ignored on import."
for klass in rules:
klass_name = title_from_camelcase(klass)
key = "{}{}".format(prefix, klass_name)
definitions[key.lower()] = {
"display_name": display_name_tmpl.format(klass_name),
"attr_name": klass.lower(),
"mandatory": False,
"unique": False,
"description": read_only_text if klass in read_only else "",
"type": cls.Type.MAPPING,
}
return definitions
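# Illustrative sketch (hypothetical rule class "Program", added for clarity): with
# MAPPING_PREFIX and the "map:{}" template this yields a column keyed
# "__mapping__:program" whose display_name is "map:Program"; classes in the
# snapshot parent/scoped sets additionally carry the read-only description.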
@classmethod
def get_mapping_definitions(cls, object_class):
""" Get column definitions for allowed mappings for object_class """
from ggrc.snapshotter import rules
if object_class.__name__ in rules.Types.scoped:
return cls._generate_mapping_definition(
rules.Types.all, cls.SNAPSHOT_MAPPING_PREFIX, "map:{}",
)
definitions = {}
mapping_rules = get_mapping_rules()
object_mapping_rules = mapping_rules.get(object_class.__name__, [])
definitions.update(cls._generate_mapping_definition(
object_mapping_rules, cls.MAPPING_PREFIX, "map:{}",
))
unmapping_rules = get_unmapping_rules()
object_unmapping_rules = unmapping_rules.get(object_class.__name__, [])
definitions.update(cls._generate_mapping_definition(
object_unmapping_rules, cls.UNMAPPING_PREFIX, "unmap:{}",
))
return definitions
@classmethod
def get_custom_attr_definitions(cls, object_class, ca_cache=None,
include_oca=True):
"""Get column definitions for custom attributes on object_class.
Args:
object_class: Model for which we want the attribute definitions.
ca_cache: dictionary containing custom attribute definitions. If it's set
this function will not look for CAD in the database. This should be
used for bulk operations, and eventually replaced with memcache.
include_oca: Flag for including object level custom attributes. This
should be true only for definitions needed for csv imports.
Returns:
dict of custom attribute definitions.
"""
definitions = {}
if not hasattr(object_class, "get_custom_attribute_definitions"):
return definitions
object_name = underscore_from_camelcase(object_class.__name__)
if isinstance(ca_cache, dict) and object_name:
custom_attributes = ca_cache.get(object_name, [])
else:
custom_attributes = object_class.get_custom_attribute_definitions()
for attr in custom_attributes:
description = attr.helptext or u""
if (attr.attribute_type == attr.ValidTypes.DROPDOWN and
attr.multi_choice_options):
if description:
description += "\n\n"
description += u"Accepted values are:\n{}".format(
attr.multi_choice_options.replace(",", "\n")
)
if attr.definition_id:
ca_type = cls.Type.OBJECT_CUSTOM
attr_name = u"{}{}".format(
cls.OBJECT_CUSTOM_ATTR_PREFIX, attr.title).lower()
else:
ca_type = cls.Type.CUSTOM
attr_name = u"{}{}".format(cls.CUSTOM_ATTR_PREFIX, attr.title).lower()
definition_ids = definitions.get(attr_name, {}).get("definition_ids", [])
definition_ids.append(attr.id)
definitions[attr_name] = {
"display_name": attr.title,
"attr_name": attr.title,
"mandatory": attr.mandatory,
"unique": False,
"description": description,
"type": ca_type,
"definition_ids": definition_ids,
}
return definitions
@classmethod
def get_unique_constraints(cls, object_class):
""" Return a set of attribute names for single unique columns """
constraints = object_class.__table__.constraints
unique = [con for con in constraints if isinstance(con, UniqueConstraint)]
# we only handle single column unique constraints
unique_columns = [u.columns.keys() for u in unique if len(u.columns) == 1]
return set(sum(unique_columns, []))
@classmethod
def get_object_attr_definitions(cls, object_class, ca_cache=None,
include_oca=True):
"""Get all column definitions for object_class.
This function joins custom attribute definitions, mapping definitions and
the extra delete column.
Args:
object_class: Model for which we want the attribute definitions.
ca_cache: dictionary containing custom attribute definitions.
include_oca: Flag for including object level custom attributes.
"""
definitions = {}
aliases = AttributeInfo.gather_aliases(object_class)
filtered_aliases = [
(attr, props) for attr, props in aliases.items()
if props is not None and not is_filter_only(props)
]
# push the extra delete column at the end to override any custom behavior
if hasattr(object_class, "slug"):
filtered_aliases.append(("delete", {
"display_name": "Delete",
"description": "",
}))
unique_columns = cls.get_unique_constraints(object_class)
for key, value in filtered_aliases:
column = object_class.__table__.columns.get(key)
definition = {
"display_name": value,
"attr_name": key,
"mandatory": False if column is None else not column.nullable,
"unique": key in unique_columns,
"description": "",
"type": cls.Type.PROPERTY,
"handler_key": key,
}
if isinstance(value, dict):
definition.update(value)
definitions[key] = definition
if object_class.__name__ not in EXCLUDE_CUSTOM_ATTRIBUTES:
definitions.update(
cls.get_custom_attr_definitions(object_class, ca_cache=ca_cache,
include_oca=include_oca))
if object_class.__name__ not in EXCLUDE_MAPPINGS:
definitions.update(cls.get_mapping_definitions(object_class))
return definitions
@classmethod
def get_attr_definitions_array(cls, object_class, ca_cache=None):
""" get all column definitions containing only json serializable data """
definitions = cls.get_object_attr_definitions(object_class,
ca_cache=ca_cache)
order = cls.get_column_order(definitions.keys())
result = []
for key in order:
item = definitions[key]
item["key"] = key
result.append(item)
return result
@classmethod
def get_column_order(cls, attr_list):
""" Sort attribute list
The attribute list is sorted according to three rules:
- attributes listed in ATTRIBUTE_ORDER come first, in the order defined there.
- custom attributes follow, sorted alphabetically.
- mapping attributes are sorted alphabetically and placed last.
"""
attr_set = set(attr_list)
default_attrs = [v for v in ATTRIBUTE_ORDER if v in attr_set]
default_set = set(default_attrs)
other_attrs = [v for v in attr_list if v not in default_set]
custom_attrs = [v for v in other_attrs if not v.lower().startswith("map:")]
mapping_attrs = [v for v in other_attrs if v.lower().startswith("map:")]
custom_attrs.sort(key=lambda x: x.lower())
mapping_attrs.sort(key=lambda x: x.lower())
return default_attrs + custom_attrs + mapping_attrs
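# Minimal illustrative sketch (hypothetical column names, added for clarity):
# default columns keep their ATTRIBUTE_ORDER position, other columns follow
# alphabetically, and "map:*" columns are placed last.
def _column_order_example():
    order = AttributeInfo.get_column_order(
        ["map:program", "title", "__custom__:severity", "slug"])
    assert order == ["slug", "title", "__custom__:severity", "map:program"]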
class SanitizeHtmlInfo(AttributeInfo):
def __init__(self, tgt_class):
self._sanitize_html = SanitizeHtmlInfo.gather_attrs(
tgt_class, '_sanitize_html')
|
|
# Brick Breaker Pygame Version 2.0.1
import sys
import pygame
SCREEN_SIZE = 640,480
# Object dimensions
BRICK_WIDTH = 60
BRICK_HEIGHT = 15
PADDLE_WIDTH = 60
PADDLE_HEIGHT = 12
BALL_DIAMETER = 16
BALL_RADIUS = BALL_DIAMETER / 2
MAX_PADDLE_X = SCREEN_SIZE[0] - PADDLE_WIDTH
MAX_BALL_X = SCREEN_SIZE[0] - BALL_DIAMETER
MAX_BALL_Y = SCREEN_SIZE[1] - BALL_DIAMETER
# Paddle Y coordinate
PADDLE_Y = SCREEN_SIZE[1] - PADDLE_HEIGHT - 10
# Color constants
BLACK = (0,0,0)
WHITE = (255,255,255)
BLUE = (0,0,255)
GOLD = (200,200,0)
GREEN = (0,255,0)
SKY_BLUE = (0,239,255)
ORANGE = (255,154,0)
RED = (255,0,0)
# State constants
STATE_BALL_IN_PADDLE = 0
STATE_PLAYING = 1
STATE_WON = 2
STATE_GAME_OVER = 3
STATE_NEXT_LEVEL = 4
STATE_PAUSE = 5
class Bricka:
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption("Brick Breaker Version 2.0.1")
self.clock = pygame.time.Clock()
if pygame.font:
self.font = pygame.font.Font(None,30)
else:
self.font = None
# Initial values for lives, level, score and paddle speed at program start.
self.lives = 3
self.level = 1
self.score = 0
self.Paddle_Speed = 18
self.init_game()
def init_game(self):
self.state = STATE_BALL_IN_PADDLE
self.paddle = pygame.Rect(300,PADDLE_Y,PADDLE_WIDTH,PADDLE_HEIGHT)
self.ball = pygame.Rect(300,PADDLE_Y - BALL_DIAMETER,BALL_DIAMETER,BALL_DIAMETER)
if self.level == 1:
self.ball_vel = [5,-5]
elif self.level == 2:
self.ball_vel = [6,-6]
elif self.level == 3:
self.ball_vel = [7,-7]
elif self.level == 4:
self.ball_vel = [8,-8]
else:
self.ball_vel = [9,-9]
self.create_bricks()
def create_bricks(self):
y_ofs = 35
self.bricks = []
for i in range(7):
x_ofs = 35
for j in range(8):
self.bricks.append(pygame.Rect(x_ofs,y_ofs,BRICK_WIDTH,BRICK_HEIGHT))
x_ofs += BRICK_WIDTH + 10
y_ofs += BRICK_HEIGHT + 5
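# Layout note (derived from the loops above): every level starts with a wall of
# 7 rows x 8 columns of bricks, spaced 10 px apart horizontally and 5 px
# vertically, beginning 35 px in from the top-left corner of the screen.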
def draw_bricks(self):
for brick in self.bricks:
pygame.draw.rect(self.screen, self.BRICK_COLOUR, brick)
def check_input(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]:
self.paddle.left -= self.Paddle_Speed
if self.paddle.left < 0:
self.paddle.left = 0
if keys[pygame.K_RIGHT]:
self.paddle.left += self.Paddle_Speed
if self.paddle.left > MAX_PADDLE_X:
self.paddle.left = MAX_PADDLE_X
if keys[pygame.K_SPACE] and self.state == STATE_BALL_IN_PADDLE:
self.state = STATE_PLAYING
elif keys[pygame.K_RETURN] and self.state == STATE_NEXT_LEVEL:
self.level += 1
self.init_game()
self.level_difficulty()
elif keys[pygame.K_RETURN] and (self.state == STATE_GAME_OVER or self.state == STATE_WON):
self.init_game()
self.lives = 3
self.score = 0
self.level = 1
self.Paddle_Speed = 20
self.ball_vel = [5,-5]
if len(self.bricks) == 0:
self.state = STATE_NEXT_LEVEL
if keys[pygame.K_SPACE] and self.ball.top > self.paddle.top:
if self.state == STATE_GAME_OVER and self.lives > 0:
self.state = STATE_BALL_IN_PADDLE
else:
self.state = STATE_GAME_OVER
def move_ball(self):
self.ball.left += self.ball_vel[0]
self.ball.top += self.ball_vel[1]
if self.ball.left <= 0:
self.ball.left = 0
self.ball_vel[0] = -self.ball_vel[0]
elif self.ball.left >= MAX_BALL_X:
self.ball.left = MAX_BALL_X
self.ball_vel[0] = -self.ball_vel[0]
if self.ball.top < 0:
self.ball.top = 0
self.ball_vel[1] = -self.ball_vel[1]
elif self.ball.top >= MAX_BALL_Y:
self.ball.top = MAX_BALL_Y
self.ball_vel[1] = -self.ball_vel[1]
def handle_collisions(self):
for brick in self.bricks:
if self.ball.colliderect(brick):
if self.BRICK_COLOUR == GOLD:
self.score += 3
elif self.BRICK_COLOUR == RED:
self.score += 5
elif self.BRICK_COLOUR == SKY_BLUE:
self.score += 8
elif self.BRICK_COLOUR == ORANGE:
self.score += 10
else:
self.score += (self.level*5)
self.ball_vel[1] = -self.ball_vel[1]
self.bricks.remove(brick)
break
if self.ball.colliderect(self.paddle):
self.ball.top = PADDLE_Y - BALL_DIAMETER
self.ball_vel[1] = -self.ball_vel[1]
elif self.ball.top > self.paddle.top:
self.lives -= 1
if self.lives > 0:
self.state = STATE_BALL_IN_PADDLE
# Once all lives are gone, decide whether the player has won or lost.
elif self.lives == 0 and self.score >= 1500:
self.state = STATE_WON
elif self.lives == 0 and self.score < 1500:
self.state = STATE_GAME_OVER
def level_difficulty(self):
if self.level == 2:
self.Paddle_Speed = 16
self.ball_vel = [6,-6]
self.lives += 1
elif self.level == 3:
self.Paddle_Speed = 14
self.ball_vel = [7,-7]
self.lives += 2
elif self.level == 4:
self.Paddle_Speed = 12
self.ball_vel = [8,-8]
self.lives += 3
else:
self.Paddle_Speed = 10
self.ball_vel = [9,-9]
self.lives += 4
def show_stats(self):
if self.font:
font_surface = self.font.render("SCORE: " + str(self.score) + " LIVES: " + str(self.lives) + " LEVEL: " + str(self.level), False, WHITE)
self.screen.blit(font_surface, (205,5))
def show_message(self,message):
if self.font:
size = self.font.size(message)
font_surface = self.font.render(message,False, WHITE)
x = (SCREEN_SIZE[0] - size[0]) / 2
y = (SCREEN_SIZE[1] - size[1]) / 2
self.screen.blit(font_surface, (x,y))
def run(self):
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
self.clock.tick(50)
self.screen.fill(BLACK)
self.check_input()
if self.level == 1:
self.BRICK_COLOUR = GOLD
elif self.level == 2:
self.BRICK_COLOUR = SKY_BLUE
elif self.level == 3:
self.BRICK_COLOUR = ORANGE
elif self.level == 4:
self.BRICK_COLOUR = RED
else:
self.BRICK_COLOUR = GREEN
if self.state == STATE_PLAYING:
self.move_ball()
self.handle_collisions()
elif self.state == STATE_BALL_IN_PADDLE:
self.ball.left = self.paddle.left + self.paddle.width / 2
self.ball.top = self.paddle.top - self.ball.height
self.show_message("PRESS SPACE TO LAUNCH THE BALL")
elif self.state == STATE_GAME_OVER:
self.show_message("GAME OVER. PRESS ENTER TO PLAY AGAIN")
elif self.state == STATE_WON:
self.show_message("YOU WON! PRESS ENTER TO PLAY AGAIN")
elif self.state == STATE_NEXT_LEVEL:
self.show_message("YOU WON THIS LEVEL! PRESS TO CONTINUE")
self.draw_bricks()
# Draw paddle
pygame.draw.rect(self.screen, BLUE, self.paddle)
# Draw ball
pygame.draw.circle(self.screen, WHITE, (int(self.ball.left + BALL_RADIUS),int(self.ball.top + BALL_RADIUS)),int(BALL_RADIUS))
self.show_stats()
pygame.display.flip()
try:
if __name__ == "__main__":
Bricka().run()
except SystemExit:
print("The game has quit successfully! Thanks for playing our game!")
|
|
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Eduardo S. Scarpellini
# @author: Luiz Ozaki
__copyright__ = "Copyright 2012, Locaweb IDC"
import re
import os
import sys
import pwd
from multiprocessing import Process
from bottle import ServerAdapter, debug, run, route, get, put, delete, error, request, response, abort
from netl2api.server.http_cache import cached, invalidate_cache
from netl2api.server.http_utils import reply_json, validate_input, context
from netl2api.server.workers import switch_cfg_persistence
from netl2api.server.workers.switch_cfg_persistence_utils import defer_save_switch_cfg
from netl2api.lib.utils import get_switch_instance
from netl2api.lib.config import get_netl2server_cfg, setup_netl2server_logger, get_devices_cfg
cfg = get_netl2server_cfg()
logger = setup_netl2server_logger(cfg)
netl2debug = cfg.get("logger", "level").lower() == "debug"
RE_TYPE_VLAN_TAGGED = re.compile(r"^(?:True|False)$", re.IGNORECASE)
def log_request_ahead(msg=None, msg_args=None):
""" use @log_request_ahead between @authorize and @cached """
def proxy(f):
def log(*args, **kwargs):
if msg is not None:
lmsg = msg
lmsg_args = msg_args
if msg_args is not None:
lmsg = lmsg % tuple([kwargs.get(a) for a in lmsg_args])
logger.info("%s -- context: %s" % (lmsg, request["context"]))
return f(*args, **kwargs)
return log
return proxy
# Force Exception if using devices.cfg and permissions are wrong
dev_cfg = get_devices_cfg()
@get("/devices")
@context
@log_request_ahead("Listing available devices")
@reply_json
def devices_list():
# MUST return ONLY switch names -- for CLI completion purpose
#logger.info("Listing available devices -- context: %s" % request["context"])
return get_devices_cfg().keys()
@get("/info/<device>")
@context
@log_request_ahead("Showing generic information for device %s", ("device",))
@reply_json
@cached(ttl=86400)
def device_info(device=None):
#logger.info("Showing generic information for device %s -- context: %s" %\
# (device, request["context"]))
swinfo = {}
swinst = get_switch_instance(device)
swinfo["hostname"] = swinst.show_hostname()
swinfo["version"] = swinst.show_version()
swinfo["l2api"] = { "device.mgmt-api": "%s.%s" % (swinst.__class__.__module__,
swinst.__class__.__name__),
"device.mgmt-host": swinst.transport.host,
"device.vendor": swinst.__VENDOR__,
"device.hwtype": swinst.__HWTYPE__ }
return swinfo
@get("/version/<device>")
@context
@log_request_ahead("Showing version information from device %s", ("device",))
@reply_json
@cached(ttl=86400)
def show_version(device=None):
#logger.info("Showing version information from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
defer_save_switch_cfg(device)
return swinst.show_version()
@get("/system/<device>")
@context
@log_request_ahead("Showing system information from device '%s'", ("device",))
@reply_json
@cached(ttl=86400)
def show_system(device=None):
#logger.info("Showing system information from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
return swinst.show_system()
RE_ROUTE_INTERFACE_ACTIONS = re.compile(r"^(.+)/((?:at|de)tach_vlan|change_description|(?:dis|en)able)$")
@route(["/interfaces/<device>", "/interfaces/<device>/<remaining_path:path>"], ["get", "put"])
@context
def interfaces_route_actions(device=None, remaining_path=None):
if request.method.lower() == "get":
return show_interfaces(device=device, interface_id=remaining_path)
if request.method.lower() == "put":
m = RE_ROUTE_INTERFACE_ACTIONS.search(remaining_path)
if m is None:
abort(404, "Not Found")
route_act = m.group(2)
interface_id=m.group(1).lower()
if route_act == "attach_vlan":
return interface_attach_vlan(device=device, interface_id=interface_id)
if route_act == "detach_vlan":
return interface_detach_vlan(device=device, interface_id=interface_id)
if route_act == "change_description":
return change_interface_description(device=device, interface_id=interface_id)
if route_act == "enable":
return enable_interface(device=device, interface_id=interface_id)
if route_act == "disable":
return disable_interface(device=device, interface_id=interface_id)
abort(404, "Not Found")
abort(405, "Method Not Allowed")
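# Routing sketch (example interface id "te1/0/1" is illustrative only): the regex
# above dispatches PUT requests such as
#   PUT /interfaces/<device>/te1/0/1/attach_vlan -> interface_attach_vlan
#   PUT /interfaces/<device>/te1/0/1/enable      -> enable_interface
# while GET /interfaces/<device>[/<interface_id>] is served by show_interfaces.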
@log_request_ahead("Showing interfaces informations from device '%s'", ("device",))
@reply_json
@cached(ttl=3600)
def show_interfaces(device=None, interface_id=None):
#logger.info("Showing interfaces informations from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
return swinst.show_interfaces(interface_id=interface_id)
@reply_json
@validate_input(src="forms", vlan_id=int, tagged=RE_TYPE_VLAN_TAGGED)
def interface_attach_vlan(device=None, interface_id=None):
logger.info("Attaching VLAN to the interface '%s' in device '%s' -- context: %s" %\
(interface_id, device, request["context"]))
vlan_id = request.forms.get("vlan_id")
tagged = request.forms.get("tagged", "").lower() == "true"
swinst = get_switch_instance(device)
swinst.interface_attach_vlan(interface_id=interface_id, vlan_id=vlan_id, tagged=tagged)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@validate_input(src="forms", vlan_id=int, tagged=RE_TYPE_VLAN_TAGGED)
def interface_detach_vlan(device=None, interface_id=None):
logger.info("Detaching VLAN from the interface '%s' in device '%s' -- context: %s" %\
(interface_id, device, request["context"]))
vlan_id = request.forms.get("vlan_id")
tagged = request.forms.get("tagged", "").lower() == "true"
swinst = get_switch_instance(device)
swinst.interface_detach_vlan(interface_id=interface_id, vlan_id=vlan_id, tagged=tagged)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@validate_input(src="forms", interface_description=str)
def change_interface_description(device=None, interface_id=None):
logger.info("Changing interface '%s' description in device '%s' -- context: %s" %\
(interface_id, device, request["context"]))
interface_description = request.forms.get("interface_description")
swinst = get_switch_instance(device)
swinst.change_interface_description(interface_id=interface_id,
interface_description=interface_description)
defer_save_switch_cfg(device)
invalidate_cache("/interfaces/%s" % device)
@reply_json
def enable_interface(device=None, interface_id=None):
logger.info("Enabling interface '%s' in device '%s' -- context: %s" %\
(interface_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.enable_interface(interface_id=interface_id)
defer_save_switch_cfg(device)
invalidate_cache("/interfaces/%s" % device)
@reply_json
def disable_interface(device=None, interface_id=None):
logger.info("Disabling interface '%s' in device '%s' -- context: %s" %\
(interface_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.disable_interface(interface_id=interface_id)
defer_save_switch_cfg(device)
invalidate_cache("/interfaces/%s" % device)
@put("/vlans/<device>/<vlan_id>")
@context
@reply_json
def create_vlan(device=None, vlan_id=None):
logger.info("Creating new VLAN with id '%s' in device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
vlan_description = request.forms.get("vlan_description")
swinst = get_switch_instance(device)
swinst.create_vlan(vlan_id=vlan_id, vlan_description=vlan_description)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
response.status = 201
@put("/vlans/<device>/<vlan_id>/change_description")
@context
@reply_json
@validate_input(src="forms", vlan_description=str)
def change_vlan_description(device=None, vlan_id=None):
logger.info("Changing VLAN '%s' description in device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
vlan_description = request.forms.get("vlan_description")
swinst = get_switch_instance(device)
swinst.change_vlan_description(vlan_id=vlan_id,
vlan_description=vlan_description)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@delete("/vlans/<device>/<vlan_id>")
@context
@reply_json
def destroy_vlan(device=None, vlan_id=None):
logger.info("Removing VLAN '%s' from device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.destroy_vlan(vlan_id=vlan_id)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
response.status = 204
@get(["/vlans/<device>", "/vlans/<device>/<vlan_id>"])
@context
@log_request_ahead("Showing VLAN information from device %s", ("device",))
@reply_json
@cached(ttl=3600)
def show_vlans(device=None, vlan_id=None):
#logger.info("Showing VLAN information from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
return swinst.show_vlans(vlan_id=vlan_id)
@put("/vlans/<device>/<vlan_id>/enable")
@context
@reply_json
def enable_vlan(device=None, vlan_id=None):
logger.info("Enabling VLAN '%s' in device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.enable_vlan(vlan_id=vlan_id)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@put("/vlans/<device>/<vlan_id>/disable")
@context
@reply_json
def disable_vlan(device=None, vlan_id=None):
logger.info("Disabling VLAN '%s' in device '%s' -- context: %s" %\
(vlan_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.disable_vlan(vlan_id=vlan_id)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@put("/lags/<device>/<lag_id>")
@context
@reply_json
def create_lag(device=None, lag_id=None):
logger.info("Creating new LAG with id '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
lag_description = request.forms.get("lag_description")
swinst = get_switch_instance(device)
swinst.create_lag(lag_id=lag_id, lag_description=lag_description)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
response.status = 201
@put("/lags/<device>/<lag_id>/change_description")
@context
@reply_json
@validate_input(src="forms", lag_description=str)
def change_lag_description(device=None, lag_id=None):
logger.info("Changing LAG '%s' description in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
lag_description = request.forms.get("lag_description")
swinst = get_switch_instance(device)
swinst.change_lag_description(lag_id=lag_id,
lag_description=lag_description)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@delete("/lags/<device>/<lag_id>")
@context
@reply_json
def destroy_lag(device=None, lag_id=None):
logger.info("Removing LAG '%s' from device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.destroy_lag(lag_id=lag_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
response.status = 204
@get(["/lags/<device>", "/lags/<device>/<lag_id>"])
@context
@log_request_ahead("Showing LAG information from device %s", ("device",))
@reply_json
@cached(ttl=3600)
def show_lags(device=None, lag_id=None):
#logger.info("Showing LAG information from device '%s' -- context: %s" %\
# (device, request["context"]))
swinst = get_switch_instance(device)
return swinst.show_lags(lag_id=lag_id)
@put("/lags/<device>/<lag_id>/enable")
@context
@reply_json
def enable_lag(device=None, lag_id=None):
logger.info("Enabling LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.enable_lag(lag_id=lag_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@put("/lags/<device>/<lag_id>/disable")
@context
@reply_json
def disable_lag(device=None, lag_id=None):
logger.info("Disabling LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
swinst = get_switch_instance(device)
swinst.disable_lag(lag_id=lag_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@put("/lags/<device>/<lag_id>/attach_interface")
@context
@validate_input(src="forms", interface_id=str)
@reply_json
def lag_attach_interface(device=None, lag_id=None):
logger.info("Attaching a new interface to LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
interface_id = request.forms.get("interface_id")
swinst = get_switch_instance(device)
swinst.lag_attach_interface(lag_id=lag_id, interface_id=interface_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@put("/lags/<device>/<lag_id>/detach_interface")
@context
@validate_input(src="forms", interface_id=str)
@reply_json
def lag_detach_interface(device=None, lag_id=None):
logger.info("Detaching an interface from LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
interface_id = request.forms.get("interface_id")
swinst = get_switch_instance(device)
swinst.lag_detach_interface(lag_id=lag_id, interface_id=interface_id)
defer_save_switch_cfg(device)
invalidate_cache("/lags/%s" % device)
@put("/lags/<device>/<lag_id>/attach_vlan")
@context
@validate_input(src="forms", vlan_id=int, tagged=RE_TYPE_VLAN_TAGGED)
@reply_json
def lag_attach_vlan(device=None, lag_id=None):
logger.info("Attaching a new VLAN to LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
vlan_id = request.forms.get("vlan_id")
tagged = request.forms.get("tagged", "").lower() == "true"
swinst = get_switch_instance(device)
swinst.lag_attach_vlan(lag_id=lag_id, vlan_id=vlan_id, tagged=tagged)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
@put("/lags/<device>/<lag_id>/detach_vlan")
@context
@validate_input(src="forms", vlan_id=int, tagged=RE_TYPE_VLAN_TAGGED)
@reply_json
def lag_detach_vlan(device=None, lag_id=None):
logger.info("Detaching a VLAN from LAG '%s' in device '%s' -- context: %s" %\
(lag_id, device, request["context"]))
vlan_id = request.forms.get("vlan_id")
tagged = request.forms.get("tagged", "").lower() == "true"
swinst = get_switch_instance(device)
swinst.lag_detach_vlan(lag_id=lag_id, vlan_id=vlan_id, tagged=tagged)
defer_save_switch_cfg(device)
invalidate_cache("/vlans/%s" % device)
#@get(["/networkpath/<from_device>", "/networkpath/<from_device>/<to_device>"])
#@context
#@log_request_ahead("Tracing network-path from device '%s' to '%s'", ("from_device", "to_device"))
#@reply_json
#@cached(ttl=86400)
#def trace_network_path(from_device=None, to_device=None):
# #logger.info("Tracing network-path from device '%s' to '%s' -- context: %s" %\
# # (from_device, to_device, request["context"]))
# network_paths = find_network_paths(graph_repr(from_device=from_device),
# from_device=from_device, to_device=to_device)
# #logger.debug("Path from device '%s' to device '%s': %s" % (from_device, to_device, network_paths))
# return network_paths
@error(400)
@reply_json
def error400(err):
return {"server.status": err.status, "server.message": err.output}
@error(403)
@reply_json
def error403(err):
return {"server.status": err.status, "server.message": err.output}
@error(404)
@reply_json
def error404(err):
return {"server.status": err.status, "server.message": err.output}
@error(405)
@reply_json
def error405(err):
return {"server.status": err.status, "server.message": err.output}
@error(500)
@reply_json
def error500(err):
err_type = repr(err.exception).split("(")[0]
err_msg = err.exception.message
err_info = { "server.status": err.status,
"app.error.type": err_type,
"app.error.message": err_msg }
#if isinstance(err.exception, L2Exception):
if str(type(err.exception)).find("netl2api.l2api") > -1:
err_info["server.message"] = "L2API Error"
else:
err_info["server.message"] = "Internal Server Error"
return err_info
class PasteServerAdapter(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
if not self.quiet:
from paste.translogger import TransLogger
handler = TransLogger(handler)
httpserver.serve(handler, host=self.host, port=str(self.port), protocol_version="HTTP/1.1",
daemon_threads=True, socket_timeout=600,
use_threadpool=cfg.get("httpd", "use_threadpool").lower() == "true",
threadpool_workers=cfg.getint("httpd", "threadpool_workers"),
threadpool_options={ "spawn_if_under": cfg.getint("httpd", "threadpool_workers")/2,
"hung_check_period": 60,
"kill_thread_limit": 900 },
**self.options)
def start_workers():
if cfg.get("job.switch_cfg_persistence", "enabled") == "true":
p_switch_cfg_persistence = Process(target=switch_cfg_persistence.daemon,
name="netl2api [netl2server:http-daemon/job/switch-cfg-persistence]")
p_switch_cfg_persistence.start()
else:
logger.info("Persistence-control job is disabled")
def start():
debug(netl2debug)
ps_owner = cfg.get("httpd", "user")
if ps_owner:
os.setuid(pwd.getpwnam(ps_owner)[2])
try:
from setproctitle import setproctitle
except ImportError:
pass
else:
setproctitle("netl2api [netl2server:http-daemon]")
logger.info("Starting netl2server...")
start_workers()
run(server=PasteServerAdapter, host=cfg.get("httpd", "host"), port=cfg.getint("httpd", "port"))
def main(action="foreground"):
from supay import Daemon
daemon = Daemon(name="netl2server", catch_all_log=cfg.get("httpd", "logfile"))
if action == "start":
daemon.start()
start()
elif action == "foreground":
start()
elif action == "stop":
daemon.stop()
elif action == "status":
daemon.status()
else:
cli_help()
def cli_help():
print "Usage: %s <start|stop|status|foreground>" % sys.argv[0]
sys.exit(1)
def cli():
if len(sys.argv) < 2:
cli_help()
main(action=sys.argv[1])
if __name__ == '__main__':
cli()
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import click
from .type import QIIME2Type
# Sentinel to avoid the situation where `None` *is* the default value.
NoDefault = {}
class GeneratedOption(click.Option):
def __init__(self, *, prefix, name, repr, ast, multiple, is_bool_flag,
metadata, metavar, default=NoDefault, description=None,
**attrs):
import q2cli.util
if metadata is not None:
prefix = 'm'
if multiple is not None:
multiple = list if multiple == 'list' else set
if is_bool_flag:
yes = q2cli.util.to_cli_name(name)
no = q2cli.util.to_cli_name('no_' + name)
opt = f'--{prefix}-{yes}/--{prefix}-{no}'
elif metadata is not None:
cli_name = q2cli.util.to_cli_name(name)
opt = f'--{prefix}-{cli_name}-file'
if metadata == 'column':
self.q2_extra_dest, self.q2_extra_opts, _ = \
self._parse_decls([f'--{prefix}-{cli_name}-column'], True)
else:
cli_name = q2cli.util.to_cli_name(name)
opt = f'--{prefix}-{cli_name}'
click_type = QIIME2Type(ast, repr, is_output=prefix == 'o')
attrs['metavar'] = metavar
attrs['multiple'] = multiple is not None
attrs['param_decls'] = [opt]
attrs['required'] = default is NoDefault
attrs['help'] = self._add_default(description, default)
if default is not NoDefault:
attrs['default'] = default
# This is to evade click's __DEBUG__ check
if not is_bool_flag:
attrs['type'] = click_type
else:
attrs['type'] = None
super().__init__(**attrs)
# put things back the way they _should_ be after evading __DEBUG__
self.is_bool_flag = is_bool_flag
self.type = click_type
# attrs we will use elsewhere
self.q2_multiple = multiple
self.q2_prefix = prefix
self.q2_name = name
self.q2_ast = ast
self.q2_metadata = metadata
@property
def meta_help(self):
if self.q2_metadata == 'file':
return 'multiple arguments will be merged'
def _add_default(self, desc, default):
if desc is not None:
desc += ' '
else:
desc = ''
if default is not NoDefault:
if default is None:
desc += '[optional]'
else:
desc += '[default: %r]' % (default,)
return desc
def consume_value(self, ctx, opts):
if self.q2_metadata == 'column':
return self._consume_metadata(ctx, opts)
else:
return super().consume_value(ctx, opts)
def _consume_metadata(self, ctx, opts):
# double consume
md_file = super().consume_value(ctx, opts)
# consume_value() uses self.name, so swap it temporarily and restore it afterwards
backup, self.name = self.name, self.q2_extra_dest
md_col = super().consume_value(ctx, opts)
self.name = backup
if (md_col is None) != (md_file is None):
# missing one or the other
if md_file is None:
raise click.MissingParameter(ctx=ctx, param=self)
else:
raise click.MissingParameter(param_hint=self.q2_extra_opts,
ctx=ctx, param=self)
if md_col is None and md_file is None:
return None
else:
return (md_file, md_col)
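# Usage sketch (hypothetical parameter name "metadata", added for clarity): a
# column-typed metadata parameter is consumed from two options that must be
# supplied together, e.g.
#   --m-metadata-file md.tsv --m-metadata-column subject
# yielding the tuple ('md.tsv', 'subject') at this stage; supplying only one of
# the two raises click.MissingParameter.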
def get_help_record(self, ctx):
record = super().get_help_record(ctx)
if self.is_bool_flag:
metavar = self.make_metavar()
if metavar:
record = (record[0] + ' ' + self.make_metavar(), record[1])
elif self.q2_metadata == 'column':
opts = (record[0], self.q2_extra_opts[0] + ' COLUMN ')
record = (opts, record[1])
return record
# Override
def add_to_parser(self, parser, ctx):
shared = dict(dest=self.name, nargs=0, obj=self)
if self.q2_metadata == 'column':
parser.add_option(self.opts, action='store', dest=self.name,
nargs=1, obj=self)
parser.add_option(self.q2_extra_opts, action='store',
dest=self.q2_extra_dest, nargs=1, obj=self)
elif self.is_bool_flag:
if self.multiple:
action = 'append_maybe'
else:
action = 'store_maybe'
parser.add_option(self.opts, action=action, const=True,
**shared)
parser.add_option(self.secondary_opts, action=action,
const=False, **shared)
elif self.multiple:
action = 'append_greedy'
parser.add_option(self.opts, action='append_greedy', **shared)
else:
super().add_to_parser(parser, ctx)
def get_default(self, ctx):
if self.required:
raise click.MissingParameter(ctx=ctx, param=self)
return super().get_default(ctx)
def full_process_value(self, ctx, value):
try:
return super().full_process_value(ctx, value)
except click.MissingParameter:
if not (self.q2_prefix == 'o'
and ctx.params.get('output_dir', False)):
raise
def type_cast_value(self, ctx, value):
import sys
import q2cli.util
import qiime2.sdk.util
if self.multiple:
if value == () or value is None:
return None
elif self.q2_prefix == 'i':
value = super().type_cast_value(ctx, value)
if self.q2_multiple is set:
self._check_length(value, ctx)
value = self.q2_multiple(value)
type_expr = qiime2.sdk.util.type_from_ast(self.q2_ast)
args = ', '.join(map(repr, (x.type for x in value)))
if value not in type_expr:
raise click.BadParameter(
'received <%s> as an argument, which is incompatible'
' with parameter type: %r' % (args, type_expr),
ctx=ctx, param=self)
return value
elif self.q2_metadata == 'file':
value = super().type_cast_value(ctx, value)
if len(value) == 1:
return value[0]
else:
try:
return value[0].merge(*value[1:])
except Exception as e:
header = ("There was an issue with merging "
"QIIME 2 Metadata:")
tb = 'stderr' if '--verbose' in sys.argv else None
q2cli.util.exit_with_error(
e, header=header, traceback=tb)
elif self.q2_prefix == 'p':
try:
if self.q2_multiple is set:
self._check_length(value, ctx)
value = qiime2.sdk.util.parse_primitive(self.q2_ast, value)
except ValueError:
args = ', '.join(map(repr, value))
expr = qiime2.sdk.util.type_from_ast(self.q2_ast)
raise click.BadParameter(
'received <%s> as an argument, which is incompatible'
' with parameter type: %r' % (args, expr),
ctx=ctx, param=self)
return value
return super().type_cast_value(ctx, value)
def _check_length(self, value, ctx):
import collections
counter = collections.Counter(value)
dups = ', '.join(map(repr, (v for v, n in counter.items() if n > 1)))
args = ', '.join(map(repr, value))
if dups:
raise click.BadParameter(
'received <%s> as an argument, which contains duplicates'
' of the following: <%s>' % (args, dups), ctx=ctx, param=self)
|
|
from django.db import models
from django.contrib.auth.models import User
import random
import string
import plistlib
from xml.parsers.expat import ExpatError
import base64
import bz2
from datetime import datetime
def GenerateKey():
key = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(128))
try:
MachineGroup.objects.get(key=key)
return GenerateKey()
except MachineGroup.DoesNotExist:
return key
def GenerateAPIKey():
key = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(24))
try:
ApiKey.objects.get(public_key=key)
return GenerateAPIKey()
except ApiKey.DoesNotExist:
return key
class UserProfile(models.Model):
user = models.OneToOneField(User, unique=True)
LEVEL_CHOICES = (
('SO', 'Stats Only'),
('RO', 'Read Only'),
('RW', 'Read Write'),
('GA', 'Global Admin'),
)
level = models.CharField(max_length=2, choices=LEVEL_CHOICES, default='SO')
def __unicode__(self):
return self.user.username
User.userprofile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
class BusinessUnit(models.Model):
name = models.CharField(max_length=100)
users = models.ManyToManyField(User, blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
class MachineGroup(models.Model):
business_unit = models.ForeignKey(BusinessUnit)
name = models.CharField(max_length=100)
key = models.CharField(max_length=255, unique=True, blank=True, null=True, editable=False)
def save(self):
if not self.id:
self.key = GenerateKey()
super(MachineGroup, self).save()
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
class Machine(models.Model):
OS_CHOICES = (
('Darwin', 'OS X'),
('Windows', 'Windows'),
('Linux', 'Linux'),
)
machine_group = models.ForeignKey(MachineGroup)
serial = models.CharField(max_length=100, unique=True)
hostname = models.CharField(max_length=256, null=True, blank=True)
operating_system = models.CharField(max_length=256, null=True, blank=True)
memory = models.CharField(max_length=256, null=True, blank=True)
memory_kb = models.IntegerField(default=0)
munki_version = models.CharField(max_length=256, null=True, blank=True)
manifest = models.CharField(max_length=256, null=True, blank=True)
hd_space = models.CharField(max_length=256, null=True, blank=True)
hd_total = models.CharField(max_length=256, null=True, blank=True)
hd_percent = models.CharField(max_length=256, null=True, blank=True)
console_user = models.CharField(max_length=256, null=True, blank=True)
machine_model = models.CharField(max_length=256, null=True, blank=True)
cpu_type = models.CharField(max_length=256, null=True, blank=True)
cpu_speed = models.CharField(max_length=256, null=True, blank=True)
os_family = models.CharField(max_length=256, choices=OS_CHOICES, verbose_name="OS Family", default="Darwin")
last_checkin = models.DateTimeField(blank=True,null=True)
first_checkin = models.DateTimeField(blank=True,null=True, auto_now_add=True)
report = models.TextField(editable=True, null=True)
errors = models.IntegerField(default=0)
warnings = models.IntegerField(default=0)
activity = models.TextField(editable=False, null=True, blank=True)
puppet_version = models.TextField(null=True, blank=True)
sal_version = models.TextField(null=True, blank=True)
last_puppet_run = models.DateTimeField(blank=True,null=True)
puppet_errors = models.IntegerField(default=0)
def encode(self, plist):
string = plistlib.writePlistToString(plist)
bz2data = bz2.compress(string)
b64data = base64.b64encode(bz2data)
return b64data
def decode(self, data):
# this has some sucky workarounds for odd handling
# of UTF-8 data in sqlite3
try:
plist = plistlib.readPlistFromString(data)
return plist
except:
try:
plist = plistlib.readPlistFromString(data.encode('UTF-8'))
return plist
except:
try:
return self.b64bz_decode(data)
except:
return dict()
def b64bz_decode(self, data):
try:
bz2data = base64.b64decode(data)
string = bz2.decompress(bz2data)
plist = plistlib.readPlistFromString(string)
return plist
except Exception:
return {}
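# Round-trip sketch (illustrative dict, not part of the original module): client
# reports are shipped as base64(bz2(plist)), so for a Machine instance m,
#   m.b64bz_decode(m.encode({'ConsoleUser': 'admin'})) == {'ConsoleUser': 'admin'}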
def get_report(self):
return self.decode(self.report)
def get_activity(self):
return self.decode(self.activity)
def update_report(self, base64bz2report):
# Save report.
try:
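# '+' characters in the base64 payload typically arrive as spaces when the
# report is submitted form-encoded, so they are restored before decoding.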
base64bz2report = base64bz2report.replace(" ", "+")
plist = self.b64bz_decode(base64bz2report)
#self.report = base64bz2report
self.report = plistlib.writePlistToString(plist)
except:
plist = None
self.report = ''
if plist is None:
self.activity = None
self.errors = 0
self.warnings = 0
self.console_user = "<None>"
return
# Check activity.
activity = dict()
for section in ("ItemsToInstall",
"InstallResults",
"ItemsToRemove",
"RemovalResults",
"AppleUpdates"):
if (section in plist) and len(plist[section]):
activity[section] = plist[section]
if activity:
#self.activity = self.encode(activity)
self.activity = plistlib.writePlistToString(activity)
else:
self.activity = None
# Check errors and warnings.
if "Errors" in plist:
self.errors = len(plist["Errors"])
else:
self.errors = 0
if "Warnings" in plist:
self.warnings = len(plist["Warnings"])
else:
self.warnings = 0
# Check console user.
self.console_user = "unknown"
if "ConsoleUser" in plist:
self.console_user = unicode(plist["ConsoleUser"])
def __unicode__(self):
if self.hostname:
return self.hostname
else:
return self.serial
class Meta:
ordering = ['hostname']
def save(self, *args, **kwargs):
self.serial = self.serial.replace('/', '')
self.serial = self.serial.replace('+', '')
super(Machine, self).save()
class Fact(models.Model):
machine = models.ForeignKey(Machine, related_name='facts')
fact_name = models.TextField()
fact_data = models.TextField()
def __unicode__(self):
return '%s: %s' % (self.fact_name, self.fact_data)
class Meta:
ordering = ['fact_name']
class HistoricalFact(models.Model):
machine = models.ForeignKey(Machine, related_name='historical_facts')
fact_name = models.TextField()
fact_data = models.TextField()
fact_recorded = models.DateTimeField(db_index=True)
def __unicode__(self):
return self.fact_name
class Meta:
ordering = ['fact_name', 'fact_recorded']
class Condition(models.Model):
machine = models.ForeignKey(Machine, related_name='conditions')
condition_name = models.TextField()
condition_data = models.TextField()
def __unicode__(self):
return self.condition_name
class Meta:
ordering = ['condition_name']
class OSQueryResult(models.Model):
machine = models.ForeignKey(Machine, related_name='osquery_results')
name = models.CharField(max_length=255)
hostidentifier = models.CharField(max_length=255, null=True, blank=True)
unix_time = models.IntegerField()
def __unicode__(self):
return self.name
class Meta:
ordering = ['unix_time']
class OSQueryColumn(models.Model):
osquery_result = models.ForeignKey(OSQueryResult, related_name='osquery_columns')
column_name = models.TextField()
column_data = models.TextField(null=True, blank=True)
action = models.CharField(max_length=255, null=True, blank=True)
def __unicode__(self):
return self.column_name
class PendingUpdate(models.Model):
machine = models.ForeignKey(Machine, related_name='pending_updates')
update = models.CharField(max_length=255, null=True, blank=True)
update_version = models.CharField(max_length=255, null=True, blank=True)
display_name = models.CharField(max_length=255, null=True, blank=True)
def __unicode__(self):
return self.update
class Meta:
ordering = ['display_name']
unique_together = ("machine", "update")
class PendingAppleUpdate(models.Model):
machine = models.ForeignKey(Machine, related_name='pending_apple_updates')
update = models.CharField(max_length=255, null=True, blank=True)
update_version = models.CharField(max_length=256, null=True, blank=True)
display_name = models.CharField(max_length=256, null=True, blank=True)
def __unicode__(self):
return unicode(self.update) or u''
class Meta:
ordering = ['display_name']
unique_together = ("machine", "update")
class Plugin(models.Model):
PLUGIN_TYPES = (
('facter', 'Facter'),
('munkicondition', 'Munki Condition'),
('osquery', 'osquery'),
('builtin', 'Built In'),
)
name = models.CharField(max_length=255, unique=True)
order = models.IntegerField()
type = models.CharField(max_length=255, choices=PLUGIN_TYPES, default='facter')
def __unicode__(self):
return self.name
class Meta:
ordering = ['order']
class SalSetting(models.Model):
name = models.CharField(max_length=255, unique=True)
value = models.TextField()
def __unicode__(self):
return self.name
class ApiKey(models.Model):
public_key = models.CharField(max_length=255)
private_key = models.CharField(max_length=255)
name = models.CharField(max_length=255)
has_been_seen = models.BooleanField(default=False)
def save(self):
if not self.id:
self.public_key = GenerateAPIKey()
self.private_key = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(64))
super(ApiKey, self).save()
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
unique_together = ("public_key", "private_key")
|
|
import logging
logger = logging.getLogger(__name__)
import itertools
import struct
import math
from .noise import white_noise
from .noise import white_noise_samples
from .noise import red_noise
import sampler
def crop(gens, seconds=5, cropper=None):
'''
Crop the generator to a finite number of frames
Return a generator which outputs the provided generator limited
to enough samples to produce seconds seconds of audio (default 5s)
at the provided frame rate.
'''
if hasattr(gens, "next"):
# single generator
gens = (gens,)
if cropper is None:
cropper = lambda gen: itertools.islice(gen, 0, seconds * sampler.FRAME_RATE)
cropped = [cropper(gen) for gen in gens]
return cropped[0] if len(cropped) == 1 else cropped
def crop_with_fades(gen, seconds, fade_in=0.01, fade_out=0.01):
source = iter(gen)
total_samples = int(seconds * sampler.FRAME_RATE)
fade_in_samples = int(fade_in * sampler.FRAME_RATE)
fade_out_samples = int(fade_out * sampler.FRAME_RATE)
start = itertools.islice(source, 0, fade_in_samples)
middle = itertools.islice(source, 0, total_samples - (fade_in_samples + fade_out_samples))
end = itertools.islice(source, 0, fade_out_samples)
def linear_fade(samples, direction="in"):
for i in xrange(samples):
if direction == "in":
yield ( 1.0 / samples ) * i + 0
elif direction == "out":
yield ( -1.0 / samples ) * i + 1
else:
raise Exception('Fade direction must be "in" or "out"')
while True:
yield 0
for sample in multiply(start, linear_fade(fade_in_samples, direction="in")):
yield sample
for sample in middle:
yield sample
for sample in multiply(end, linear_fade(fade_out_samples, direction="out")):
yield sample
def crop_with_fade_out(gen, seconds, fade=.01):
source = iter(gen)
total_samples = int(seconds * sampler.FRAME_RATE)
fade_samples = int(fade * sampler.FRAME_RATE)
start = itertools.islice(source, 0, total_samples - fade_samples)
end = itertools.islice(source, 0, fade_samples)
def fader():
for i in xrange(fade_samples):
yield 1 - (float(i) / fade_samples) ** 1
while True:
yield 0
for sample in start:
yield sample
for sample in multiply(end, fader()):
yield sample
def crop_at_zero_crossing(gen, seconds=5, error=0.1):
'''
Crop the generator, ending at a zero-crossing
Crop the generator to produce approximately seconds seconds
(default 5s) of audio at the provided FRAME_RATE, attempting
to end the clip at a zero crossing point to avoid clicking.
'''
source = iter(gen)
buffer_length = int(2 * error * sampler.FRAME_RATE)
# split the source into two iterators:
# - start, which contains the bulk of the sound clip
# - and end, which contains the final 100ms, plus 100ms past
# the desired clip length. We may cut the clip anywhere
# within this +/-100ms end buffer.
start = itertools.islice(source, 0, int((seconds - error) * sampler.FRAME_RATE))
end = itertools.islice(source, 0, buffer_length)
for sample in start:
yield sample
# pull end buffer generator into memory so we can work with it
end = list(end)
# find min by sorting buffer samples, first by abs of sample, then by distance from optimal
best = sorted(enumerate(end), key=lambda x: (math.fabs(x[1]),abs((buffer_length/2)-x[0])))
logger.debug(best[:10])
logger.debug(best[0][0])
# todo: better logic when we don't have a perfect zero crossing
#if best[0][1] != 0:
# # we don't have a perfect zero crossing, so let's look for best fit?
# pass
# crop samples at index of best zero crossing
for sample in end[:best[0][0] + 1]:
yield sample
def normalize(generator, min_in=0, max_in=256, min_out=-1, max_out=1):
scale = float(max_out - min_out) / (max_in - min_in)
return ((sample - min_in) * scale + min_out for sample in generator)
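# Worked example with the defaults: scale = (1 - -1) / (256 - 0) = 2/256, so an
# 8-bit-style sample of 0 maps to -1.0, 128 maps to 0.0 and 256 maps to +1.0.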
def hard_clip(generator, min=-1, max=1):
while True:
sample = generator.next()
if sample > max:
logger.warn("Warning, clipped value %f > max %f" % (sample, max))
yield max
elif sample < min:
logger.warn("Warning, clipped value %f < min %f" % (sample, min))
yield min
else:
yield sample
def vector_reduce(op, generators):
while True:
yield reduce(op, [g.next() for g in generators])
def vector_reduce1(op, generators):
while True:
yield reduce(op, [g.next() for g in generators])
def sum(*generators):
return vector_reduce(lambda a,b: a + b, generators)
def multiply(*generators):
return vector_reduce1(lambda a,b: a * b, generators)
def constant(value):
while True:
yield value
# filters
def volume(gen, dB=0):
'''Change the volume of gen by dB decibels'''
if not hasattr(dB, 'next'):
# not a generator
scale = 10 ** (dB / 20.)
else:
def scale_gen():
while True:
yield 10 ** (next(dB) / 20.)
scale = scale_gen()
return envelope(gen, scale)
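# Worked example: volume(gen, dB=-6) scales every sample by 10 ** (-6 / 20.)
# ~= 0.501, i.e. roughly half amplitude; dB may also be a generator, giving a
# time-varying gain envelope.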
def clip(gen, limit):
if not hasattr(limit, 'next'):
limit = constant(limit)
while True:
sample = gen.next()
current_limit = limit.next()
if math.fabs(sample) > current_limit:
yield current_limit * (math.fabs(sample) / sample if sample != 0 else 0)
else:
yield sample
def envelope(gen, volume):
if not hasattr(volume, 'next'):
volume = constant(volume)
while True:
sample = gen.next()
current_volume = volume.next()
yield current_volume * sample
def loop(*gens):
loops = [list(gen) for gen in gens]
while True:
for loop in loops:
for sample in loop:
yield sample
def mixer(inputs, mix=None):
'''
Mix `inputs` together based on `mix` tuple
`inputs` should be a tuple of *n* generators.
`mix` should be a tuple of *m* tuples, one per desired
output channel. Each of the *m* tuples should contain
*n* generators, corresponding to the time-sequence of
the desired mix levels for each of the *n* input channels.
    That is, to make an output channel contain a 50/50 mix of the
two input channels, the tuple would be:
(constant(0.5), constant(0.5))
The mix generators need not be constant, allowing for time-varying
mix levels:
# 50% from input 1, pulse input 2 over a two second cycle
(constant(0.5), tone(0.5))
The mixer will return a list of *m* generators, each containing
the data from the inputs mixed as specified.
If no `mix` tuple is specified, all of the *n* input channels
will be mixed together into one generator, with the volume of
each reduced *n*-fold.
Example:
# three in, two out;
# 10Hz binaural beat with white noise across both channels
mixer(
(white_noise(), tone(440), tone(450)),
(
(constant(.5), constant(1), constant(0)),
(constant(.5), constant(0), constant(1)),
)
)
'''
    if mix is None:
# by default, mix all inputs down to one channel
mix = ([constant(1.0 / len(inputs))] * len(inputs),)
    # Duplicate each input once per output channel, then transpose so that
    # duped_inputs[channel] holds one copy of every input for that channel.
    duped_inputs = zip(*[itertools.tee(i, len(mix)) for i in inputs])
    return [
        sum(*[multiply(m, i) for m, i in zip(channel_mix, channel_inputs)])
        for channel_mix, channel_inputs in zip(mix, duped_inputs)
    ]
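# Illustrative sketch (added): with no `mix` argument, mixer() collapses all
# inputs into a single channel at 1/n volume each. tone() and white_noise()
# are the generators referenced in the docstring above; never called here.
def _example_mixer_usage():
    mono = mixer((tone(440), white_noise()))[0]   # 50% tone, 50% noise
    return mono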
def channelize(gen, channels):
'''
Break multi-channel generator into one sub-generator per channel
Takes a generator producing n-tuples of samples and returns n generators,
each producing samples for a single channel.
Since multi-channel generators are the only reasonable way to synchronize samples
across channels, and the sampler functions only take tuples of generators,
you must use this function to process synchronized streams for output.
'''
def pick(g, channel):
for samples in g:
yield samples[channel]
return [pick(gen_copy, channel) for channel, gen_copy in enumerate(itertools.tee(gen, channels))]
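# Illustrative sketch (added; never called): splitting a generator of
# (left, right) tuples into per-channel generators, as required before
# handing synchronized streams to the sampler output functions.
# `stereo_gen` stands for any generator of 2-tuples.
def _example_channelize_usage(stereo_gen):
    left, right = channelize(stereo_gen, 2)
    return left, right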
def play(filename):
import subprocess
subprocess.call(["afplay", filename])
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Search module that uses Google App Engine's full text search."""
__author__ = 'Ellis Michael (emichael@google.com)'
import collections
import gettext
import logging
import math
import mimetypes
import os
import time
import traceback
import jinja2
import resources
import webapp2
import appengine_config
from common import crypto
from common import safe_dom
from controllers import sites
from controllers import utils
from models import config
from models import counters
from models import courses
from models import custom_modules
from models import jobs
from models import transforms
from modules.dashboard import dashboard
from google.appengine.api import namespace_manager
from google.appengine.api import search
from google.appengine.ext import db
MODULE_NAME = 'Full Text Search'
CAN_INDEX_ALL_COURSES_IN_CRON = config.ConfigProperty(
'gcb_can_index_automatically', bool, safe_dom.Text(
'Whether the search module can automatically index the course daily '
'using a cron job. If enabled, this job would index the course '
'incrementally so that only new items or items which have not been '
'recently indexed are indexed.'),
default_value=False, label='Automatically index search')
SEARCH_QUERIES_MADE = counters.PerfCounter(
'gcb-search-queries-made',
'The number of student queries made to the search module.')
SEARCH_RESULTS_RETURNED = counters.PerfCounter(
'gcb-search-results-returned',
'The number of search results returned across all student queries.')
SEARCH_FAILURES = counters.PerfCounter(
'gcb-search-failures',
'The number of search failure messages returned across all student '
'queries.')
INDEX_NAME = 'gcb_search_index_loc_%s'
RESULTS_LIMIT = 10
GCB_SEARCH_FOLDER_NAME = os.path.normpath('/modules/search/')
MAX_RETRIES = 5
# I18N: Message displayed on search results page when error occurs.
SEARCH_ERROR_TEXT = gettext.gettext('Search is currently unavailable.')
class ModuleDisabledException(Exception):
"""Exception thrown when the search module is disabled."""
pass
def get_index(namespace, locale):
assert locale, 'Must have a non-null locale'
return search.Index(name=INDEX_NAME % locale, namespace=namespace)
def index_all_docs(course, incremental):
"""Index all of the docs for a given models.Course object.
Args:
course: models.courses.Course. the course to index.
incremental: boolean. whether or not to index only new or out-of-date
items.
Returns:
A dict with three keys.
'num_indexed_docs' maps to an int, the number of documents added to the
index.
        'doc_types' maps to a counter with resource types as keys mapping to the
number of that resource added to the index.
'indexing_time_secs' maps to a float representing the number of seconds
the indexing job took.
Raises:
ModuleDisabledException: The search module is currently disabled.
"""
if not custom_module.enabled:
raise ModuleDisabledException('The search module is disabled.')
start_time = time.time()
index = get_index(
course.app_context.get_namespace_name(),
course.app_context.get_current_locale())
timestamps, doc_types = (_get_index_metadata(index) if incremental
else ({}, {}))
for doc in resources.generate_all_documents(course, timestamps):
retry_count = 0
while retry_count < MAX_RETRIES:
try:
index.put(doc)
timestamps[doc.doc_id] = doc['date'][0].value
doc_types[doc.doc_id] = doc['type'][0].value
break
            except search.Error as e:
if e.results[0].code == search.OperationResult.TRANSIENT_ERROR:
retry_count += 1
if retry_count >= MAX_RETRIES:
logging.error(
'Multiple transient errors indexing doc_id: %s',
doc.doc_id)
else:
logging.error('Failed to index doc_id: %s', doc.doc_id)
break
indexed_doc_types = collections.Counter()
for type_name in doc_types.values():
indexed_doc_types[type_name] += 1
return {'num_indexed_docs': len(timestamps),
'doc_types': indexed_doc_types,
'indexing_time_secs': time.time() - start_time}
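# Illustrative note (added): the dict returned above looks roughly like
# {'num_indexed_docs': 42, 'doc_types': Counter({'lesson': 30, ...}),
#  'indexing_time_secs': 1.7}. IndexCourse.run() below aggregates these
# per-locale results into a single stats dict.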
def clear_index(namespace, locale):
"""Delete all docs in the index for a given models.Course object."""
if not custom_module.enabled:
raise ModuleDisabledException('The search module is disabled.')
index = get_index(namespace, locale)
doc_ids = [document.doc_id for document in index.get_range(ids_only=True)]
total_docs = len(doc_ids)
while doc_ids:
index.delete(doc_ids)
doc_ids = [document.doc_id
for document in index.get_range(ids_only=True)]
return {'deleted_docs': total_docs}
def _get_index_metadata(index):
"""Returns dict from doc_id to timestamp and one from doc_id to doc_type."""
timestamps = []
doc_types = []
cursor = search.Cursor()
while cursor:
options = search.QueryOptions(
limit=1000,
cursor=cursor,
returned_fields=['date', 'type'])
query = search.Query(query_string='', options=options)
current_docs = index.search(query)
cursor = current_docs.cursor
for doc in current_docs:
timestamps.append((doc.doc_id, doc['date'][0].value))
doc_types.append((doc.doc_id, doc['type'][0].value))
return dict(timestamps), dict(doc_types)
def fetch(course, query_string, offset=0, limit=RESULTS_LIMIT):
"""Return an HTML fragment with the results of a search for query_string.
Args:
course: models.courses.Course. the course to search.
query_string: str. the user's specified query.
offset: int. the number of results to skip.
limit: int. the number of results to return.
Returns:
A dict with two keys.
'results' maps to an ordered list of resources.Result objects.
'total_found' maps to the total number of results in the index which
match query_string.
Raises:
ModuleDisabledException: The search module is currently disabled.
"""
if not custom_module.enabled:
raise ModuleDisabledException('The search module is disabled.')
index = get_index(
course.app_context.get_namespace_name(),
course.app_context.get_current_locale())
try:
# TODO(emichael): Don't compute these for every query
returned_fields = resources.get_returned_fields()
snippeted_fields = resources.get_snippeted_fields()
options = search.QueryOptions(
limit=limit,
offset=offset,
returned_fields=returned_fields,
number_found_accuracy=100,
snippeted_fields=snippeted_fields)
query = search.Query(query_string=query_string, options=options)
results = index.search(query)
except search.Error:
logging.info('Failed searching for: %s', query_string)
return {'results': None, 'total_found': 0}
processed_results = resources.process_results(results)
return {'results': processed_results, 'total_found': results.number_found}
class SearchHandler(utils.BaseHandler):
"""Handler for generating the search results page."""
def get(self):
"""Process GET request."""
# TODO(emichael): move timing to Javascript
if not custom_module.enabled:
self.error(404)
return
student = self.personalize_page_and_get_enrolled(
supports_transient_student=True)
if not student:
return
try:
start = time.time()
# TODO(emichael): Don't use get because it can't handle utf-8
query = self.request.get('query')
offset = self.request.get('offset')
self.template_value['navbar'] = {}
if query:
try:
offset = int(offset)
except (ValueError, TypeError):
offset = 0
self.template_value['query'] = query
SEARCH_QUERIES_MADE.inc()
response = fetch(self.get_course(), query, offset=offset)
response = self.filter(response, student)
self.template_value['time'] = '%.2f' % (time.time() - start)
self.template_value['search_results'] = response['results']
total_found = response['total_found']
if offset + RESULTS_LIMIT < total_found:
self.template_value['next_link'] = (
'search?query=%s&offset=%d' %
(query, offset + RESULTS_LIMIT))
if offset - RESULTS_LIMIT >= 0:
self.template_value['previous_link'] = (
'search?query=%s&offset=%d' %
(query, offset - RESULTS_LIMIT))
self.template_value['page_number'] = offset / RESULTS_LIMIT + 1
self.template_value['total_pages'] = int(math.ceil(
float(total_found) / RESULTS_LIMIT))
if response['results']:
SEARCH_RESULTS_RETURNED.inc(len(response['results']))
# TODO(emichael): Remove this check when the unicode issue is fixed in
# dev_appserver.
except UnicodeEncodeError as e:
SEARCH_FAILURES.inc()
if not appengine_config.PRODUCTION_MODE:
# This message will only be displayed to the course author in
# dev, so it does not need to be I18N'd
self.template_value['search_error'] = (
'There is a known issue in App Engine\'s SDK '
'(code.google.com/p/googleappengine/issues/detail?id=9335) '
'which causes an error when generating search snippets '
'which contain non-ASCII characters. This error does not '
'occur in the production environment, so you can safely '
'run your course with unicode characters on appspot.com.')
logging.error('[Unicode/Dev server issue] Error rendering the '
'search page: %s.', e)
else:
self.template_value['search_error'] = SEARCH_ERROR_TEXT
logging.error('Error rendering the search page: %s. %s',
e, traceback.format_exc())
except Exception as e: # pylint: disable=broad-except
SEARCH_FAILURES.inc()
self.template_value['search_error'] = SEARCH_ERROR_TEXT
logging.error('Error rendering the search page: %s. %s',
e, traceback.format_exc())
finally:
path = sites.abspath(self.app_context.get_home_folder(),
GCB_SEARCH_FOLDER_NAME)
template = self.get_template('search.html', additional_dirs=[path])
self.template_value['navbar'] = {}
self.response.out.write(template.render(self.template_value))
def filter(self, response, student):
if not response['results']:
return response
filtered_results = []
available_unit_ids = set(
str(unit.unit_id) for unit in
self.get_course().get_track_matching_student(student))
for result in response['results']:
if not result.unit_id or str(result.unit_id) in available_unit_ids:
filtered_results.append(result)
return {
'results': filtered_results,
'total_found': len(filtered_results)
}
class AssetsHandler(webapp2.RequestHandler):
"""Content handler for assets associated with search."""
def get(self):
"""Respond to HTTP GET methods."""
if not custom_module.enabled:
self.error(404)
return
path = self.request.path
if path.startswith('/'):
path = path[1:]
path = os.path.normpath(path)
if os.path.basename(os.path.dirname(path)) != 'assets':
self.error(404)
return
resource_file = os.path.join(appengine_config.BUNDLE_ROOT, path)
mimetype = mimetypes.guess_type(resource_file)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
        try:
            sites.set_static_resource_cache_control(self)
            self.response.status = 200
            with open(resource_file, 'rb') as stream:
                content = stream.read()
            self.response.headers['Content-Type'] = mimetype
            self.response.write(content)
except IOError:
self.error(404)
def _get_search(handler):
"""Renders course indexing view."""
template_values = {'page_title': handler.format_title('Search')}
mc_template_value = {}
mc_template_value['module_enabled'] = custom_module.enabled
indexing_job = IndexCourse(handler.app_context).load()
clearing_job = ClearIndex(handler.app_context).load()
if indexing_job and (not clearing_job or
indexing_job.updated_on > clearing_job.updated_on):
if indexing_job.status_code in [jobs.STATUS_CODE_STARTED,
jobs.STATUS_CODE_QUEUED]:
mc_template_value['status_message'] = 'Indexing in progress.'
mc_template_value['job_in_progress'] = True
elif indexing_job.status_code == jobs.STATUS_CODE_COMPLETED:
mc_template_value['indexed'] = True
mc_template_value['last_updated'] = (
indexing_job.updated_on.strftime(
utils.HUMAN_READABLE_DATETIME_FORMAT))
mc_template_value['index_info'] = transforms.loads(
indexing_job.output)
elif indexing_job.status_code == jobs.STATUS_CODE_FAILED:
mc_template_value['status_message'] = (
'Indexing job failed with error: %s' % indexing_job.output)
elif clearing_job:
if clearing_job.status_code in [jobs.STATUS_CODE_STARTED,
jobs.STATUS_CODE_QUEUED]:
mc_template_value['status_message'] = 'Clearing in progress.'
mc_template_value['job_in_progress'] = True
elif clearing_job.status_code == jobs.STATUS_CODE_COMPLETED:
mc_template_value['status_message'] = (
'The index has been cleared.')
elif clearing_job.status_code == jobs.STATUS_CODE_FAILED:
mc_template_value['status_message'] = (
'Clearing job failed with error: %s' % clearing_job.output)
else:
mc_template_value['status_message'] = (
'No indexing job has been run yet.')
mc_template_value['index_course_xsrf_token'] = (
crypto.XsrfTokenManager.create_xsrf_token('index_course'))
mc_template_value['clear_index_xsrf_token'] = (
crypto.XsrfTokenManager.create_xsrf_token('clear_index'))
template_values['main_content'] = jinja2.Markup(handler.get_template(
'search_dashboard.html', [os.path.dirname(__file__)]
).render(mc_template_value, autoescape=True))
return template_values
def _post_index_course(handler):
"""Submits a new indexing operation."""
try:
incremental = handler.request.get('incremental') == 'true'
check_jobs_and_submit(IndexCourse(handler.app_context, incremental),
handler.app_context)
except db.TransactionFailedError:
# Double submission from multiple browsers, just pass
pass
handler.redirect('/dashboard?action=settings_search')
def _post_clear_index(handler):
"""Submits a new indexing operation."""
try:
check_jobs_and_submit(ClearIndex(handler.app_context),
handler.app_context)
except db.TransactionFailedError:
# Double submission from multiple browsers, just pass
pass
handler.redirect('/dashboard?action=settings_search')
class CronHandler(utils.BaseHandler):
"""Iterates through all courses and starts an indexing job for each one.
All jobs should be submitted through the transactional check_jobs_and_submit
method to prevent multiple index operations from running at the same time.
If an index job is currently running when this cron job attempts to start
one, this operation will be a noop for that course.
"""
def get(self):
"""Start an index job for each course."""
cron_logger = logging.getLogger('modules.search.cron')
self.response.headers['Content-Type'] = 'text/plain'
if CAN_INDEX_ALL_COURSES_IN_CRON.value:
counter = 0
for context in sites.get_all_courses():
namespace = context.get_namespace_name()
counter += 1
try:
check_jobs_and_submit(IndexCourse(context), context)
except db.TransactionFailedError as e:
cron_logger.info(
'Failed to submit job #%s in namespace %s: %s',
counter, namespace, e)
else:
cron_logger.info(
'Index job #%s submitted for namespace %s.',
counter, namespace)
cron_logger.info('All %s indexing jobs started; cron job complete.',
counter)
else:
cron_logger.info('Automatic indexing disabled. Cron job halting.')
self.response.write('OK\n')
@db.transactional(xg=True)
def check_jobs_and_submit(job, app_context):
"""Determines whether an indexing job is running and submits if not."""
indexing_job = IndexCourse(app_context).load()
clearing_job = ClearIndex(app_context).load()
bad_status_codes = [jobs.STATUS_CODE_STARTED, jobs.STATUS_CODE_QUEUED]
if ((indexing_job and indexing_job.status_code in bad_status_codes) or
(clearing_job and clearing_job.status_code in bad_status_codes)):
raise db.TransactionFailedError('Index job is currently running.')
else:
job.non_transactional_submit()
class IndexCourse(jobs.DurableJob):
"""A job that indexes the course."""
@staticmethod
def get_description():
return 'course index'
def __init__(self, app_context, incremental=True):
super(IndexCourse, self).__init__(app_context)
self.incremental = incremental
def run(self):
"""Index the course."""
namespace = namespace_manager.get_namespace()
logging.info('Running indexing job for namespace %s. Incremental: %s',
namespace_manager.get_namespace(), self.incremental)
app_context = sites.get_app_context_for_namespace(namespace)
# Make a request URL to make sites.get_course_for_current_request work
sites.set_path_info(app_context.slug)
indexing_stats = {
'num_indexed_docs': 0,
'doc_types': collections.Counter(),
'indexing_time_secs': 0,
'locales': []
}
for locale in app_context.get_allowed_locales():
app_context.set_current_locale(locale)
course = courses.Course(None, app_context=app_context)
stats = index_all_docs(course, self.incremental)
indexing_stats['num_indexed_docs'] += stats['num_indexed_docs']
indexing_stats['doc_types'] += stats['doc_types']
indexing_stats['indexing_time_secs'] += stats['indexing_time_secs']
indexing_stats['locales'].append(locale)
return indexing_stats
class ClearIndex(jobs.DurableJob):
"""A job that clears the index for a course."""
@staticmethod
def get_description():
return 'clear course index'
def run(self):
"""Clear the index."""
namespace = namespace_manager.get_namespace()
logging.info('Running clearing job for namespace %s.', namespace)
app_context = sites.get_app_context_for_namespace(namespace)
clear_stats = {
'deleted_docs': 0,
'locales': []
}
for locale in app_context.get_allowed_locales():
stats = clear_index(namespace, locale)
clear_stats['deleted_docs'] += stats['deleted_docs']
clear_stats['locales'].append(locale)
return clear_stats
# Module registration
custom_module = None
def register_module():
"""Registers this module in the registry."""
global_routes = [
('/modules/search/assets/.*', AssetsHandler),
('/cron/search/index_courses', CronHandler)
]
namespaced_routes = [
('/search', SearchHandler)
]
def notify_module_enabled():
dashboard.DashboardHandler.add_sub_nav_mapping(
'publish', 'search', 'Search', action='settings_search',
contents=_get_search, placement=1000)
dashboard.DashboardHandler.add_custom_post_action(
'index_course', _post_index_course)
dashboard.DashboardHandler.add_custom_post_action(
'clear_index', _post_clear_index)
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
MODULE_NAME,
'Provides search capabilities for courses',
global_routes, namespaced_routes,
notify_module_enabled=notify_module_enabled)
return custom_module
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for tests that need to update the policies enforced by Chrome.
Subclasses can call SetUserPolicy (ChromeOS, Linux, Windows) and
SetDevicePolicy (ChromeOS only) with a dictionary of the policies to install.
The current implementation depends on the platform. The implementations might
change in the future, but tests relying on the above calls will keep working.
"""
# On ChromeOS, a mock DMServer is started and enterprise enrollment faked
# against it. The mock DMServer then serves user and device policy to Chrome.
#
# For this setup to work, the DNS, GAIA and TPM (if present) are mocked as well:
#
# * The mock DNS resolves all addresses to 127.0.0.1. This allows the mock GAIA
# to handle all login attempts. It also eliminates the impact of flaky network
# connections on tests. Beware though that no cloud services can be accessed
# due to this DNS redirect.
#
# * The mock GAIA permits login with arbitrary credentials and accepts any OAuth
# tokens sent to it for verification as valid.
#
# * When running on a real device, its TPM is disabled. If the TPM were enabled,
# enrollment could not be undone without a reboot. Disabling the TPM makes
# cryptohomed behave as if no TPM was present, allowing enrollment to be
# undone by removing the install attributes.
#
# To disable the TPM, 0 must be written to /sys/class/misc/tpm0/device/enabled.
# Since this file is not writeable, a tmpfs is mounted that shadows the file
# with a writeable copy.
import json
import logging
import os
import subprocess
import pyauto
if pyauto.PyUITest.IsChromeOS():
import sys
import warnings
import pyauto_paths
# Ignore deprecation warnings, they make our output more cluttered.
warnings.filterwarnings('ignore', category=DeprecationWarning)
# Find the path to the pyproto and add it to sys.path.
# Prepend it so that google.protobuf is loaded from here.
for path in pyauto_paths.GetBuildDirs():
p = os.path.join(path, 'pyproto')
if os.path.isdir(p):
sys.path = [p, os.path.join(p, 'chrome', 'browser', 'policy',
'proto')] + sys.path
break
sys.path.append('/usr/local') # to import autotest libs.
import dbus
import device_management_backend_pb2 as dm
import pyauto_utils
import string
import tempfile
import urllib
import urllib2
import uuid
from autotest.cros import auth_server
from autotest.cros import constants
from autotest.cros import cros_ui
from autotest.cros import dns_server
elif pyauto.PyUITest.IsWin():
import _winreg as winreg
elif pyauto.PyUITest.IsMac():
import getpass
import plistlib
# ASN.1 object identifier for PKCS#1/RSA.
PKCS1_RSA_OID = '\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01'
TPM_SYSFS_PATH = '/sys/class/misc/tpm0'
TPM_SYSFS_ENABLED_FILE = os.path.join(TPM_SYSFS_PATH, 'device/enabled')
class PolicyTestBase(pyauto.PyUITest):
"""A base class for tests that need to set up and modify policies.
Subclasses can use the methods SetUserPolicy (ChromeOS, Linux, Windows) and
SetDevicePolicy (ChromeOS only) to set the policies seen by Chrome.
"""
if pyauto.PyUITest.IsChromeOS():
# TODO(bartfab): Extend the C++ wrapper that starts the mock DMServer so
# that an owner can be passed in. Without this, the server will assume that
# the owner is user@example.com and for consistency, so must we.
owner = 'user@example.com'
# Subclasses may override these credentials to fake enrollment into another
# mode or use different device and machine IDs.
mode = 'enterprise'
device_id = string.upper(str(uuid.uuid4()))
machine_id = 'CROSTEST'
_auth_server = None
_dns_server = None
@staticmethod
def _Call(command, check=False):
"""Invokes a subprocess and optionally asserts the return value is zero."""
with open(os.devnull, 'w') as devnull:
if check:
return subprocess.check_call(command.split(' '), stdout=devnull)
else:
return subprocess.call(command.split(' '), stdout=devnull)
def _WriteFile(self, path, content):
"""Writes content to path, creating any intermediary directories."""
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
f = open(path, 'w')
f.write(content)
f.close()
def _GetTestServerPoliciesFilePath(self):
"""Returns the path of the cloud policy configuration file."""
assert self.IsChromeOS()
return os.path.join(self._temp_data_dir, 'device_management')
def _GetHttpURLForDeviceManagement(self):
"""Returns the URL at which the TestServer is serving user policy."""
assert self.IsChromeOS()
return self._http_server.GetURL('device_management').spec()
def _RemoveIfExists(self, filename):
"""Removes a file if it exists."""
if os.path.exists(filename):
os.remove(filename)
def _StartSessionManagerAndChrome(self):
"""Starts the session manager and Chrome.
Requires that the session manager be stopped already.
"""
# Ugly hack: session manager will not spawn Chrome if this file exists. That
# is usually a good thing (to keep the automation channel open), but in this
# case we really want to restart chrome. PyUITest.setUp() will be called
# after session manager and chrome have restarted, and will setup the
# automation channel.
restore_magic_file = False
if os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE):
logging.debug('DISABLE_BROWSER_RESTART_MAGIC_FILE found. '
'Removing temporarily for the next restart.')
restore_magic_file = True
os.remove(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE)
assert not os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE)
logging.debug('Starting session manager again')
cros_ui.start()
# cros_ui.start() waits for the login prompt to be visible, so Chrome has
# already started once it returns.
if restore_magic_file:
open(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE, 'w').close()
assert os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE)
def _WritePolicyOnChromeOS(self):
"""Updates the mock DMServer's input file with current policy."""
assert self.IsChromeOS()
policy_dict = {
'google/chromeos/device': self._device_policy,
'google/chromeos/user': {
'mandatory': self._user_policy,
'recommended': {},
},
'managed_users': ['*'],
}
self._WriteFile(self._GetTestServerPoliciesFilePath(),
json.dumps(policy_dict, sort_keys=True, indent=2) + '\n')
@staticmethod
def _IsCryptohomedReadyOnChromeOS():
"""Checks whether cryptohomed is running and ready to accept DBus calls."""
assert pyauto.PyUITest.IsChromeOS()
try:
bus = dbus.SystemBus()
proxy = bus.get_object('org.chromium.Cryptohome',
'/org/chromium/Cryptohome')
dbus.Interface(proxy, 'org.chromium.CryptohomeInterface')
except dbus.DBusException:
return False
return True
def _ClearInstallAttributesOnChromeOS(self):
"""Resets the install attributes."""
assert self.IsChromeOS()
self._RemoveIfExists('/home/.shadow/install_attributes.pb')
self._Call('restart cryptohomed', check=True)
assert self.WaitUntil(self._IsCryptohomedReadyOnChromeOS)
def _DMPostRequest(self, request_type, request, headers):
"""Posts a request to the mock DMServer."""
assert self.IsChromeOS()
url = self._GetHttpURLForDeviceManagement()
url += '?' + urllib.urlencode({
'deviceid': self.device_id,
'oauth_token': 'dummy_oauth_token_that_is_not_checked_anyway',
'request': request_type,
'devicetype': 2,
'apptype': 'Chrome',
'agent': 'Chrome',
})
response = dm.DeviceManagementResponse()
response.ParseFromString(urllib2.urlopen(urllib2.Request(
url, request.SerializeToString(), headers)).read())
return response
def _DMRegisterDevice(self):
"""Registers with the mock DMServer and returns the DMToken."""
assert self.IsChromeOS()
dm_request = dm.DeviceManagementRequest()
request = dm_request.register_request
request.type = dm.DeviceRegisterRequest.DEVICE
request.machine_id = self.machine_id
dm_response = self._DMPostRequest('register', dm_request, {})
return dm_response.register_response.device_management_token
def _DMFetchPolicy(self, dm_token):
"""Fetches device policy from the mock DMServer."""
assert self.IsChromeOS()
dm_request = dm.DeviceManagementRequest()
policy_request = dm_request.policy_request
request = policy_request.request.add()
request.policy_type = 'google/chromeos/device'
request.signature_type = dm.PolicyFetchRequest.SHA1_RSA
headers = {'Authorization': 'GoogleDMToken token=' + dm_token}
dm_response = self._DMPostRequest('policy', dm_request, headers)
response = dm_response.policy_response.response[0]
assert response.policy_data
assert response.policy_data_signature
assert response.new_public_key
return response
def ExtraChromeFlags(self):
"""Sets up Chrome to use cloud policies on ChromeOS."""
flags = pyauto.PyUITest.ExtraChromeFlags(self)
if self.IsChromeOS():
while '--skip-oauth-login' in flags:
flags.remove('--skip-oauth-login')
url = self._GetHttpURLForDeviceManagement()
flags.append('--device-management-url=' + url)
flags.append('--disable-sync')
return flags
def _SetUpWithSessionManagerStopped(self):
"""Sets up the test environment after stopping the session manager."""
assert self.IsChromeOS()
logging.debug('Stopping session manager')
cros_ui.stop(allow_fail=True)
# Start mock GAIA server.
self._auth_server = auth_server.GoogleAuthServer()
self._auth_server.run()
# Disable TPM if present.
if os.path.exists(TPM_SYSFS_PATH):
self._Call('mount -t tmpfs -o size=1k tmpfs %s'
% os.path.realpath(TPM_SYSFS_PATH), check=True)
self._WriteFile(TPM_SYSFS_ENABLED_FILE, '0')
# Clear install attributes and restart cryptohomed to pick up the change.
self._ClearInstallAttributesOnChromeOS()
# Set install attributes to mock enterprise enrollment.
bus = dbus.SystemBus()
proxy = bus.get_object('org.chromium.Cryptohome',
'/org/chromium/Cryptohome')
install_attributes = {
'enterprise.device_id': self.device_id,
'enterprise.domain': string.split(self.owner, '@')[-1],
'enterprise.mode': self.mode,
'enterprise.owned': 'true',
'enterprise.user': self.owner
}
interface = dbus.Interface(proxy, 'org.chromium.CryptohomeInterface')
for name, value in install_attributes.iteritems():
interface.InstallAttributesSet(name, '%s\0' % value)
interface.InstallAttributesFinalize()
# Start mock DNS server that redirects all traffic to 127.0.0.1.
self._dns_server = dns_server.LocalDns()
self._dns_server.run()
# Start mock DMServer.
source_dir = os.path.normpath(pyauto_paths.GetSourceDir())
self._temp_data_dir = tempfile.mkdtemp(dir=source_dir)
logging.debug('TestServer input path: %s' % self._temp_data_dir)
relative_temp_data_dir = os.path.basename(self._temp_data_dir)
self._http_server = self.StartHTTPServer(relative_temp_data_dir)
# Initialize the policy served.
self._device_policy = {}
self._user_policy = {}
self._WritePolicyOnChromeOS()
# Register with mock DMServer and retrieve initial device policy blob.
dm_token = self._DMRegisterDevice()
policy = self._DMFetchPolicy(dm_token)
# Write the initial device policy blob.
self._WriteFile(constants.OWNER_KEY_FILE, policy.new_public_key)
self._WriteFile(constants.SIGNED_POLICY_FILE, policy.SerializeToString())
# Remove any existing vaults.
self.RemoveAllCryptohomeVaultsOnChromeOS()
# Restart session manager and Chrome.
self._StartSessionManagerAndChrome()
def _tearDownWithSessionManagerStopped(self):
"""Resets the test environment after stopping the session manager."""
assert self.IsChromeOS()
logging.debug('Stopping session manager')
cros_ui.stop(allow_fail=True)
# Stop mock GAIA server.
if self._auth_server:
self._auth_server.stop()
# Reenable TPM if present.
if os.path.exists(TPM_SYSFS_PATH):
self._Call('umount %s' % os.path.realpath(TPM_SYSFS_PATH))
# Clear install attributes and restart cryptohomed to pick up the change.
self._ClearInstallAttributesOnChromeOS()
# Stop mock DNS server.
if self._dns_server:
self._dns_server.stop()
# Stop mock DMServer.
self.StopHTTPServer(self._http_server)
# Clear the policy served.
pyauto_utils.RemovePath(self._temp_data_dir)
# Remove the device policy blob.
self._RemoveIfExists(constants.OWNER_KEY_FILE)
self._RemoveIfExists(constants.SIGNED_POLICY_FILE)
# Remove any existing vaults.
self.RemoveAllCryptohomeVaultsOnChromeOS()
# Restart session manager and Chrome.
self._StartSessionManagerAndChrome()
def setUp(self):
"""Sets up the platform for policy testing.
On ChromeOS, part of the setup involves restarting the session manager to
inject an initial device policy blob.
"""
if self.IsChromeOS():
# Perform the remainder of the setup with the device manager stopped.
try:
self.WaitForSessionManagerRestart(
self._SetUpWithSessionManagerStopped)
except:
# Destroy the non re-entrant services.
if self._auth_server:
self._auth_server.stop()
if self._dns_server:
self._dns_server.stop()
raise
pyauto.PyUITest.setUp(self)
self._branding = self.GetBrowserInfo()['properties']['branding']
def tearDown(self):
"""Cleans up the policies and related files created in tests."""
if self.IsChromeOS():
# Perform the cleanup with the device manager stopped.
self.WaitForSessionManagerRestart(self._tearDownWithSessionManagerStopped)
else:
# On other platforms, there is only user policy to clear.
self.SetUserPolicy(refresh=False)
pyauto.PyUITest.tearDown(self)
def LoginWithTestAccount(self, account='prod_enterprise_test_user'):
"""Convenience method for logging in with one of the test accounts."""
assert self.IsChromeOS()
credentials = self.GetPrivateInfo()[account]
self.Login(credentials['username'], credentials['password'])
assert self.GetLoginInfo()['is_logged_in']
def _GetCurrentLoginScreenId(self):
return self.ExecuteJavascriptInOOBEWebUI(
"""window.domAutomationController.send(
String(cr.ui.Oobe.getInstance().currentScreen.id));
""")
def _WaitForLoginScreenId(self, id):
self.assertTrue(
self.WaitUntil(function=self._GetCurrentLoginScreenId,
expect_retval=id),
msg='Expected login screen "%s" to be visible.' % id)
def _CheckLoginFormLoading(self):
return self.ExecuteJavascriptInOOBEWebUI(
"""window.domAutomationController.send(
cr.ui.Oobe.getInstance().currentScreen.loading);
""")
def PrepareToWaitForLoginFormReload(self):
self.assertEqual('gaia-signin',
self._GetCurrentLoginScreenId(),
msg='Expected the login form to be visible.')
self.assertTrue(
self.WaitUntil(function=self._CheckLoginFormLoading,
expect_retval=False),
msg='Expected the login form to finish loading.')
# Set up a sentinel variable that is false now and will toggle to true when
# the login form starts reloading.
self.ExecuteJavascriptInOOBEWebUI(
"""var screen = cr.ui.Oobe.getInstance().currentScreen;
if (!('reload_started' in screen)) {
screen.orig_loadAuthExtension_ = screen.loadAuthExtension_;
screen.loadAuthExtension_ = function(data) {
this.orig_loadAuthExtension_(data);
if (this.loading)
this.reload_started = true;
}
}
screen.reload_started = false;
window.domAutomationController.send(true);""")
def _CheckLoginFormReloaded(self):
return self.ExecuteJavascriptInOOBEWebUI(
"""window.domAutomationController.send(
cr.ui.Oobe.getInstance().currentScreen.reload_started &&
!cr.ui.Oobe.getInstance().currentScreen.loading);
""")
def WaitForLoginFormReload(self):
self.assertEqual('gaia-signin',
self._GetCurrentLoginScreenId(),
msg='Expected the login form to be visible.')
self.assertTrue(
self.WaitUntil(function=self._CheckLoginFormReloaded),
msg='Expected the login form to finish reloading.')
def _SetUserPolicyChromeOS(self, user_policy=None):
"""Writes the given user policy to the mock DMServer's input file."""
self._user_policy = user_policy or {}
self._WritePolicyOnChromeOS()
def _SetUserPolicyWin(self, user_policy=None):
"""Writes the given user policy to the Windows registry."""
def SetValueEx(key, sub_key, value):
if isinstance(value, int):
winreg.SetValueEx(key, sub_key, 0, winreg.REG_DWORD, int(value))
elif isinstance(value, basestring):
winreg.SetValueEx(key, sub_key, 0, winreg.REG_SZ, value.encode('ascii'))
elif isinstance(value, list):
k = winreg.CreateKey(key, sub_key)
for index, v in list(enumerate(value)):
SetValueEx(k, str(index + 1), v)
winreg.CloseKey(k)
else:
raise TypeError('Unsupported data type: "%s"' % value)
assert self.IsWin()
if self._branding == 'Google Chrome':
reg_base = r'SOFTWARE\Policies\Google\Chrome'
else:
reg_base = r'SOFTWARE\Policies\Chromium'
if subprocess.call(
r'reg query HKEY_LOCAL_MACHINE\%s' % reg_base) == 0:
logging.debug(r'Removing %s' % reg_base)
subprocess.call(r'reg delete HKLM\%s /f' % reg_base)
if user_policy is not None:
root_key = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, reg_base)
for k, v in user_policy.iteritems():
SetValueEx(root_key, k, v)
winreg.CloseKey(root_key)
def _SetUserPolicyLinux(self, user_policy=None):
"""Writes the given user policy to the JSON policy file read by Chrome."""
assert self.IsLinux()
sudo_cmd_file = os.path.join(os.path.dirname(__file__),
'policy_posix_util.py')
if self._branding == 'Google Chrome':
policies_location_base = '/etc/opt/chrome'
else:
policies_location_base = '/etc/chromium'
if os.path.exists(policies_location_base):
logging.debug('Removing directory %s' % policies_location_base)
subprocess.call(['suid-python', sudo_cmd_file,
'remove_dir', policies_location_base])
if user_policy is not None:
self._WriteFile('/tmp/chrome.json',
json.dumps(user_policy, sort_keys=True, indent=2) + '\n')
policies_location = '%s/policies/managed' % policies_location_base
subprocess.call(['suid-python', sudo_cmd_file,
'setup_dir', policies_location])
subprocess.call(['suid-python', sudo_cmd_file,
'perm_dir', policies_location])
# Copy chrome.json file to the managed directory
subprocess.call(['suid-python', sudo_cmd_file,
'copy', '/tmp/chrome.json', policies_location])
os.remove('/tmp/chrome.json')
def _SetUserPolicyMac(self, user_policy=None):
"""Writes the given user policy to the plist policy file read by Chrome."""
assert self.IsMac()
sudo_cmd_file = os.path.join(os.path.dirname(__file__),
'policy_posix_util.py')
if self._branding == 'Google Chrome':
policies_file_base = 'com.google.Chrome.plist'
else:
policies_file_base = 'org.chromium.Chromium.plist'
policies_location = os.path.join('/Library', 'Managed Preferences',
getpass.getuser())
if os.path.exists(policies_location):
logging.debug('Removing directory %s' % policies_location)
subprocess.call(['suid-python', sudo_cmd_file,
'remove_dir', policies_location])
if user_policy is not None:
policies_tmp_file = os.path.join('/tmp', policies_file_base)
plistlib.writePlist(user_policy, policies_tmp_file)
subprocess.call(['suid-python', sudo_cmd_file,
'setup_dir', policies_location])
# Copy policy file to the managed directory
subprocess.call(['suid-python', sudo_cmd_file,
'copy', policies_tmp_file, policies_location])
os.remove(policies_tmp_file)
def SetUserPolicy(self, user_policy=None, refresh=True):
"""Sets the user policy provided as a dict.
Args:
user_policy: The user policy to set. None clears it.
refresh: If True, Chrome will refresh and apply the new policy.
Requires Chrome to be alive for it.
"""
if self.IsChromeOS():
self._SetUserPolicyChromeOS(user_policy=user_policy)
elif self.IsWin():
self._SetUserPolicyWin(user_policy=user_policy)
elif self.IsLinux():
self._SetUserPolicyLinux(user_policy=user_policy)
elif self.IsMac():
self._SetUserPolicyMac(user_policy=user_policy)
else:
raise NotImplementedError('Not available on this platform.')
if refresh:
self.RefreshPolicies()
def SetDevicePolicy(self, device_policy=None, refresh=True):
"""Sets the device policy provided as a dict.
Args:
device_policy: The device policy to set. None clears it.
refresh: If True, Chrome will refresh and apply the new policy.
Requires Chrome to be alive for it.
"""
assert self.IsChromeOS()
self._device_policy = device_policy or {}
self._WritePolicyOnChromeOS()
if refresh:
self.RefreshPolicies()
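# Illustrative sketch (added; not part of the original test base and never
# called). Shows how a PolicyTestBase subclass would typically drive the
# SetUserPolicy/SetDevicePolicy helpers from a test method. The policy keys
# below are assumptions for illustration only.
def _example_policy_usage(test):
  """Applies, then clears, policies on a PolicyTestBase instance."""
  # Apply a user policy; refresh=True (the default) makes Chrome pick it up
  # immediately.
  test.SetUserPolicy({'ShowHomeButton': True})
  # Device policy is ChromeOS-only and is served through the mock DMServer.
  if test.IsChromeOS():
    test.SetDevicePolicy({'guest_mode_enabled': False})
  # Passing no policy clears it; skip the refresh if Chrome is not running.
  test.SetUserPolicy(refresh=False)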
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
class TensorArrayCPUTest(tf.test.TestCase):
_use_gpu = False
def testTensorArrayWriteRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
def _testTensorArrayWritePack(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype == tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.pack()
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
def testTensorArrayWritePack(self):
self._testTensorArrayWritePack(tf.float32)
self._testTensorArrayWritePack(tf.float64)
self._testTensorArrayWritePack(tf.int32)
self._testTensorArrayWritePack(tf.int64)
self._testTensorArrayWritePack(tf.complex64)
self._testTensorArrayWritePack(tf.string)
def _testTensorArrayWriteConcat(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype == tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.concat()
self.assertAllEqual(
convert([[4.0, 5.0],
[104.0, 105.0],
[204.0, 205.0],
[6.0, 7.0],
[106.0, 107.0],
[8.0, 9.0]]), c0.eval())
def testTensorArrayWriteConcat(self):
self._testTensorArrayWriteConcat(tf.float32)
self._testTensorArrayWriteConcat(tf.float64)
self._testTensorArrayWriteConcat(tf.int32)
self._testTensorArrayWriteConcat(tf.int64)
self._testTensorArrayWriteConcat(tf.complex64)
self._testTensorArrayWriteConcat(tf.string)
def testTensorArrayUnpackWrongMajorSizeFails(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
r"Input value must have first dimension "
r"equal to the array size \(2 vs. 3\)"):
ta.unpack([1.0, 2.0]).flow.eval()
def testTensorArrayPackNotAllValuesAvailableFails(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not read from TensorArray index 1 "
"because it has not yet been written to."):
ta.write(0, [[4.0, 5.0]]).pack().eval()
def _testTensorArrayUnpackRead(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype is tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
# Unpack a vector into scalars
w0 = ta.unpack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
# Unpack a matrix into vectors
w1 = ta.unpack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackRead(tf.float32)
self._testTensorArrayUnpackRead(tf.float64)
self._testTensorArrayUnpackRead(tf.int32)
self._testTensorArrayUnpackRead(tf.int64)
self._testTensorArrayUnpackRead(tf.complex64)
self._testTensorArrayUnpackRead(tf.string)
def _testTensorArraySplitRead(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype == tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
# Split an empty vector
lengths = tf.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector
lengths = tf.constant([2, 0, 1])
w0 = ta.split(
convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 2.0]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix
lengths = tf.constant([2, 0, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
self.assertAllEqual(convert([]).reshape(0, 2), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(tf.float32)
self._testTensorArraySplitRead(tf.float64)
self._testTensorArraySplitRead(tf.int32)
self._testTensorArraySplitRead(tf.int64)
self._testTensorArraySplitRead(tf.complex64)
self._testTensorArraySplitRead(tf.string)
def testTensorGradArrayWriteRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
g_ta = ta.grad("grad")
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
def testTensorGradArrayDynamicWriteRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run([
r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
def testTensorGradAccessTwiceReceiveSameObject(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with tf.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle, g_ta_1.handle, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
# Test writing the wrong datatype
with self.assertRaisesOpError(
"TensorArray dtype is float but Op is trying to write dtype string"):
ta.write(-1, "wrong_type_scalar").flow.eval()
# Test writing to a negative index
with self.assertRaisesOpError(
"Tried to write to index -1 but array is not "
"resizeable and size is: 3"):
ta.write(-1, 3.0).flow.eval()
      # Test writing to too large an index
with self.assertRaisesOpError(
"Tried to write to index 3 but array is not "
"resizeable and size is: 3"):
ta.write(3, 3.0).flow.eval()
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype
r0_bad = gen_data_flow_ops._tensor_array_read(
handle=w0.handle, index=0, dtype=tf.int64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype int64."):
r0_bad.eval()
# Test reading from a different index than the one we wrote to
r1 = w0.read(1)
with self.assertRaisesOpError(
"Could not read from TensorArray index 1 because "
"it has not yet been written to."):
r1.eval()
# Test reading from a negative index
with self.assertRaisesOpError(
r"Tried to read from index -1 but array size is: 3"):
ta.read(-1).eval()
# Test reading from too large an index
with self.assertRaisesOpError(
"Tried to read from index 3 but array size is: 3"):
ta.read(3).eval()
def testTensorArrayWriteMultipleFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not write to TensorArray index 2 because "
"it has already been written to."):
ta.write(2, 3.0).write(2, 3.0).flow.eval()
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w1 = ta.write(0, 3.0)
w2 = w1.write(1, 4.0)
w3 = w2.write(2, [3.0])
with self.assertRaisesOpError(
"Concat saw a scalar shape at index 0 but requires at least vectors"):
w3.concat().eval()
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w1 = ta.write(0, [3.0])
w2 = w1.write(1, [4.0])
w3 = w2.write(2, [[3.0]])
with self.assertRaisesOpError(
r"TensorArray has inconsistent shapes. Index 0 has "
r"\(excepting dimension 0\) shape: \[\] but index 2 has \(excepting "
r"dimension 0\) shape: \[1\]"):
w3.concat().eval()
def testTensorArraySplitIncompatibleShapesFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
r"Expected lengths to be a vector, received shape: \[\]"):
lengths = tf.placeholder(tf.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
with self.assertRaisesOpError(
r"Expected sum of lengths to be equal to values.shape\[0\], "
r"but sum of lengths is 1 and value's shape is: \[3\]"):
ta.split([1.0, 2.0, 3.0], [1]).flow.eval()
with self.assertRaisesOpError(
r"Expected value to be at least a vector, but received shape: \[\]"):
ta.split(1.0, [1]).flow.eval()
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2)
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(2 vs. 1\), and the TensorArray is not marked as "
r"dynamically resizeable"):
ta.split([1.0], [1]).flow.eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3)
ta_grad = ta.grad("grad")
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Assert that if multiple_writes_aggregate is not enabled,
# multiple writes raise an exception.
with self.assertRaisesOpError(
r"TensorArray foo: Could not write to TensorArray index 2 because "
r"it has already been written to."):
w1.flow.eval()
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Could not aggregate to TensorArray index 1 because the "
r"existing shape is \[\] but the new input shape is \[1\]"):
wb1_grad.flow.eval()
def testTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64, tf.complex64]:
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
with self.test_session(use_gpu=self._use_gpu):
h1 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
self.assertAllClose(9.0, r.eval())
def testDuplicateTensorArrayFails(self):
with self.test_session(use_gpu=self._use_gpu) as session:
h1 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="foo")
c1 = h1.write(0, 4.0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="foo")
c2 = h2.write(0, 5.0)
with self.assertRaises(errors.AlreadyExistsError):
session.run([c1.flow, c2.flow])
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.as_dtype(dtype), tensor_array_name="foo", size=3)
c = lambda x: np.array(x, dtype=dtype)
value_0 = tf.constant(c([[4.0, 5.0]]))
value_1 = tf.constant(c(3.0))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = tf.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = tf.gradients(
ys=[r0, r0_2], xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = tf.gradients(
ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
# Test combined gradients
grad = tf.gradients(
ys=[r0, r0_2, r1], xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c(-2.0), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
for dtype in (np.float32, np.float64, np.int32, np.int64, np.complex64):
self._testTensorArrayGradientWriteReadType(dtype)
def testTensorArrayGradientWritePackConcatAndRead(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2,
clear_after_read=False)
value_0 = tf.constant([-1.0, 1.0])
value_1 = tf.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.pack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat()
with tf.control_dependencies([p0, r0, s0]):
grad_r = tf.gradients(
ys=[p0, r0, s0], xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # pack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0]]) # concat gradient
grad_vals = sess.run(grad_r) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
def testTensorArrayReadTwice(self):
with self.test_session(use_gpu=self._use_gpu):
value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readonce = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2)
w_readonce = ta_readonce.unpack(value)
r0_readonce = w_readonce.read(0)
with tf.control_dependencies([r0_readonce]):
r1_readonce = w_readonce.read(0)
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"):
r1_readonce.eval()
ta_readtwice = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unpack(value)
r0_readtwice = w_readtwice.read(0)
with tf.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
def testTensorArrayGradientUnpackRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2,
clear_after_read=False)
value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unpack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = tf.gradients(
ys=[r0, r0_1, r1], xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientSplitConcat(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2)
value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w = ta.split(value, [2, 1])
r = w.concat()
# Test combined gradients
grad = tf.gradients(
ys=[r], xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual(
[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]], grad_vals[0])
def testTensorArrayGradientDynamicUnpackRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unpack(value)
r0 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = tf.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
def testCloseTensorArray(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
c1 = ta.close()
session.run(c1)
def testSizeTensorArray(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, s.eval())
def testWriteCloseTensorArray(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
w1.close().run() # Expected to run without problems
with self.assertRaisesOpError(
r"TensorArray foo has already been closed."):
with tf.control_dependencies([w1.close()]):
w1.write(2, 3.0).flow.eval()
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
np_dtype = dtype.as_numpy_dtype
with self.test_session(use_gpu=self._use_gpu) as session:
v0 = tf.identity(np.arange(3*5, dtype=np_dtype).reshape(3, 5))
var = tf.Variable(np.arange(100, 105, dtype=np_dtype))
state0 = tf.identity(np.array([1] * 5, dtype=np_dtype))
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo",
size=0 if dynamic_size else 3, dynamic_size=dynamic_size)
time_0 = tf.identity(0)
def body(time, ta_t, state):
sliced = tf.slice(v0, begin=tf.pack([time, 0]), size=[1, -1])
sliced = tf.squeeze(sliced)
out = sliced + var + state
state += sliced
ta_t = ta_t.write(time, out)
return (time+1, ta_t, state)
(unused_0, h_final, unused_2) = tf.while_loop(
cond=lambda time, unused_1, unused_2: time < 3,
body=body,
loop_vars=(time_0, ta, state0),
parallel_iterations=3)
vout = h_final.pack()
grad_val = -np.arange(3*5, dtype=np_dtype).reshape(3, 5)
v0_grad = tf.gradients([vout], [v0], [grad_val])[0]
state0_grad = tf.gradients([vout], [state0], [grad_val])[0]
var_grad = tf.gradients([vout], [var], [grad_val])[0]
tf.initialize_all_variables().run()
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
session.run([state0, var, v0, vout, v0_grad, var_grad, state0_grad]))
just_v0_grad_t, = session.run([v0_grad])
# state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# vout = [ v0[0] + var + state[0] |
# v0[1] + var + state[1] |
# v0[2] + var + state[2] ]
# = [ v0[0] + var + state0 |
# v0[1] + var + state0 + v0[0] |
# v0[2] + var + state0 + v0[0] + v0[1] ]
#
# d(vout[0])/d(v0) = [1 | 0 | 0 ]
# d(vout[1])/d(v0) = [1 | 1 | 0 ]
# d(vout[2])/d(v0) = [1 | 1 | 1 ]
# d(vout)/d(var) = [1 | 1 | 1]
# d(vout)/d(state0) = [ 1 | 1 | 1 ]
state_per_time = np.array([
state0_t,
state0_t + v0_t[0, :],
state0_t + v0_t[0, :] + v0_t[1, :]])
# Compare forward prop
self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# Compare backward prop
expected_v0_grad_t = np.array([
grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
grad_val[1, :] + grad_val[2, :],
grad_val[2, :]])
self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=False, dtype=tf.float32)
# TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=tf.int64)
def testWhileLoopDynamicWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=True, dtype=tf.float32)
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.test_session(use_gpu=self._use_gpu) as session:
a = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
b = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1 + 3*5)
ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (ta.read(0, name="read_a_0") + # a + b
ta.read(1, name="read_b_0"))
g0 = -(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = tf.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = tf.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
return tensor_array_grad._GetGradSource(tf.constant(0, name=name))
def testGetGradSource_Invalid(self):
with self.assertRaises(ValueError):
self._grad_source_for_name("")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo/bar")
def testGetGradSource_NoEnclosingScope(self):
self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
self.assertEqual(
"gradients_0", self._grad_source_for_name("gradients_0/foo"))
self.assertEqual(
"gradients", self._grad_source_for_name("gradients/foo/bar"))
self.assertEqual(
"gradients_0", self._grad_source_for_name("gradients_0/foo/bar"))
def testGetGradSource_EnclosingScope(self):
self.assertEqual(
"foo/gradients:0", self._grad_source_for_name("foo/gradients"))
self.assertEqual(
"foo/gradients_0:0", self._grad_source_for_name("foo/gradients_0"))
self.assertEqual(
"foo/gradients", self._grad_source_for_name("foo/gradients/bar"))
self.assertEqual(
"foo/gradients_0", self._grad_source_for_name("foo/gradients_0/bar"))
self.assertEqual(
"foo/bar/gradients",
self._grad_source_for_name("foo/bar/gradients/baz"))
self.assertEqual(
"foo/bar/gradients_0",
self._grad_source_for_name("foo/bar/gradients_0/baz"))
def testGetGradSource_NestedUsesInnermost(self):
self.assertEqual(
"foo/gradients/bar/gradients_0",
self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
def testWriteShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=True)
c0 = tf.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
c1 = tf.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
c2 = tf.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
def testUnpackShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo",
size=0, dynamic_size=True, infer_shape=True)
value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unpack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = tf.constant([4.0, 5.0])
w1 = w0.write(3, c1)
r1 = w1.read(0)
self.assertAllEqual(c1.get_shape(), r1.get_shape())
c2 = tf.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
def testSplitShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo",
size=0, dynamic_size=True, infer_shape=True)
value = tf.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo1",
size=0, dynamic_size=True, infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testWriteUnknownShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=True)
c0 = tf.placeholder(tf.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testGradientWhenNotAllComponentsRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=2)
x = tf.constant([2.0, 3.0])
w = ta.unpack(x)
r0 = w.read(0)
# calculate (dr0/dx0, dr0/dx1). since r0 = x0, gradients are (1, 0).
grad_r0 = tf.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
class TensorArrayGPUTest(TensorArrayCPUTest):
_use_gpu = True
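# Hedged sketch (not part of the original test suite): a minimal summary of the
# write/read/pack pattern exercised by the tests above. It reuses the tf /
# tensor_array_ops names already imported by this module and mirrors the
# clear_after_read=False usage from testTensorArrayGradientWritePackConcatAndRead;
# it is an illustration only, not an additional test case.
def _example_tensor_array_round_trip(session):
  ta = tensor_array_ops.TensorArray(
      dtype=tf.float32, size=2, clear_after_read=False)
  w = ta.write(0, [1.0, 2.0]).write(1, [3.0, 4.0])
  packed = w.pack()   # shape (2, 2): one row per written value
  first = w.read(0)   # [1.0, 2.0]
  return session.run([packed, first])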
if __name__ == "__main__":
tf.test.main()
|
|
# University of Chicago,
# Feb 2, 2008
# Eric Olson
# adapted from home version written by Eric.
from OpenGL.GL import *
from OpenGL.GLU import *  # gluOrtho2D (used in OrthoSetupPush) lives in GLU, not GL
from flapp.pmath.vec3 import *
class ScreenClearer:
def __init__(self, color=None, clearDepth=True):
if color is None:
self.color = (.8, .8, .8, 0.0)
else:
if len(color) < 4:
self.color = (color[0], color[1], color[2], 0.0)
else:
self.color = color
self.clearDepth = clearDepth
def draw(self, renderer):
glClearColor(self.color[0], self.color[1], self.color[2], self.color[3])
if self.clearDepth:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
else:
glClear(GL_COLOR_BUFFER_BIT)
class pointList:
P_POINT = 0
# P_CROSS = 1
# P_CIRCLE = 2
def __init__(self):
self.visible = True
self.points = [ (0.5, 0.5) ]
self.color = (0.4, 0.4, 0.9)
self.diameter = 5
self.drawType = self.P_POINT
def update(self, app, secs):
pass
def draw(self, renderer):
glColor3fv(self.color)
if self.drawType == self.P_POINT:
glPointSize(self.diameter)
glBegin(GL_POINTS)
for p in self.points:
glVertex2f(p[0] * renderer.width, p[1] * renderer.height)
glEnd()
else:
raise Exception("Unimplemented")
def DrawAxis(length, lineWidth=1):
glPushAttrib(GL_ENABLE_BIT)
glLineWidth(lineWidth)
glDisable(GL_LIGHTING)
glDisable(GL_TEXTURE_2D)
glColor3f(1.0, 0.0, 0.0)
glBegin(GL_LINES)
glVertex3f(0.0, 0.0, 0.0)
glVertex3f(length, 0.0, 0.0)
glEnd()
glColor3f(0.0, 1.0, 0.0)
glBegin(GL_LINES)
glVertex3f(0.0, 0.0, 0.0)
glVertex3f(0.0, length, 0.0)
glEnd()
glColor3f(0.0, 0.0, 1.0)
glBegin(GL_LINES)
glVertex3f(0.0, 0.0, 0.0)
glVertex3f(0.0, 0.0, length)
glEnd()
glPopAttrib()
def DrawTxtrd2dSquareIn2DFromCorner(pt, width, height, texXMinMax=(0.0, 1.0), texYMinMax=(0.0, 1.0), vflipTexture=False):
# print "tex coords:", texXMinMax, texYMinMax
if not vflipTexture:
glBegin(GL_TRIANGLE_STRIP);
glTexCoord2f(texXMinMax[0], texYMinMax[1]);
glVertex2f(pt.x, pt.y+height);
glTexCoord2f(texXMinMax[0], texYMinMax[0]);
glVertex2f(pt.x, pt.y);
glTexCoord2f(texXMinMax[1], texYMinMax[1]);
glVertex2f(pt.x+width, pt.y+height);
glTexCoord2f(texXMinMax[1], texYMinMax[0]);
glVertex2f(pt.x+width, pt.y);
glEnd();
else: # assumes the tex coord is between 0.0 and 1.0
glBegin(GL_TRIANGLE_STRIP);
glTexCoord2f(texXMinMax[0], 1.0 - texYMinMax[1]);
glVertex2f(pt.x, pt.y+height);
glTexCoord2f(texXMinMax[0], 1.0 - texYMinMax[0]);
glVertex2f(pt.x, pt.y);
glTexCoord2f(texXMinMax[1], 1.0 - texYMinMax[1]);
glVertex2f(pt.x+width, pt.y+height);
glTexCoord2f(texXMinMax[1], 1.0 - texYMinMax[0]);
glVertex2f(pt.x+width, pt.y);
glEnd();
def Draw2dSquareIn2D(pt, width, height):
glBegin(GL_TRIANGLE_STRIP);
#a = (pt + size * edge_vec_a);
#glVertex2fv( a.getDataPtr() );
glVertex2f( pt.x, pt.y + height)
#b = (pt + size * edge_vec_b);
#glVertex2fv( b.getDataPtr() );
glVertex2f( pt.x, pt.y)
#c = (pt - size * edge_vec_b);
#glVertex2fv( c.getDataPtr() );
glVertex2f( pt.x+width, pt.y+height)
#d = (pt - size * edge_vec_a);
#glVertex2fv( d.getDataPtr() );
glVertex2f( pt.x+width, pt.y)
glEnd();
def OrthoSetupPush(screenWidth, screenHeight):
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
gluOrtho2D(0, screenWidth, 0, screenHeight)
glMatrixMode(GL_MODELVIEW)
def OrthoSetupPop():
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
def BlitOne(destPos, destSize):
glBegin(GL_TRIANGLE_STRIP)
glVertex2f(destPos.x, destPos.y + destSize[1])
glVertex2f(destPos.x, destPos.y)
glVertex2f(destPos.x + destSize[0], destPos.y + destSize[1])
glVertex2f(destPos.x + destSize[0], destPos.y)
glEnd()
def BlitColor(destPos, destSize, (screenWidth, screenHeight), color=(0.5, 0.5, 0.5), blend=False, orthoSetup=True):
glPushAttrib(GL_ENABLE_BIT) # texture, blend
glDisable(GL_TEXTURE_2D)
if orthoSetup:
OrthoSetupPush(screenWidth, screenHeight)
if len(color) == 3:
glColor3fv(color)
elif len(color) == 4:
glColor4fv(color)
BlitOne(destPos, destSize)
if orthoSetup:
OrthoSetupPop()
glPopAttrib()
def DrawTxtrd3dSquareIn3DFromCorner(pt, width, height, texXMinMax=(0.0, 1.0), texYMinMax=(0.0, 1.0)):
# print "tex coords:", texXMinMax, texYMinMax
glBegin(GL_TRIANGLE_STRIP);
glTexCoord2f(texXMinMax[0], texYMinMax[1]);
glVertex3f(pt.x, pt.y+height, 0);
glTexCoord2f(texXMinMax[0], texYMinMax[0]);
glVertex3f(pt.x, pt.y, 0);
glTexCoord2f(texXMinMax[1], texYMinMax[1]);
glVertex3f(pt.x+width, pt.y+height, 0);
glTexCoord2f(texXMinMax[1], texYMinMax[0]);
glVertex3f(pt.x+width, pt.y, 0);
#print (pt.x, pt.y+height, 0), (pt.x, pt.y, 0), (pt.x+width, pt.y+height, 0), (pt.x+width, pt.y, 0)
glEnd();
def DrawTxtrd3dSquareIn3DFromCenter(pt, width, height, texXMinMax=(0.0, 1.0), texYMinMax=(0.0, 1.0)):
# print "tex coords:", texXMinMax, texYMinMax
glBegin(GL_TRIANGLE_STRIP);
glTexCoord2f(texXMinMax[0], texYMinMax[1]);
glVertex3f(pt.x-width/2., pt.y+height/2., 0);
glTexCoord2f(texXMinMax[0], texYMinMax[0]);
glVertex3f(pt.x-width/2., pt.y-height/2., 0);
glTexCoord2f(texXMinMax[1], texYMinMax[1]);
glVertex3f(pt.x+width/2., pt.y+height/2., 0);
glTexCoord2f(texXMinMax[1], texYMinMax[0]);
glVertex3f(pt.x+width/2., pt.y-height/2., 0);
#print (pt.x, pt.y+height, 0), (pt.x, pt.y, 0), (pt.x+width, pt.y+height, 0), (pt.x+width, pt.y, 0)
#print "pt:", pt
#print (pt.x-width /2., pt.y+height/2., 0), (pt.x-width/2., pt.y-width/2., 0), (pt.x+width/2., pt.y+height/2., 0), (pt.x+width/2., pt.y-height/2., 0)
glEnd();
def DrawBillboard3D(pt, width, height, normVec, upVec, texXMinMax=(0.0, 1.0), texYMinMax=(0.0, 1.0) ):
sideVec = scaleV3(normV3(crossV3(upVec, normVec)), width/2.0)
upVecB = scaleV3(normV3(upVec), height/2.0)
#print "pt:", pt
#print "norm vec:", normVec
#print "side vec:", sideVec
#print "up vec:", upVec
pt1 = addV3(pt, addV3(negV3(sideVec), upVecB))
pt2 = addV3(pt, subV3(negV3(sideVec), upVecB))
pt3 = addV3(pt, addV3(sideVec, upVecB))
pt4 = addV3(pt, subV3(sideVec, upVecB))
#print pt1, pt2, pt3, pt4
glBegin(GL_TRIANGLE_STRIP);
glTexCoord2f(texXMinMax[0], texYMinMax[1]);
glVertex3f(pt1.x, pt1.y, pt1.z)
glTexCoord2f(texXMinMax[0], texYMinMax[0]);
glVertex3f(pt2.x, pt2.y, pt2.z)
glTexCoord2f(texXMinMax[1], texYMinMax[1]);
glVertex3f(pt3.x, pt3.y, pt3.z)
glTexCoord2f(texXMinMax[1], texYMinMax[0]);
glVertex3f(pt4.x, pt4.y, pt4.z)
glEnd();
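# Hedged usage sketch (not part of the original module): shows how a few of the
# helpers above might be combined inside a per-frame draw callback. Assumes an
# active OpenGL context; the _Pt class stands in for the point objects with
# .x/.y attributes that these helpers expect and is illustrative only.
def _example_draw_overlay(screen_width, screen_height):
    class _Pt:
        def __init__(self, x, y):
            self.x = x
            self.y = y
    # Flat-colored quad covering the lower-left quarter of the screen, drawn in
    # an orthographic projection set up (and torn down) by BlitColor itself.
    BlitColor(_Pt(0, 0), (screen_width / 2, screen_height / 2),
              (screen_width, screen_height),
              color=(0.2, 0.2, 0.8), orthoSetup=True)
    # World-space axis gizmo, 10 units long, 2px wide lines.
    DrawAxis(10.0, lineWidth=2)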
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 NetApp, Inc.
# Copyright 2014 Mirantis, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import ddt
import mock
from oslo_config import cfg
from oslo_service import wsgi
from manila import context
from manila import db
from manila import exception
from manila import manager
from manila import service
from manila import test
from manila import utils
test_service_opts = [
cfg.StrOpt("fake_manager",
default="manila.tests.test_service.FakeManager",
help="Manager for testing"),
cfg.StrOpt("test_service_listen",
help="Host to bind test service to"),
cfg.IntOpt("test_service_listen_port",
default=0,
help="Port number to bind test service to"),
]
CONF = cfg.CONF
CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
"""Fake manager for tests."""
RPC_API_VERSION = "1.0"
def __init__(self, host=None, db_driver=None, service_name=None):
super(FakeManager, self).__init__(host=host, db_driver=db_driver)
def test_method(self):
return 'manager'
class ExtendedService(service.Service):
def test_method(self):
return 'service'
class ServiceManagerTestCase(test.TestCase):
"""Test cases for Services."""
def test_message_gets_to_manager(self):
serv = service.Service('test', 'test', 'test', CONF.fake_manager)
serv.start()
self.assertEqual('manager', serv.test_method())
def test_override_manager_method(self):
serv = ExtendedService('test', 'test', 'test', CONF.fake_manager)
serv.start()
self.assertEqual('service', serv.test_method())
class ServiceFlagsTestCase(test.TestCase):
def test_service_enabled_on_create_based_on_flag(self):
self.flags(enable_new_services=True)
host = 'foo'
binary = 'manila-fake'
app = service.Service.create(host=host, binary=binary)
app.start()
app.stop()
ref = db.service_get(context.get_admin_context(), app.service_id)
db.service_destroy(context.get_admin_context(), app.service_id)
self.assertFalse(ref['disabled'])
def test_service_disabled_on_create_based_on_flag(self):
self.flags(enable_new_services=False)
host = 'foo'
binary = 'manila-fake'
app = service.Service.create(host=host, binary=binary)
app.start()
app.stop()
ref = db.service_get(context.get_admin_context(), app.service_id)
db.service_destroy(context.get_admin_context(), app.service_id)
self.assertTrue(ref['disabled'])
def fake_service_get_by_args(*args, **kwargs):
raise exception.NotFound()
def fake_service_get(*args, **kwargs):
raise Exception()
host = 'foo'
binary = 'bar'
topic = 'test'
service_create = {
'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
'availability_zone': 'nova',
}
service_ref = {
'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
'availability_zone': {'name': 'nova'},
'id': 1,
}
@ddt.ddt
class ServiceTestCase(test.TestCase):
"""Test cases for Services."""
def test_create(self):
app = service.Service.create(host='foo',
binary='manila-fake',
topic='fake')
self.assertTrue(app)
@ddt.data(True, False)
def test_periodic_tasks(self, raise_on_error):
serv = service.Service(host, binary, topic, CONF.fake_manager)
self.mock_object(
context,
'get_admin_context',
mock.Mock(side_effect=context.get_admin_context))
self.mock_object(serv.manager, 'periodic_tasks')
serv.periodic_tasks(raise_on_error=raise_on_error)
context.get_admin_context.assert_called_once_with()
serv.manager.periodic_tasks.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
raise_on_error=raise_on_error)
@mock.patch.object(service.db, 'service_get_by_args',
mock.Mock(side_effect=fake_service_get_by_args))
@mock.patch.object(service.db, 'service_create',
mock.Mock(return_value=service_ref))
@mock.patch.object(service.db, 'service_get',
mock.Mock(side_effect=fake_service_get))
def test_report_state_newly_disconnected(self):
serv = service.Service(host, binary, topic, CONF.fake_manager)
serv.start()
serv.report_state()
self.assertTrue(serv.model_disconnected)
service.db.service_get_by_args.assert_called_once_with(
mock.ANY, host, binary)
service.db.service_create.assert_called_once_with(
mock.ANY, service_create)
service.db.service_get.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch.object(service.db, 'service_get_by_args',
mock.Mock(side_effect=fake_service_get_by_args))
@mock.patch.object(service.db, 'service_create',
mock.Mock(return_value=service_ref))
@mock.patch.object(service.db, 'service_get',
mock.Mock(return_value=service_ref))
@mock.patch.object(service.db, 'service_update',
mock.Mock(return_value=service_ref.
update({'report_count': 1})))
def test_report_state_newly_connected(self):
serv = service.Service(host, binary, topic, CONF.fake_manager)
serv.start()
serv.model_disconnected = True
serv.report_state()
self.assertFalse(serv.model_disconnected)
service.db.service_get_by_args.assert_called_once_with(
mock.ANY, host, binary)
service.db.service_create.assert_called_once_with(
mock.ANY, service_create)
service.db.service_get.assert_called_once_with(
mock.ANY, service_ref['id'])
service.db.service_update.assert_called_once_with(
mock.ANY, service_ref['id'], mock.ANY)
class TestWSGIService(test.TestCase):
def setUp(self):
super(TestWSGIService, self).setUp()
self.mock_object(wsgi.Loader, 'load_app')
self.test_service = service.WSGIService("test_service")
def test_service_random_port(self):
self.assertEqual(0, self.test_service.port)
self.test_service.start()
self.assertNotEqual(0, self.test_service.port)
self.test_service.stop()
wsgi.Loader.load_app.assert_called_once_with("test_service")
def test_reset_pool_size_to_default(self):
self.test_service.start()
# Stopping the service, which in turn sets pool size to 0
self.test_service.stop()
self.assertEqual(0, self.test_service.server._pool.size)
# Resetting pool size to default
self.test_service.reset()
self.test_service.start()
self.assertGreater(self.test_service.server._pool.size, 0)
wsgi.Loader.load_app.assert_called_once_with("test_service")
@mock.patch('oslo_service.wsgi.Server')
@mock.patch('oslo_service.wsgi.Loader')
def test_ssl_enabled(self, mock_loader, mock_server):
self.override_config('osapi_share_use_ssl', True)
service.WSGIService("osapi_share")
mock_server.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
port=mock.ANY, host=mock.ANY,
use_ssl=True)
self.assertTrue(mock_loader.called)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log
from pycadf import cadftaxonomy as taxonomy
from six.moves.urllib import parse
from keystone import auth
from keystone.auth import plugins as auth_plugins
from keystone.common import dependency
from keystone.contrib.federation import constants as federation_constants
from keystone.contrib.federation import utils
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
from keystone import notifications
LOG = log.getLogger(__name__)
METHOD_NAME = 'mapped'
@dependency.requires('assignment_api', 'federation_api', 'identity_api',
'token_provider_api')
class Mapped(auth.AuthMethodHandler):
def _get_token_ref(self, auth_payload):
token_id = auth_payload['id']
response = self.token_provider_api.validate_token(token_id)
return token_model.KeystoneToken(token_id=token_id,
token_data=response)
def authenticate(self, context, auth_payload, auth_context):
"""Authenticate mapped user and return an authentication context.
:param context: keystone's request context
:param auth_payload: the content of the authentication for a
given method
:param auth_context: user authentication context, a dictionary
shared by all plugins.
In addition to ``user_id`` in ``auth_context``, this plugin sets
``group_ids``, ``OS-FEDERATION:identity_provider`` and
``OS-FEDERATION:protocol``
"""
if 'id' in auth_payload:
token_ref = self._get_token_ref(auth_payload)
handle_scoped_token(context, auth_payload, auth_context, token_ref,
self.federation_api,
self.identity_api,
self.token_provider_api)
else:
handle_unscoped_token(context, auth_payload, auth_context,
self.assignment_api, self.federation_api,
self.identity_api)
def handle_scoped_token(context, auth_payload, auth_context, token_ref,
federation_api, identity_api, token_provider_api):
utils.validate_expiration(token_ref)
token_audit_id = token_ref.audit_id
identity_provider = token_ref.federation_idp_id
protocol = token_ref.federation_protocol_id
user_id = token_ref.user_id
group_ids = token_ref.federation_group_ids
send_notification = functools.partial(
notifications.send_saml_audit_notification, 'authenticate',
context, user_id, group_ids, identity_provider, protocol,
token_audit_id)
utils.assert_enabled_identity_provider(federation_api, identity_provider)
try:
mapping = federation_api.get_mapping_from_idp_and_protocol(
identity_provider, protocol)
utils.validate_groups(group_ids, mapping['id'], identity_api)
except Exception:
# NOTE(topol): Diaper defense to catch any exception, so we can
# send off failed authentication notification, raise the exception
# after sending the notification
send_notification(taxonomy.OUTCOME_FAILURE)
raise
else:
send_notification(taxonomy.OUTCOME_SUCCESS)
auth_context['user_id'] = user_id
auth_context['group_ids'] = group_ids
auth_context[federation_constants.IDENTITY_PROVIDER] = identity_provider
auth_context[federation_constants.PROTOCOL] = protocol
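# Illustrative note (not part of the original module): on success the handlers
# above leave auth_context looking roughly like the following, per the
# authenticate() docstring; the literal values are placeholders only.
#
#     {
#         'user_id': '<federated user id>',
#         'group_ids': ['<group id>', ...],
#         'OS-FEDERATION:identity_provider': '<identity provider id>',
#         'OS-FEDERATION:protocol': '<protocol id>',
#     }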
def handle_unscoped_token(context, auth_payload, auth_context,
assignment_api, federation_api, identity_api):
def is_ephemeral_user(mapped_properties):
return mapped_properties['user']['type'] == utils.UserType.EPHEMERAL
def build_ephemeral_user_context(auth_context, user, mapped_properties,
identity_provider, protocol):
auth_context['user_id'] = user['id']
auth_context['group_ids'] = mapped_properties['group_ids']
auth_context[federation_constants.IDENTITY_PROVIDER] = (
identity_provider)
auth_context[federation_constants.PROTOCOL] = protocol
def build_local_user_context(auth_context, mapped_properties):
user_info = auth_plugins.UserAuthInfo.create(mapped_properties,
METHOD_NAME)
auth_context['user_id'] = user_info.user_id
assertion = extract_assertion_data(context)
identity_provider = auth_payload['identity_provider']
protocol = auth_payload['protocol']
utils.assert_enabled_identity_provider(federation_api, identity_provider)
group_ids = None
# NOTE(topol): The user is coming in from an IdP with a SAML assertion
# instead of from a token, so we set token_id to None
token_id = None
# NOTE(marek-denis): This variable is set to None and there is a
# possibility that it will be used in the CADF notification. This means the
# operation will not be mapped to any user (even an ephemeral one).
user_id = None
try:
mapped_properties, mapping_id = apply_mapping_filter(
identity_provider, protocol, assertion, assignment_api,
federation_api, identity_api)
if is_ephemeral_user(mapped_properties):
user = setup_username(context, mapped_properties)
user_id = user['id']
group_ids = mapped_properties['group_ids']
utils.validate_groups_cardinality(group_ids, mapping_id)
build_ephemeral_user_context(auth_context, user,
mapped_properties,
identity_provider, protocol)
else:
build_local_user_context(auth_context, mapped_properties)
except Exception:
# NOTE(topol): Diaper defense to catch any exception, so we can
# send off failed authentication notification, raise the exception
# after sending the notification
outcome = taxonomy.OUTCOME_FAILURE
notifications.send_saml_audit_notification('authenticate', context,
user_id, group_ids,
identity_provider,
protocol, token_id,
outcome)
raise
else:
outcome = taxonomy.OUTCOME_SUCCESS
notifications.send_saml_audit_notification('authenticate', context,
user_id, group_ids,
identity_provider,
protocol, token_id,
outcome)
def extract_assertion_data(context):
assertion = dict(utils.get_assertion_params_from_env(context))
return assertion
def apply_mapping_filter(identity_provider, protocol, assertion,
assignment_api, federation_api, identity_api):
idp = federation_api.get_idp(identity_provider)
utils.validate_idp(idp, protocol, assertion)
mapped_properties, mapping_id = federation_api.evaluate(
identity_provider, protocol, assertion)
# NOTE(marek-denis): We update group_ids only here to avoid fetching
# groups identified by name/domain twice.
# NOTE(marek-denis): Groups are translated from name/domain to their
# corresponding ids in the auth plugin, as we need to know which
# ``mapping_id`` was used as well as the identity_api and assignment_api
# objects.
group_ids = mapped_properties['group_ids']
utils.validate_groups_in_backend(group_ids,
mapping_id,
identity_api)
group_ids.extend(
utils.transform_to_group_ids(
mapped_properties['group_names'], mapping_id,
identity_api, assignment_api))
mapped_properties['group_ids'] = list(set(group_ids))
return mapped_properties, mapping_id
def setup_username(context, mapped_properties):
"""Setup federated username.
Function covers all the cases for properly setting the user id, the primary
identifier for identity objects. The initial version of the mapping engine
assumed the user is identified by ``name`` and that the ``id`` is built from
that name. We, however, need to be able to accept local rules that identify
the user by either id or name/domain.
The following use-cases are covered:
1) If neither user_name nor user_id is set, raise exception.Unauthorized.
2) If user_id is set and user_name is not, set user_name equal to user_id.
3) If user_id is not set and user_name is, set user_id to a URL-safe version
of user_name.
:param context: authentication context
:param mapped_properties: Properties issued by a RuleProcessor.
:type: dictionary
:raises: exception.Unauthorized
:returns: dictionary with user identification
:rtype: dict
"""
user = mapped_properties['user']
user_id = user.get('id')
user_name = user.get('name') or context['environment'].get('REMOTE_USER')
if not any([user_id, user_name]):
raise exception.Unauthorized(_("Could not map user"))
elif not user_name:
user['name'] = user_id
elif not user_id:
user['id'] = parse.quote(user_name)
return user
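# Hedged sketch (not part of the original module): exercises the three
# setup_username() cases listed in its docstring with fabricated inputs; the
# values are illustrative only and no real identity data is involved.
def _example_setup_username_cases():
    ctx = {'environment': {}}
    # 1) user_id set, user_name missing -> name is copied from the id
    user = setup_username(ctx, {'user': {'id': 'abc123'}})
    assert user['name'] == 'abc123'
    # 2) user_name set, user_id missing -> id becomes a URL-safe form of the name
    user = setup_username(ctx, {'user': {'name': 'alice example'}})
    assert user['id'] == parse.quote('alice example')
    # 3) neither set (and no REMOTE_USER in the environment) -> Unauthorized
    try:
        setup_username(ctx, {'user': {}})
    except exception.Unauthorized:
        pass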
|
|
# microperi.py
# Part of MicroPeri https://github.com/c0d3st0rm/microperi
#
# See LICENSE file for copyright and license details
# MicroPeri is a library for using the BBC micro:bit with MicroPython as an
# external peripheral device or sensor, using an API which closely replicates
# the micro:bit's MicroPython API.
import sys
if __name__ == "__main__":
# this shouldn't be run as a file
print("Use me as a module:\n from microperi import Microbit" % (name))
sys.exit(1)
import os
import pickle
# make sure we import from our local serial package
os.sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import serial
import utils
from time import sleep as delaysecs
from logging import debug, info, warning, basicConfig, INFO, DEBUG, WARNING
basicConfig(level=INFO)
class _microbit_connection:
"""
Class which handles the sending and receiving of data to and from the
micro:bit over the serial connection.
"""
conn = None
def __init__(self, port=None, reraise_exceptions=False):
"""
Constructor. Attempts to find the micro:bit, and raises an Exception
if one can't be found. If one is found, but there is an error connecting
to it, depending on the error (and platform), microperi may output a
message to stderr, and then raise an exception.
"""
if not isinstance(port, str):
port = self.guess_port()
if port is None:
raise Exception("Could not find micro:bit!")
try:
self.conn = serial.Serial(port, 115200, timeout=1)
except serial.SerialException as e:
if e.errno == 13:
# possibly invalid privileges for the current user?
print("\nmicro:bit located, but permission to connect to it was denied.", file=sys.stderr)
if sys.platform.startswith("linux"):
import pwd
print("Perhaps your user account does not have sufficient privileges to open the serial connection? Try running the command:", file=sys.stderr)
print(" sudo usermod -a -G dialout %s" % (pwd.getpwuid(os.getuid()).pw_name), file=sys.stderr)
print("Then log out, log back in, and see if that works.\n", file=sys.stderr)
else:
print("")
if reraise_exceptions:
raise e
sys.exit(1)
elif e.errno == 16:
# device busy
print("\nmicro:bit located, but it seems to be busy. This can happen if another program is attempting to communicate with it at the same time (do you have an open serial connection to it?).", file=sys.stderr)
print("Wait up to 20 seconds, then try again. If that doesn't work, attempt a hard-reset of the device by pressing the reset button on the back of the board. If that doesn't work, then try a reboot.\n", file=sys.stderr)
if reraise_exceptions:
raise e
sys.exit(1)
else:
print("\nAn error occurred while trying to connect to the micro:bit:\n %s" % (str(e)))
if reraise_exceptions:
raise e
sys.exit(1)
#info("Connected to micro:bit, port: %s" % (self.conn.port))
# perform a soft reset to make sure that we have a clean environment
self.soft_reset()
delaysecs(0.1)
self.flush_input()
def handle_potential_invalid_data(self, data):
"""
Routine which looks for the "Traceback" string at the start of every
line of output, in case an exception was raised by the micro:bit.
"""
lines = data.replace("\r", "").strip().split("\n")
if len(lines) <= 0:
return
for x in range(len(lines) - 1):
if lines[x].startswith("Traceback"):
# look for the exception raised. this is going to be on the very
# last line.
name = lines[-1].split(" ")[0][:-1]
msg = lines[-1][len(name)+2:]
warning("the micro:bit raised an exception (%s: %s) with the following traceback:\n[TRACEBACK START]\n%s\n[TRACEBACK END]" % (name, msg, "\n".join(lines[x:])))
raise Exception("[the micro:bit raised the following exception] %s: %s" % (name, msg))
def guess_port(self):
"""
Returns the address of the first available connected micro:bit, or None
if none were found.
"""
devices = utils.connected_microbits()
if len(devices) <= 0:
return None
return devices[0]
def write(self, data, lognotes=""):
"""
Writes a string of data plus a carriage return ("\r") to the serial
connection, after encoding it.
"""
debug(" Sending: " + str(data + "\r") + lognotes)
self.conn.write(str(data + "\r").encode())
def readlines(self, strip=True, decode=True, look_for_exceptions=True, flush_after_input=True, until_sequence=b">>> "):
"""
Continuously reads data from the serial connection until a ">>>" is
encountered.
"""
debug(" Received (command echo line, ignoring): " + str(self.conn.readline()))
data = self.conn.read_until(until_sequence)
if flush_after_input:
self.flush_input()
try:
dataStr = data.decode()
debug(" Received (decoded): " + str(dataStr))
if decode:
if strip:
dataStr = dataStr.replace(until_sequence.decode(), "").strip()
if look_for_exceptions:
self.handle_potential_invalid_data(dataStr)
return dataStr
return data
except UnicodeDecodeError:
# Random data received, try again to read.
return self.readlines(strip, decode, look_for_exceptions, flush_after_input, until_sequence)
def execute(self, command, strip=True, decode=True, look_for_exceptions=True, timeout=None, flush_after_input=True):
"""
Executes the specified command, and returns the result. `strip`
specifies whether to strip the whole of the output, or just the
carriage return at the very end.
"""
backup_timeout = self.conn.timeout
if timeout is not None:
self.conn.timeout = timeout
self.flush_input()
self.write(command)
data = self.readlines(strip, decode, look_for_exceptions, flush_after_input)
self.conn.timeout = backup_timeout
return data
def soft_reset(self, do_post_reset=True):
self.write("\x04", lognotes="(ctrl+c)") # ctrl+c (KeyboardInterrupt)
self.write("")
self.flush_input()
self.write("\x03", lognotes="(ctrl+d)") # ctrl+d (EOF; soft reset)
if do_post_reset:
self.post_reset()
def post_reset(self):
"""
Function executed after a device reset is called.
"""
self.execute("")
self.flush_input()
def flush_input(self):
"""
Routine to manually flush the serial input buffer.
"""
n = self.conn.inWaiting()
while n > 0:
self.conn.read(n)
n = self.conn.inWaiting()
def _determine_variable_type(s):
""" Function that attempts to guess what kind of variable was specified in s, and returns the appropriate representation of it. """
if s.startswith("'") and s.endswith("'"):
# string
return s[1:-1]
elif s.isdigit():
# integer
return int(s)
else:
# float?
# TODO refine
try:
return float(s)
except:
pass
raise Exception("*** FIXME: do something here ***")
class _shim_class:
""" Wrapper for classes. """
__dict__ = None
def __init__(self):
self.__dict__ = {}
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("Shim class has no attribute '%s'" % (attr))
def _set_attr(self, attr, val):
self.__dict__[attr] = val
class _shim_function:
_conn = None
_path = None
def __init__(self, conn, path):
self._conn = conn
self._path = path
def call(self, *args, **kwargs):
""" Wrapper for a function call. """
# assemble the function string
# TODO replace info with something else (not an array)?
s = self._path + "("
arg_count = len(args) + len(kwargs)
for arg in args:
arg = repr(arg)
s = s + arg
arg_count -= 1
if arg_count > 0:
s = s + ","
for key in kwargs.keys():
val = kwargs[key]
val = repr(val)
s = s + "%s=%s" % (key, val)
arg_count -= 1
if arg_count > 0:
s = s + ","
s = s + ")"
return self._conn.execute(s)
class _microbit_wrapper:
""" Overall micro:bit wrapper. """
_conn = None
_cache_path = None
_module_list = ["microbit"]
_members = {}
def __init__(self):
self._conn = _microbit_connection()
home = os.getenv("HOME")
if home is None:
raise OSError("Error: could not get home environment variable!")
self._cache_path = home + "/.microperi_cache"
self._load_ubit_module_cache()
def _scan_member_of(self, module_to_process, member_to_process):
"""\
Scan a member of a module (or a member of a member - this routine is recursive).
NOTES:
- members with members themselves are encapsulated in the _shim_class class.
"""
# assemble a string which represents the "path" to the current
# member/module which we're processing (e.g: "microbit.display")
my_path = module_to_process
if member_to_process is not None:
my_path = my_path + "." + member_to_process
debug("processing %s" % (my_path))
members = self._conn.execute("dir(%s)" % (my_path))[2:-2].split("', '")
me = _shim_class()
debug("got members: " + str(members))
for member in members:
member = member.strip()
if len(member) <= 0:
continue
debug("processing member " + member)
info = self._conn.execute("repr(%s.%s)" % (my_path, member))[1:-1]
member_str = my_path + "." + member
if info.startswith("<") and info.endswith(">"):
# this member is a class or function
s = info[1:-1]
if s == "function" or s == "bound_method":
# function
debug(" %s is a function" % (member_str))
# add a function wrapper class, then point its member
# function to the attribute
shim_func = _shim_function(self._conn, member_str)
me._set_attr("_shim_function_" + member, shim_func)
me._set_attr(member, me.__getattr__("_shim_function_" + member).call)
else:
# assume class
debug(" %s is a class" % (member_str))
new_member = self._scan_member_of(my_path, member)
me._set_attr(member, new_member)
elif (info.startswith('"') and info.endswith('"')) or \
(info.startswith("'") and info.endswith("'")):
debug(" %s is a function" % (member_str))
me._set_attr(member, str(info[1:-1]))
elif info.isnumeric():
# assume integer
debug(" %s is an integer" % (member_str))
me._set_attr(member, int(info))
else:
# huh?
# TODO raise exception?
debug("unrecognised member type (member path: %s). value: %s" % (my_path, info))
return me
def _scan_modules(self, micropython_commit_hash):
self._conn.execute("\x04") # ctrl+c
self._conn.flush_input()
cache = {"ver": micropython_commit_hash}
# FIXME: the below string outputs after a short delay. stderr is
# nonblocking - this delay should be nonexistent (without the
# sys.stderr.flush() call, it doesn't output at all until the very end
# of the function).
print("Please wait while microperi indexes the micro:bit . . . ", end="", file=sys.stderr)
sys.stderr.flush()
for module in self._module_list:
# try to import the module
try:
self._conn.execute("import " + module)
except:
# error importing module - assume it doesn't exist
warning("warning: module %s could not be imported. skipping . . ." % (module))
continue
self._conn.flush_input()
debug("processing module " + module + "")
cache[module] = self._scan_member_of(module, None)
try:
f = open(self._cache_path, "wb")
pickle.dump(cache, f, protocol=4)
f.close()
except BaseException as e:
print("")
raise e
self._members = cache
print("done!", file=sys.stderr)
def _load_ubit_module_cache(self):
# get the commit hash for this build
self._conn.flush_input()
self._conn.soft_reset(do_post_reset=False)
lines = "".join(self._conn.readlines())
index = lines.find("MicroPython v")
if index < 0:
raise Exception("Error: could not determine MicroPython build version")
index += 13
# locate the commit hash for this build, used to cache the module map
# TODO: find a more efficient way of doing this
# expected format: N.N-N-hash
while lines[index].isnumeric(): # first N
index += 1
index += 1 # "."
while lines[index].isnumeric(): # second N
index += 1
index += 1 # "-"
while lines[index].isnumeric(): # third N
index += 1
index += 1 # "-"
sindex = lines[index:].find(" ")
if sindex < 0:
raise Exception("Error: could not determine MicroPython build version")
micropython_commit_hash = lines[index:index + sindex]
try:
debug(" attemting to load cache from file")
f = open(self._cache_path, "rb")
cache = pickle.load(f)
f.close()
if "ver" in cache:
cache_ver = cache["ver"]
print("cache ver: " + cache_ver)
if cache_ver == micropython_commit_hash:
# use this cache
self._members = cache
return
except BaseException as e:
print("err: " + str(e))
pass
debug("failed to load cache from file. reverting to scanning")
# error. scan the micro:bit manually
self._scan_modules(micropython_commit_hash)
def __getattr__(self, attr):
if attr in self._members:
return self._members[attr]
raise AttributeError("Shim class has no attribute '%s'" % (attr))
Microbit = _microbit_wrapper
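# Hedged usage sketch (not part of the original module): connecting to the first
# attached micro:bit and calling one of the mirrored API members. Which attribute
# paths exist depends on what the indexing step finds on the device; the
# display.scroll() call below is illustrative only.
def _example_usage():
    ubit = Microbit()                        # locate, connect to and index the board
    ubit.microbit.display.scroll("hello")    # proxied call executed on the device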
|
|
"""
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: Eric Shook (eshook@kent.edu); Zhengliang Feng (odayfans@gmail.com, zfeng2@kent.edu)
"""
from ..util.SharedMemory import *
from ..util.Messaging import *
from .PCMLPrims import *
import PCMLConfig as PCMLConfig
import copy
import multiprocessing as mp
try:
PCMLConfig.scipyenabled = 1
from scipy.spatial import cKDTree
except ImportError as e:
PCMLConfig.scipyenabled = 0
class BoundingBox(object):
"""
BoundingBox defines a rectangular location (y,x) + (h,w) and may contain data describing something within its boundaries.
This class is also used as the parent class of Layer and Subdomain.
"""
def __init__(self, y, x, h, w):
"""Create a new BoundingBox.
:param y (double): The y location (lower left corner typically) of the :class:`BoundingBox`.
:param x (double): The x location (lower left corner typically) of the :class:`BoundingBox`.
:param h (double): The height of the :class:`BoundingBox`.
:param w (double): The width of the :class:`BoundingBox`.
"""
self.y = y
self.x = x
self.h = h
self.w = w
# Adding buffer data for points
self.buffx = None
self.buffy = None
self.buffh = None
self.buffw = None
# Sanity check
if(h <= 0):
raise PCMLInvalidInput("BoundingBox does not support a negative or zero height", h)
if(w <= 0):
raise PCMLInvalidInput("BoundingBox does not support a negative or zero width", w)
# Data is held within the internal structure _data
# and the data structure (e.g., array, list) and type (e.g., location, float, int) must also be described
self._data = None
self.data_structure = Datastructure.array # FIXME: For now we assume the data_structure is an array
self.data_type = None
self.tree = None
# TODO: This should be looked at with dependency on data_structure/type. Perhaps a new class should be created to encapsulate data.
# TODO: If data_structure is an array, then must describe the cellsize and set a nodata_value
# By default set to none
self.nodata_value = None
self.cellsize = None
self.nrows = None
self.ncols = None
def __repr__(self):
return "<BoundingBox: (%f,%f) [%f,%f]>" % (self.y, self.x, self.h, self.w)
def set_nparray(self, nparr, cellsize, nodata_value):
if nparr is None:
raise PCMLInvalidInput("BoundingBox.set_nparray does not support a nparr of None", nparr)
if cellsize is None:
raise PCMLInvalidInput("BoundingBox.set_nparray does not support a cellsize of None", cellsize)
if cellsize <= 0:
raise PCMLInvalidInput("BoundingBox.set_nparray does not support cellsize<=0", cellsize)
self.data_structure = Datastructure.array
PCMLNotImplemented("self.data_type is not set")
# Unfortunately you cannot hide shared memory in another module as rawarray will return the same reference
# It may have to do with saving the __shmem_data variable, could explore this later
self.__shmem_data = mp.RawArray(ctypes.c_double, nparr.size)
self._data = shmem_as_ndarray(self.__shmem_data).reshape(nparr.shape)
self._data[:, :] = nparr
self.cellsize = cellsize
self.nodata_value = nodata_value
self._reset_dim()
def get_nparray(self):
return self._data
def set_pointlist(self, pointlist, ref=False):
self.data_structure = Datastructure.pointlist
# FIXME: Should check if pointlist is a list datastructure
if not ref:
self._data = pointlist
else:
self._data = mp.Manager().list(pointlist)
def get_pointlist(self):
assert(self.data_structure == Datastructure.pointlist), "Cannot get point list if datastructure is not a point list"
return self._data
# Bounding box check
def isinsidebounds(self, point, usehalo=False):
x, y, w, h = self.x, self.y, self.w, self.h
if usehalo:
x, y, w, h = self.buffx, self.buffy, self.buffw, self.buffh
if point['x'] < x+w and point['x'] >= x and point['y'] < y+h and point['y'] >= y:
return True
else:
return False
# Get points with out including halozone
def get_pointlistwithouthalozone(self):
pointswithouthalozone = []
for point in self._data:
if self.isinsidebounds(point):
pointswithouthalozone.append(point)
return pointswithouthalozone
def _reset_dim(self):
nparr = self.get_nparray()
# Set nrows and ncols based on dimensions of array
self.nrows = len(nparr)
self.ncols = len(nparr[0])
h = self.nrows * self.cellsize
w = self.ncols * self.cellsize
if h != self.h:
PCMLUserInformation("Updating height from " + str(self.h) + " to " + str(h))
self.h = h
if w != self.w:
PCMLUserInformation("Updating width from " + str(self.w) + " to " + str(w))
self.w = w
def set_data_ref(self, ref):
"""
Set the _data variable to a reference (used for shared memory accesses - particularly subdomains)
"""
self._data = ref
self._reset_dim()
def get_locval(self, loc):
newloc = copy.copy(loc)
newloc['v'] = self._data[loc['r'] - self.r][loc['c'] - self.c]
return newloc
def get_ind_from_loc(self, loc):
return {
'r': loc['r'] - self.r,
'c': loc['c'] - self.c}
def get_yxloc(self, locind):
return {
'y': self.y + self.cellsize * locind['r'],
'x': self.x + self.cellsize * locind['c'],
'z': self._data[locind['r']][locind['c']]}
def bufferedlocgetarr(self, loc, buffersize):
ind = self.get_ind_from_loc(loc)
# FIXME: NEED TO DOUBLE CHECK THIS LOGIC, NEED TO RECORD RDIFF,CDIFF FOR H/W
r = max(0, ind['r'] - buffersize)
c = max(0, ind['c'] - buffersize)
h = buffersize + (ind['r'] - r) + 1
if (r + h > self.nrows):
h = self.nrows - r
w = buffersize + (ind['c'] - c) + 1
if (c + w > self.ncols):
w = self.ncols - c
return self.slice_nparray(r, c, h, w)
def slice_nparray(self, r, c, h, w):
# IMPORTANT: Notice slice uses r,c NOT x,y!
return self._data[r:r + h, c:c + w]
def print_data(self):
"""Print out all data."""
print(self._data)
# Get neighbors for points using cKDTree
def getneighbors(self, location, count=1, radius=np.inf, excludesearchlocation=False, distreq=False):
if PCMLConfig.scipyenabled == 0:
PCMLNotSupported("SciPy module required for getneighbors()")
if excludesearchlocation:
count += 1
if self.tree is None:
pointlist = self.get_pointlist()
pointdata = []
if len(pointlist) == 0:
if not distreq:
return []
else:
return [[], []]
for point in pointlist:
pointdata.append([point['x'], point['y']])
self.tree = cKDTree(pointdata)
points = [location['x'], location['y']]
dist, neighbors = self.tree.query(points, k=count, distance_upper_bound=radius)
if count == 1:
if neighbors == self.tree.n:
if not distreq:
return []
else:
return [[], []]
else:
if not distreq:
return [neighbors]
else:
return [[neighbors], [dist]]
if not distreq:
return neighbors[neighbors != self.tree.n]
else:
return [neighbors[neighbors != self.tree.n], dist[dist != np.inf]]
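# Hedged usage sketch (not part of the original module): a small point-based
# BoundingBox queried for nearest neighbours. Assumes SciPy is available
# (PCMLConfig.scipyenabled == 1); point dictionaries use the same 'x'/'y' key
# convention as isinsidebounds() above, and the coordinates are illustrative.
def _example_boundingbox_neighbors():
    bbox = BoundingBox(0.0, 0.0, 10.0, 10.0)  # y, x, h, w
    bbox.set_pointlist([{'x': 1.0, 'y': 1.0},
                        {'x': 2.0, 'y': 2.0},
                        {'x': 9.0, 'y': 9.0}])
    # Indices (into the point list) of the two points closest to (1.5, 1.5).
    return bbox.getneighbors({'x': 1.5, 'y': 1.5}, count=2)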
|
|
# -*- coding: utf-8 -*-
from reportlab.lib.enums import TA_LEFT
from reportlab.lib.fonts import addMapping
from reportlab.lib.pagesizes import landscape, A4
from reportlab.lib.styles import ParagraphStyle
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus.frames import Frame
from reportlab.platypus.paraparser import ParaFrag, ps2tt, tt2ps
from xhtml2pdf.util import getSize, getCoords, getFile, pisaFileObject, \
getFrameDimensions
from xhtml2pdf.w3c import css
from xhtml2pdf.xhtml2pdf_reportlab import PmlPageTemplate, PmlTableOfContents, \
PmlParagraph, PmlParagraphAndImage, PmlPageCount
import copy
import logging
import os
import re
import reportlab
import types
import urlparse
import xhtml2pdf.default
import xhtml2pdf.parser
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
reportlab.rl_config.warnOnMissingFontGlyphs = 0
log = logging.getLogger("xhtml2pdf")
sizeDelta = 2 # amount to reduce font size by for super and sub script
subFraction = 0.4 # fraction of font size that a sub script should be lowered
superFraction = 0.4 # fraction of font size that a super script should be raised
NBSP = u"\u00a0"
def clone(self, **kwargs):
n = ParaFrag(**self.__dict__)
if kwargs:
d = n.__dict__
d.update(kwargs)
# This else could cause trouble in Paragraphs with images etc.
if "cbDefn" in d:
del d["cbDefn"]
n.bulletText = None
return n
ParaFrag.clone = clone
def getParaFrag(style):
frag = ParaFrag()
frag.sub = 0
frag.super = 0
frag.rise = 0
frag.underline = 0 # XXX Need to be able to set color to fit CSS tests
frag.strike = 0
frag.greek = 0
frag.link = None
frag.text = ""
# frag.lineBreak = 0
#if bullet:
# frag.fontName, frag.bold, frag.italic = ps2tt(style.bulletFontName)
# frag.fontSize = style.bulletFontSize
# frag.textColor = hasattr(style,'bulletColor') and style.bulletColor or style.textColor
#else:
frag.fontName = "Times-Roman"
frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
frag.fontSize = style.fontSize
frag.textColor = style.textColor
# Extras
frag.leading = 0
frag.leadingSource = "150%"
frag.leadingSpace = 0
frag.backColor = None
frag.spaceBefore = 0
frag.spaceAfter = 0
frag.leftIndent = 0
frag.rightIndent = 0
frag.firstLineIndent = 0
frag.keepWithNext = False
frag.alignment = TA_LEFT
frag.vAlign = None
frag.borderWidth = 1
frag.borderStyle = None
frag.borderPadding = 0
frag.borderColor = None
frag.borderLeftWidth = frag.borderWidth
frag.borderLeftColor = frag.borderColor
frag.borderLeftStyle = frag.borderStyle
frag.borderRightWidth = frag.borderWidth
frag.borderRightColor = frag.borderColor
frag.borderRightStyle = frag.borderStyle
frag.borderTopWidth = frag.borderWidth
frag.borderTopColor = frag.borderColor
frag.borderTopStyle = frag.borderStyle
frag.borderBottomWidth = frag.borderWidth
frag.borderBottomColor = frag.borderColor
frag.borderBottomStyle = frag.borderStyle
frag.paddingLeft = 0
frag.paddingRight = 0
frag.paddingTop = 0
frag.paddingBottom = 0
frag.listStyleType = None
frag.listStyleImage = None
frag.whiteSpace = "normal"
frag.wordWrap = None
frag.pageNumber = False
frag.pageCount = False
frag.height = None
frag.width = None
frag.bulletIndent = 0
frag.bulletText = None
frag.bulletFontName = "Helvetica"
frag.zoom = 1.0
frag.outline = False
frag.outlineLevel = 0
frag.outlineOpen = False
frag.insideStaticFrame = 0
return frag
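# Hedged usage sketch (not part of the original module): building a default
# paragraph fragment from a plain ReportLab style; the style values here are
# illustrative only.
def _exampleDefaultFrag():
    style = ParagraphStyle(name="body", fontName="Helvetica", fontSize=10)
    return getParaFrag(style)  # fragment pre-populated with the defaults above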
def getDirName(path):
parts = urlparse.urlparse(path)
if parts.scheme:
return path
else:
return os.path.dirname(os.path.abspath(path))
class pisaCSSBuilder(css.CSSBuilder):
def atFontFace(self, declarations):
" Embed fonts "
result = self.ruleset([self.selector('*')], declarations)
data = result[0].values()[0]
names = data["font-family"]
# Font weight
fweight = str(data.get("font-weight", "normal")).lower()
bold = fweight in ("bold", "bolder", "500", "600", "700", "800", "900")
if not bold and fweight != "normal":
log.warn(self.c.warning("@fontface, unknown value font-weight '%s'", fweight))
# Font style
italic = str(data.get("font-style", "")).lower() in ("italic", "oblique")
src = self.c.getFile(data["src"])
self.c.loadFont(
names,
src,
bold=bold,
italic=italic)
return {}, {}
def _pisaAddFrame(self, name, data, first=False, border=None, size=(0,0)):
c = self.c
if not name:
name = "-pdf-frame-%d" % c.UID()
x, y, w, h = getFrameDimensions(data, size[0], size[1])
# print name, x, y, w, h
#if not (w and h):
# return None
if first:
return (name, None, data.get("-pdf-frame-border", border), x, y, w, h, data)
return (name, data.get("-pdf-frame-content", None),
data.get("-pdf-frame-border", border), x, y, w, h, data)
def atPage(self, name, pseudopage, declarations):
c = self.c
data = {}
name = name or "body"
pageBorder = None
if declarations:
result = self.ruleset([self.selector('*')], declarations)
# print "@PAGE", name, pseudopage, declarations, result
if declarations:
data = result[0].values()[0]
pageBorder = data.get("-pdf-frame-border", None)
if c.templateList.has_key(name):
log.warn(self.c.warning("template '%s' has already been defined", name))
if data.has_key("-pdf-page-size"):
c.pageSize = xhtml2pdf.default.PML_PAGESIZES.get(str(data["-pdf-page-size"]).lower(), c.pageSize)
isLandscape = False
if data.has_key("size"):
size = data["size"]
# print size, c.pageSize
if type(size) is not types.ListType:
size = [size]
sizeList = []
for value in size:
valueStr = str(value).lower()
if type(value) is types.TupleType:
sizeList.append(getSize(value))
elif valueStr == "landscape":
isLandscape = True
elif xhtml2pdf.default.PML_PAGESIZES.has_key(valueStr):
c.pageSize = xhtml2pdf.default.PML_PAGESIZES[valueStr]
else:
log.warn(c.warning("Unknown size value for @page"))
if len(sizeList) == 2:
c.pageSize = sizeList
if isLandscape:
c.pageSize = landscape(c.pageSize)
for prop in ["margin-top", "margin-left", "margin-right", "margin-bottom",
"top", "left", "right", "bottom", "width", "height"]:
if data.has_key(prop):
c.frameList.append(self._pisaAddFrame(name, data, first=True, border=pageBorder, size=c.pageSize))
break
# self._drawing = PmlPageDrawing(self._pagesize)
#if not c.frameList:
# c.warning("missing frame definitions for template")
# return {}, {}
# Frames have to be calculated after we know the pagesize
frameList = []
staticList = []
for fname, static, border, x, y, w, h, fdata in c.frameList:
#fix frame sizing problem.
if static:
x, y, w, h = getFrameDimensions(fdata, c.pageSize[0], c.pageSize[1])
x, y, w, h = getCoords(x, y, w, h, c.pageSize)
if w <= 0 or h <= 0:
log.warn(self.c.warning("Negative width or height of frame. Check @frame definitions."))
frame = Frame(
x, y, w, h,
id=fname,
leftPadding=0,
rightPadding=0,
bottomPadding=0,
topPadding=0,
showBoundary=border or pageBorder)
if static:
frame.pisaStaticStory = []
c.frameStatic[static] = [frame] + c.frameStatic.get(static, [])
staticList.append(frame)
else:
frameList.append(frame)
background = data.get("background-image", None)
if background:
background = self.c.getFile(background)
# print background
# print frameList
if not frameList:
# print 999
log.warn(c.warning("missing explicit frame definition for content or just static frames"))
fname, static, border, x, y, w, h, data = self._pisaAddFrame(name, data, first=True, border=pageBorder, size=c.pageSize)
x, y, w, h = getCoords(x, y, w, h, c.pageSize)
if w <= 0 or h <= 0:
log.warn(c.warning("Negative width or height of frame. Check @page definitions."))
frameList.append(Frame(
x, y, w, h,
id=fname,
leftPadding=0,
rightPadding=0,
bottomPadding=0,
topPadding=0,
showBoundary=border or pageBorder))
pt = PmlPageTemplate(
id=name,
frames=frameList,
pagesize=c.pageSize,
)
pt.pisaStaticList = staticList
pt.pisaBackground = background
pt.pisaBackgroundList = c.pisaBackgroundList
if isLandscape:
pt.pageorientation = pt.LANDSCAPE
# self._pagesize)
# pt.pml_statics = self._statics
# pt.pml_draw = self._draw
# pt.pml_drawing = self._drawing
# pt.pml_background = attrs.background
# pt.pml_bgstory = self._bgstory
c.templateList[name] = pt
c.template = None
c.frameList = []
c.frameStaticList = []
return {}, {}
def atFrame(self, name, declarations):
if declarations:
result = self.ruleset([self.selector('*')], declarations)
# print "@BOX", name, declarations, result
data = result[0]
if data:
data = data.values()[0]
self.c.frameList.append(
self._pisaAddFrame(name, data, size=self.c.pageSize))
return {}, {} # TODO: It always returns empty dicts?
class pisaCSSParser(css.CSSParser):
def parseExternal(self, cssResourceName):
# print "@import", self.rootPath, cssResourceName
oldRootPath = self.rootPath
cssFile = self.c.getFile(cssResourceName, relative=self.rootPath)
if not cssFile:
return None
if self.rootPath and urlparse.urlparse(self.rootPath).scheme:
self.rootPath = urlparse.urljoin(self.rootPath, cssResourceName)
else:
self.rootPath = getDirName(cssFile.uri)
# print "###", self.rootPath
result = self.parse(cssFile.getData())
self.rootPath = oldRootPath
return result
class pisaContext(object):
"""
Helper class for creation of the ReportLab story and container for
various data.
"""
def __init__(self, path, debug=0, capacity=-1):
self.fontList = copy.copy(xhtml2pdf.default.DEFAULT_FONT)
self.path = []
self.capacity=capacity
self.node = None
self.toc = PmlTableOfContents()
self.story = []
self.indexing_story = None
self.text = []
self.log = []
self.err = 0
self.warn = 0
self.text = u""
self.uidctr = 0
self.multiBuild = False
self.pageSize = A4
self.template = None
self.templateList = {}
self.frameList = []
self.frameStatic = {}
self.frameStaticList = []
self.pisaBackgroundList = []
self.keepInFrameIndex = None
self.baseFontSize = getSize("12pt")
self.anchorFrag = []
self.anchorName = []
self.tableData = None
self.frag = self.fragBlock = getParaFrag(ParagraphStyle('default%d' % self.UID()))
self.fragList = []
self.fragAnchor = []
self.fragStack = []
self.fragStrip = True
self.listCounter = 0
self.cssText = ""
self.image = None
self.imageData = {}
self.force = False
self.pathCallback = None # External callback function for path calculations
# Store path to document
self.pathDocument = path or "__dummy__"
parts = urlparse.urlparse(self.pathDocument)
if not parts.scheme:
self.pathDocument = os.path.abspath(self.pathDocument)
self.pathDirectory = getDirName(self.pathDocument)
self.meta = dict(
author="",
title="",
subject="",
keywords="",
pagesize=A4,
)
def UID(self):
self.uidctr += 1
return self.uidctr
# METHODS FOR CSS
def addCSS(self, value):
value = value.strip()
if value.startswith("<![CDATA["):
value = value[9: - 3]
if value.startswith("<!--"):
value = value[4: - 3]
self.cssText += value.strip() + "\n"
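# Example (illustrative input): addCSS("<![CDATA[ p { color: red } ]]>") strips
# the CDATA wrapper via the slice above and appends "p { color: red }\n" to
# self.cssText; an HTML comment wrapper "<!-- ... -->" is handled the same way.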
def parseCSS(self):
#print repr(self.cssText)
# self.debug(9, self.cssText)
# XXX Must be handled in a better way!
#self.cssText = self.cssText.replace("<!--", "\n")
#self.cssText = self.cssText.replace("-->", "\n")
#self.cssText = self.cssText.replace("<![CDATA[", "\n")
#self.cssText = self.cssText.replace("]]>", "\n")
#self.debug(9, self.cssText)
# print repr(self.cssText)
# file("pisa.css", "wb").write(self.cssText.encode("utf8"))
# self.cssText = re.compile(r"url\((.*?\))", re.M).sub('"\1"', self.cssText)
# self.cssText = re.compile(r"\-moz\-.*?([\;\}]+)", re.M).sub(r"\1", self.cssText)
# XXX Import has to be implemented!
# self.cssText = re.compile(r"\@import.*;", re.M).sub("", self.cssText)
# if 0:
# try:
# # Sanitize CSS
# import cssutils
# import logging
# cssutils.log.setlog(logging.getLogger('csslog'))
# cssutils.log.setloglevel(logging.DEBUG)
# sheet = cssutils.parseString(self.cssText)
# self.cssText = sheet.cssText
# #err = csslog.getvalue()
# except ImportError, e:
# pass
# except Exception, e:
# log.exception(self.error("Error parsing CSS by cssutils"))
# print self.cssText
# file("pisa-sanitized.css", "w").write(self.cssText.encode("utf8"))
# print self.cssText
self.cssBuilder = pisaCSSBuilder(mediumSet=["all", "print", "pdf"])
self.cssBuilder.c = self
self.cssParser = pisaCSSParser(self.cssBuilder)
self.cssParser.rootPath = self.pathDirectory
self.cssParser.c = self
self.css = self.cssParser.parse(self.cssText)
self.cssCascade = css.CSSCascadeStrategy(self.css)
self.cssCascade.parser = self.cssParser
# METHODS FOR STORY
def addStory(self, data):
self.story.append(data)
def swapStory(self, story=[]):
self.story, story = copy.copy(story), copy.copy(self.story)
return story
def toParagraphStyle(self, first):
style = ParagraphStyle('default%d' % self.UID(), keepWithNext=first.keepWithNext)
style.fontName = first.fontName
style.fontSize = first.fontSize
style.leading = max(first.leading + first.leadingSpace, first.fontSize * 1.25)
style.backColor = first.backColor
style.spaceBefore = first.spaceBefore
style.spaceAfter = first.spaceAfter
style.leftIndent = first.leftIndent
style.rightIndent = first.rightIndent
style.firstLineIndent = first.firstLineIndent
style.textColor = first.textColor
style.alignment = first.alignment
style.bulletFontName = first.bulletFontName or first.fontName
style.bulletFontSize = first.fontSize
style.bulletIndent = first.bulletIndent
style.wordWrap = first.wordWrap
# Border handling for Paragraph
# Transfer the styles for each side of the border, *not* the whole
# border values that reportlab supports. We'll draw them ourselves in
# PmlParagraph.
style.borderTopStyle = first.borderTopStyle
style.borderTopWidth = first.borderTopWidth
style.borderTopColor = first.borderTopColor
style.borderBottomStyle = first.borderBottomStyle
style.borderBottomWidth = first.borderBottomWidth
style.borderBottomColor = first.borderBottomColor
style.borderLeftStyle = first.borderLeftStyle
style.borderLeftWidth = first.borderLeftWidth
style.borderLeftColor = first.borderLeftColor
style.borderRightStyle = first.borderRightStyle
style.borderRightWidth = first.borderRightWidth
style.borderRightColor = first.borderRightColor
# If no border color is given, the text color is used (XXX Tables!)
if (style.borderTopColor is None) and style.borderTopWidth:
style.borderTopColor = first.textColor
if (style.borderBottomColor is None) and style.borderBottomWidth:
style.borderBottomColor = first.textColor
if (style.borderLeftColor is None) and style.borderLeftWidth:
style.borderLeftColor = first.textColor
if (style.borderRightColor is None) and style.borderRightWidth:
style.borderRightColor = first.textColor
style.borderPadding = first.borderPadding
style.paddingTop = first.paddingTop
style.paddingBottom = first.paddingBottom
style.paddingLeft = first.paddingLeft
style.paddingRight = first.paddingRight
# This is the old code replaced by the above, kept for reference
#style.borderWidth = 0
#if getBorderStyle(first.borderTopStyle):
# style.borderWidth = max(first.borderLeftWidth, first.borderRightWidth, first.borderTopWidth, first.borderBottomWidth)
# style.borderPadding = first.borderPadding # + first.borderWidth
# style.borderColor = first.borderTopColor
# # If no border color is given, the text color is used (XXX Tables!)
# if (style.borderColor is None) and style.borderWidth:
# style.borderColor = first.textColor
style.fontName = tt2ps(first.fontName, first.bold, first.italic)
return style
def addTOC(self):
# style = copy.deepcopy(self.toParagraphStyle(self.fragBlock))
#cssAttrs = copy.deepcopy(self.node.cssAttrs)
#frag = copy.deepcopy(self.frag)
styles = []
for i in range(0, 20):
self.node.attributes["class"] = "pdftoclevel%d" % i
#self.node.cssAttrs = copy.deepcopy(cssAttrs)
#self.frag = copy.deepcopy(frag)
self.cssAttr = xhtml2pdf.parser.CSSCollect(self.node, self)
xhtml2pdf.parser.CSS2Frag(self, {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}, True)
pstyle = self.toParagraphStyle(self.frag)
#styles.append(copy.deepcopy(pstyle))
styles.append(pstyle)
# log.warn("%r", self.fragBlock.textColor)
self.toc.levelStyles = styles
self.addStory(self.toc)
self.indexing_story = None
def addPageCount(self):
if not self.multiBuild:
self.indexing_story = PmlPageCount()
self.multiBuild = True
def dumpPara(self, frags, style):
return
print "%s/%s %s *** PARA" % (style.fontSize, style.leading, style.fontName)
for frag in frags:
print "%s/%s %r %r" % (
frag.fontSize,
frag.leading,
getattr(frag, "cbDefn", None),
frag.text)
print
def addPara(self, force=False):
# print self.force, repr(self.text)
force = (force or self.force)
self.force = False
# Cleanup the trail
try:
rfragList = reversed(self.fragList)
except:
# For Python 2.3 compatibility
rfragList = copy.copy(self.fragList)
rfragList.reverse()
#for frag in rfragList:
# frag.text = frag.text.rstrip()
# if frag.text:
# break
# Find maximum lead
maxLeading = 0
#fontSize = 0
for frag in self.fragList:
leading = getSize(frag.leadingSource, frag.fontSize) + frag.leadingSpace
maxLeading = max(leading, frag.fontSize + frag.leadingSpace, maxLeading)
frag.leading = leading
if force or (self.text.strip() and self.fragList):
# Strip trailing whitespaces
#for f in self.fragList:
# f.text = f.text.lstrip()
# if f.text:
# break
#self.fragList[-1].lineBreak = self.fragList[-1].text.rstrip()
# Update paragraph style by style of first fragment
first = self.fragBlock
style = self.toParagraphStyle(first)
# style.leading = first.leading + first.leadingSpace
if first.leadingSpace:
style.leading = maxLeading
else:
style.leading = getSize(first.leadingSource, first.fontSize) + first.leadingSpace
# style.leading = maxLeading # + first.leadingSpace
#style.fontSize = fontSize
# borderRadius: None,
# print repr(self.text.strip()), style.leading, "".join([repr(x.text) for x in self.fragList])
# print first.leftIndent, first.listStyleType,repr(self.text)
bulletText = copy.copy(first.bulletText)
first.bulletText = None
# Add paragraph to story
if force or len(self.fragAnchor + self.fragList) > 0:
# We need this empty fragment to work around problems in
# Reportlab paragraphs regarding backGround etc.
if self.fragList:
self.fragList.append(self.fragList[ - 1].clone(text=''))
else:
blank = self.frag.clone()
blank.fontName = "Helvetica"
blank.text = ''
self.fragList.append(blank)
self.dumpPara(self.fragAnchor + self.fragList, style)
para = PmlParagraph(
self.text,
style,
frags=self.fragAnchor + self.fragList,
bulletText=bulletText)
# Mirrored and BIDI
#import unicodedata
#for c in self.text:
# print unicodedata.bidirectional(c),
para.outline = first.outline
para.outlineLevel = first.outlineLevel
para.outlineOpen = first.outlineOpen
para.keepWithNext = first.keepWithNext
para.autoLeading = "max"
if self.image:
para = PmlParagraphAndImage(
para,
self.image,
side=self.imageData.get("align", "left"))
self.addStory(para)
self.fragAnchor = []
first.bulletText = None
# Reset data
self.image = None
self.imageData = {}
self.clearFrag()
# METHODS FOR FRAG
def clearFrag(self):
self.fragList = []
self.fragStrip = True
self.text = u""
def copyFrag(self, **kw):
return self.frag.clone(**kw)
def newFrag(self, **kw):
self.frag = self.frag.clone(**kw)
return self.frag
def _appendFrag(self, frag):
if frag.link and frag.link.startswith("#"):
self.anchorFrag.append((frag, frag.link[1:]))
self.fragList.append(frag)
# XXX Argument frag is useless!
def addFrag(self, text="", frag=None):
frag = baseFrag = self.frag.clone()
# if sub and super are both on they will cancel each other out
if frag.sub == 1 and frag.super == 1:
frag.sub = 0
frag.super = 0
# XXX Has to be replaced by CSS styles like vertical-align and font-size
if frag.sub:
frag.rise = - frag.fontSize * subFraction
frag.fontSize = max(frag.fontSize - sizeDelta, 3)
elif frag.super:
frag.rise = frag.fontSize * superFraction
frag.fontSize = max(frag.fontSize - sizeDelta, 3)
# XXX Unused?
#if frag.greek:
# frag.fontName = 'symbol'
# text = _greekConvert(text)
# bold, italic, and underline
frag.fontName = frag.bulletFontName = tt2ps(frag.fontName, frag.bold, frag.italic)
# print frag.bulletFontName
# Modify text for optimal whitespace handling
# XXX Support Unicode whitespaces?
# XXX What about images?
# XXX Doesn't work with Reportlab > 2.1
# NBSP = '\xc2\xa0' # u"_"
#if REPORTLAB22:
# NBSP = u" "
# Strip soft hyphens (u"\xad") and normalize NBSP variants
text = (text
.replace(u"\xad", u"")
.replace(u"\xc2\xa0", NBSP)
.replace(u"\xa0", NBSP))
# log.debug("> %r", text)
if frag.whiteSpace == "pre":
# Handle by lines
for text in re.split(r'(\r\n|\n|\r)', text):
# This is an exceptionally expensive piece of code
self.text += text
if ("\n" in text) or ("\r" in text):
# If EOL insert a linebreak
frag = baseFrag.clone()
frag.text = ""
frag.lineBreak = 1
self._appendFrag(frag)
else:
# Handle tabs in a simple way
text = text.replace(u"\t", 8 * u" ")
# Somehow for ReportLab NBSPs have to be inserted
# as single-character fragments
for text in re.split(r'(\ )', text):
frag = baseFrag.clone()
if text == " ":
text = NBSP
frag.text = text
self._appendFrag(frag)
else:
for text in re.split(u'(' + NBSP + u')', text):
frag = baseFrag.clone()
if text == NBSP:
self.force = True
frag.text = NBSP
self.text += text
self._appendFrag(frag)
else:
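# The join/split trick below collapses runs of whitespace to single spaces
# while remembering whether the text had any leading/trailing whitespace,
# e.g. "  Hello\n  world " becomes " Hello world ".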
frag.text = " ".join(("x" + text + "x").split())[1: - 1]
if self.fragStrip:
frag.text = frag.text.lstrip()
if frag.text:
self.fragStrip = False
self.text += frag.text
self._appendFrag(frag)
# print frag.fontName, repr(frag.text), frag.bulletText
def pushFrag(self):
self.fragStack.append(self.frag)
self.newFrag()
def pullFrag(self):
self.frag = self.fragStack.pop()
# XXX
def _getFragment(self, l=20):
try:
return repr(" ".join(self.node.toxml().split()[:l]))
except:
return ""
def _getLineNumber(self):
return 0
def context(self, msg):
return "%s\n%s" % (
str(msg),
self._getFragment(50))
# return "line %s: %s\n%s" % (
# self._getLineNumber(),
# str(msg),
# self._getFragment(50))
def warning(self, msg, *args):
self.warn += 1
self.log.append((xhtml2pdf.default.PML_WARNING, self._getLineNumber(), str(msg), self._getFragment(50)))
try:
return self.context(msg % args)
except:
return self.context(msg)
def error(self, msg, *args):
self.err += 1
self.log.append((xhtml2pdf.default.PML_ERROR, self._getLineNumber(), str(msg), self._getFragment(50)))
try:
return self.context(msg % args)
except:
return self.context(msg)
# UTILS
def _getFileDeprecated(self, name, relative):
try:
if name.startswith("data:"):
return name
path = relative or self.pathDirectory
if self.pathCallback is not None:
nv = self.pathCallback(name, relative)
else:
if path is None:
log.warn("Could not find main directory for getting filename. Use CWD")
path = os.getcwd()
nv = os.path.normpath(os.path.join(path, name))
if not(nv and os.path.isfile(nv)):
nv = None
if nv is None:
log.warn(self.warning("File '%s' does not exist", name))
return nv
except:
log.warn(self.warning("getFile %r %r %r", name, relative, path), exc_info=1)
def getFile(self, name, relative=None):
"""
Returns a file name or None
"""
if self.pathCallback is not None:
return getFile(self._getFileDeprecated(name, relative))
return getFile(name, relative or self.pathDirectory)
def getFontName(self, names, default="helvetica"):
"""
Name of a font
"""
# print names, self.fontList
if type(names) is not types.ListType:
names = str(names).strip().split(",")
for name in names:
font = self.fontList.get(str(name).strip().lower(), None)
if font is not None:
return font
return self.fontList.get(default, None)
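# Example (hypothetical CSS font list): getFontName("Arial, Helvetica, sans-serif")
# lowercases and strips each name, returns the first one found in self.fontList,
# and falls back to the "helvetica" entry if none of them is registered.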
def registerFont(self, fontname, alias=[]):
self.fontList[str(fontname).lower()] = str(fontname)
for a in alias:
self.fontList[str(a)] = str(fontname)
def loadFont(self, names, src, encoding="WinAnsiEncoding", bold=0, italic=0):
# XXX Just works for local filenames!
if names and src: # and src.local:
file = src
src = file.uri
log.debug("Load font %r", src)
if type(names) is types.ListType:
fontAlias = names
else:
fontAlias = [x.lower().strip() for x in names.split(",") if x]
# XXX Problems with unicode here
fontAlias = [str(x) for x in fontAlias]
fontName = fontAlias[0]
parts = src.split(".")
baseName, suffix = ".".join(parts[: - 1]), parts[ - 1]
suffix = suffix.lower()
if suffix in ["ttc", "ttf"]:
# determine full font name according to weight and style
fullFontName = "%s_%d%d" % (fontName, bold, italic)
# check if font has already been registered
if fullFontName in self.fontList:
log.warn(self.warning("Repeated font embed for %s, skip new embed ", fullFontName))
else:
# Register TTF font and special name
filename = file.getNamedFile()
pdfmetrics.registerFont(TTFont(fullFontName, filename))
# Add or replace missing styles
for bold in (0, 1):
for italic in (0, 1):
if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
addMapping(fontName, bold, italic, fullFontName)
# Register "normal" name and the place holder for style
self.registerFont(fontName, fontAlias + [fullFontName])
elif suffix in ("afm", "pfb"):
if suffix == "afm":
afm = file.getNamedFile()
tfile = pisaFileObject(baseName + ".pfb")
pfb = tfile.getNamedFile()
else:
pfb = file.getNamedFile()
tfile = pisaFileObject(baseName + ".afm")
afm = tfile.getNamedFile()
#afm = baseName + ".afm"
#pfb = baseName + ".pfb"
# determine full font name according to weight and style
fullFontName = "%s_%d%d" % (fontName, bold, italic)
#fontNameOriginal = ""
#for line in open(afm).readlines()[:-1]:
# if line[:16] == 'StartCharMetrics':
# self.error("Font name not found")
# if line[:8] == 'FontName':
# fontNameOriginal = line[9:].strip()
# break
# check if font has already been registered
if fullFontName in self.fontList:
log.warn(self.warning("Repeated font embed for %s, skip new embed", fontName))
else:
# Include font
face = pdfmetrics.EmbeddedType1Face(afm, pfb)
fontNameOriginal = face.name
pdfmetrics.registerTypeFace(face)
# print fontName, fontNameOriginal, fullFontName
justFont = pdfmetrics.Font(fullFontName, fontNameOriginal, encoding)
pdfmetrics.registerFont(justFont)
# Add or replace missing styles
for bold in (0, 1):
for italic in (0, 1):
if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
addMapping(fontName, bold, italic, fontNameOriginal)
# Register "normal" name and the place holder for style
self.registerFont(fontName, fontAlias + [fullFontName, fontNameOriginal])
#import pprint
#pprint.pprint(self.fontList)
else:
log.warning(self.warning("wrong attributes for <pdf:font>"))
|
|
#!/usr/bin/env python
"""
@package ion.services.mi.test.test_port_agent_client
@file ion/services/mi/test/test_port_agent_client.py
@author David Everett
@brief Some unit tests for R2 port agent client
"""
__author__ = 'David Everett'
__license__ = 'Apache 2.0'
# Ensure the test class is monkey patched for gevent
from gevent import monkey; monkey.patch_all()
import gevent
import logging
import unittest
import re
import time
import datetime
import array
import struct
import ctypes
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.port_agent_process import PortAgentProcess
from mi.core.port_agent_process import PortAgentProcessType
from mi.core.tcp_client import TcpClient
from mi.core.port_agent_simulator import TCPSimulatorServer
from mi.core.unit_test import MiUnitTest
from mi.core.unit_test import MiIntTestCase
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.core.instrument.port_agent_client import PortAgentClient, PortAgentPacket, Listener
from mi.core.instrument.port_agent_client import HEADER_SIZE
from mi.core.instrument.instrument_driver import DriverConnectionState
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.exceptions import InstrumentConnectionException
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37Driver
# MI logger
from mi.core.log import get_logger ; log = get_logger()
SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3])
NTP_EPOCH = datetime.date(1900, 1, 1)
NTP_DELTA = (SYSTEM_EPOCH - NTP_EPOCH).days * 24 * 3600
## Initialize the test parameters
## Use the SBE37 here because this is a generic port_agent_client test not
## necessarily associated with any driver.
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.seabird.sbe37smb.ooicore.driver',
driver_class="SBE37Driver",
instrument_agent_resource_id = '123xyz',
instrument_agent_preload_id = 'IA2',
instrument_agent_name = 'Agent007',
driver_startup_config = {}
)
@attr('UNIT', group='mi')
class PAClientUnitTestCase(InstrumentDriverUnitTestCase):
def setUp(self):
self.ipaddr = "localhost"
self.cmd_port = 9001
self.data_port = 9002
self.device_port = 9003
def resetTestVars(self):
self.rawCallbackCalled = False
self.dataCallbackCalled = False
self.errorCallbackCalled = False
self.listenerCallbackCalled = False
def myGotData(self, paPacket):
self.dataCallbackCalled = True
if paPacket.is_valid():
validity = "valid"
else:
validity = "invalid"
log.info("Got %s port agent data packet with data length %d: %s", validity, paPacket.get_data_length(), paPacket.get_data())
def myGotRaw(self, paPacket):
self.rawCallbackCalled = True
if paPacket.is_valid():
validity = "valid"
else:
validity = "invalid"
log.info("Got %s port agent raw packet with data length %d: %s", validity, paPacket.get_data_length(), paPacket.get_data())
def myGotError(self, errorString = "No error string passed in."):
self.errorCallbackCalled = True
log.info("Got error: %s", errorString)
def myGotListenerError(self, exception):
self.listenerCallbackCalled = True
log.info("Got listener exception: %s", exception)
def raiseException(self, packet):
raise Exception("Boom")
def test_handle_packet(self):
"""
Test that a default PortAgentPacket creates a DATA_FROM_DRIVER packet,
and that the handle_packet method invokes the raw callback
"""
paListener = Listener(None, None, 0, 0, 5, self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
test_data = "This is a great big test"
self.resetTestVars()
paPacket = PortAgentPacket()
paPacket.attach_data(test_data)
paPacket.pack_header()
paPacket.verify_checksum()
paListener.handle_packet(paPacket)
self.assertTrue(self.rawCallbackCalled)
###
# Test DATA_FROM_INSTRUMENT; handle_packet should invoke data and raw
# callbacks.
###
self.resetTestVars()
paPacket = PortAgentPacket(PortAgentPacket.DATA_FROM_INSTRUMENT)
paPacket.attach_data(test_data)
paPacket.pack_header()
paPacket.verify_checksum()
paListener.handle_packet(paPacket)
self.assertTrue(self.rawCallbackCalled)
self.assertTrue(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test PORT_AGENT_COMMAND; handle_packet should invoke raw callback.
###
self.resetTestVars()
paPacket = PortAgentPacket(PortAgentPacket.PORT_AGENT_COMMAND)
paPacket.attach_data(test_data)
paPacket.pack_header()
paPacket.verify_checksum()
paListener.handle_packet(paPacket)
self.assertTrue(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test PORT_AGENT_STATUS; handle_packet should invoke raw callback.
###
self.resetTestVars()
paPacket = PortAgentPacket(PortAgentPacket.PORT_AGENT_STATUS)
paPacket.attach_data(test_data)
paPacket.pack_header()
paPacket.verify_checksum()
paListener.handle_packet(paPacket)
self.assertTrue(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test PORT_AGENT_FAULT; handle_packet should invoke raw callback.
###
self.resetTestVars()
paPacket = PortAgentPacket(PortAgentPacket.PORT_AGENT_FAULT)
paPacket.attach_data(test_data)
paPacket.pack_header()
paPacket.verify_checksum()
paListener.handle_packet(paPacket)
self.assertTrue(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test INSTRUMENT_COMMAND; handle_packet should invoke raw callback.
###
self.resetTestVars()
paPacket = PortAgentPacket(PortAgentPacket.INSTRUMENT_COMMAND)
paPacket.attach_data(test_data)
paPacket.pack_header()
paPacket.verify_checksum()
paListener.handle_packet(paPacket)
self.assertTrue(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test HEARTBEAT; handle_packet should not invoke any callback.
###
self.resetTestVars()
paPacket = PortAgentPacket(PortAgentPacket.HEARTBEAT)
paPacket.attach_data(test_data)
paPacket.pack_header()
paPacket.verify_checksum()
paListener.handle_packet(paPacket)
self.assertFalse(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
def test_heartbeat_timeout(self):
"""
Initialize the Listener with a heartbeat value, then
start the heartbeat. Wait long enough for the heartbeat
to time out MAX_MISSED_HEARTBEATS times, and then assert
that the error_callback was called.
"""
self.resetTestVars()
test_recovery_attempts = 1
test_heartbeat = 1
test_max_missed_heartbeats = 5
paListener = Listener(None, test_recovery_attempts, None, test_heartbeat, test_max_missed_heartbeats,
self.myGotData, self.myGotRaw, self.myGotListenerError, None, self.myGotError)
paListener.start_heartbeat_timer()
gevent.sleep((test_max_missed_heartbeats * paListener.heartbeat) + 4)
self.assertFalse(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertTrue(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
def test_set_heartbeat(self):
"""
Test the set_heartbeat function; make sure it returns False when
passed invalid values, and True when valid. Also make sure it
adds the HEARTBEAT_FUDGE.
"""
self.resetTestVars()
test_recovery_attempts = 1
test_heartbeat = 0
test_max_missed_heartbeats = 5
paListener = Listener(None, test_recovery_attempts, None, test_heartbeat, test_max_missed_heartbeats,
self.myGotData, self.myGotRaw, self.myGotListenerError, None, self.myGotError)
###
# Test valid values
###
test_heartbeat = 1
retValue = paListener.set_heartbeat(test_heartbeat)
self.assertTrue(retValue)
self.assertTrue(paListener.heartbeat == test_heartbeat + paListener.HEARTBEAT_FUDGE)
test_heartbeat = paListener.MAX_HEARTBEAT_INTERVAL
retValue = paListener.set_heartbeat(test_heartbeat)
self.assertTrue(retValue)
self.assertTrue(paListener.heartbeat == test_heartbeat + paListener.HEARTBEAT_FUDGE)
###
# Test that a heartbeat value of zero results in the listener.heartbeat being zero
# (and doesn't include HEARTBEAT_FUDGE)
###
test_heartbeat = 0
retValue = paListener.set_heartbeat(test_heartbeat)
self.assertTrue(retValue)
self.assertTrue(paListener.heartbeat == test_heartbeat)
###
# Test invalid values
###
test_heartbeat = -1
retValue = paListener.set_heartbeat(test_heartbeat)
self.assertFalse(retValue)
test_heartbeat = paListener.MAX_HEARTBEAT_INTERVAL + 1
retValue = paListener.set_heartbeat(test_heartbeat)
self.assertFalse(retValue)
def test_connect_failure(self):
"""
Test that when the port agent client cannot initially connect, it
raises an InstrumentConnectionException.
"""
exceptionRaised = False
driver = SBE37Driver(self._got_data_event_callback)
current_state = driver.get_resource_state()
self.assertEqual(current_state, DriverConnectionState.UNCONFIGURED)
config = {'addr' : self.ipaddr, 'port' : self.data_port, 'cmd_port' : self.cmd_port}
driver.configure(config = config)
current_state = driver.get_resource_state()
self.assertEqual(current_state, DriverConnectionState.DISCONNECTED)
"""
Try to connect: it should fail because there is no port agent running.
The state should remain DISCONNECTED, and an
InstrumentConnectionException should be caught.
"""
try:
driver.connect()
current_state = driver.get_resource_state()
self.assertEqual(current_state, DriverConnectionState.DISCONNECTED)
except InstrumentConnectionException as e:
exceptionRaised = True
"""
Give it some time to retry
"""
time.sleep(4)
self.assertTrue(exceptionRaised)
@attr('UNIT', group='mi')
class PAClientTestPortAgentPacket(MiUnitTest):
# time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime(time.time()))
#
@staticmethod
def ntp_to_system_time(date):
"""convert a NTP time to system time"""
return date - NTP_DELTA
@staticmethod
def system_to_ntp_time(date):
"""convert a system time to a NTP time"""
return date + NTP_DELTA
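# Worked example: NTP_DELTA is the number of seconds between the NTP epoch
# (1900-01-01) and the system epoch; for the standard Unix epoch (1970-01-01)
# that is 2208988800, so system_to_ntp_time(0.0) == NTP_DELTA and
# ntp_to_system_time(NTP_DELTA) == 0.0.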
def setUp(self):
self.pap = PortAgentPacket()
#self.test_time = time.time()
#self.ntp_time = self.system_to_ntp_time(self.test_time)
#self.pap.set_timestamp(self.ntp_time)
def test_pack_header(self):
test_data = "Only the length of this matters?"
test_data_length = len(test_data)
self.pap.attach_data(test_data)
self.pap.pack_header()
header = self.pap.get_header()
self.assertEqual(self.pap.get_data_length(), test_data_length)
def test_get_length(self):
test_length = 100
self.pap.set_data_length(test_length)
got_length = self.pap.get_data_length()
self.assertEqual(got_length, test_length)
def test_checksum(self):
"""
This tests the checksum algorithm; if somebody changes the algorithm
this test should catch it. Had to jump through some hoops to do this;
needed to add set_data_length and set_header because we're building our
own header here (the one in PortAgentPacket includes the timestamp
so the checksum is not consistent).
"""
test_data = "This tests the checksum algorithm."
test_length = len(test_data)
self.pap.attach_data(test_data)
"""
Now build a header
"""
variable_tuple = (0xa3, 0x9d, 0x7a, self.pap.DATA_FROM_DRIVER,
test_length + HEADER_SIZE, 0x0000,
0)
self.pap.set_data_length(test_length)
format = '>BBBBHHd'
size = struct.calcsize(format)
temp_header = ctypes.create_string_buffer(size)
struct.pack_into(format, temp_header, 0, *variable_tuple)
"""
Now set the header member in PortAgentPacket to the header
we built
"""
self.pap.set_header(temp_header.raw)
"""
Now get the checksum and verify it is what we expect it to be.
"""
checksum = self.pap.calculate_checksum()
self.assertEqual(checksum, 2)
def test_unpack_header(self):
self.pap = PortAgentPacket()
data_length = 32
data = self.pap.unpack_header(array.array('B', [163, 157, 122, 2, 0, data_length + HEADER_SIZE, 14, 145, 65, 234, 142, 154, 23, 155, 51, 51]))
got_timestamp = self.pap.get_timestamp()
self.assertEqual(self.pap.get_header_type(), self.pap.DATA_FROM_DRIVER)
self.assertEqual(self.pap.get_data_length(), data_length)
# FIXME -- This broke with October 2013 timestamp fix...update this!
#self.assertEqual(got_timestamp, 1105890970.110589)
self.assertEqual(self.pap.get_header_recv_checksum(), 3729)
@attr('INT', group='mi')
class PAClientIntTestCase(InstrumentDriverTestCase):
def initialize(cls, *args, **kwargs):
log.debug("initialize")
def setUp(self):
#InstrumentDriverIntegrationTestCase.setUp(self)
#self.ipaddr = "69.196.56.192"
self.ipaddr = "localhost"
self.cmd_port = 9001
self.data_port = 9002
self.device_port = 9003
self.rawCallbackCalled = False
self.dataCallbackCalled = False
self.errorCallbackCalled = False
self.paPacket = None
def tearDown(self):
"""
@brief Test teardown
"""
log.debug("PACClientIntTestCase tearDown")
InstrumentDriverTestCase.tearDown(self)
def startPortAgent(self):
pa_port = self.init_port_agent()
log.debug("port_agent started on port: %d" % (pa_port))
time.sleep(2) # give it a chance to start responding
def resetTestVars(self):
log.debug("Resetting test variables...")
self.rawCallbackCalled = False
self.dataCallbackCalled = False
self.errorCallbackCalled = False
self.listenerCallbackCalled = False
def myGotData(self, paPacket):
self.dataCallbackCalled = True
self.paPacket = paPacket
if paPacket.is_valid():
validity = "valid"
else:
validity = "invalid"
log.debug("Got %s port agent data packet with data length %s: %s", validity, paPacket.get_data_length(), paPacket.get_data())
def myGotRaw(self, paPacket):
self.rawCallbackCalled = True
if paPacket.is_valid():
validity = "valid"
else:
validity = "invalid"
log.debug("Got %s port agent raw packet with data length %s: %s", validity, paPacket.get_data_length(), paPacket.get_data())
def myGotListenerError(self, exception):
self.listenerCallbackCalled = True
log.info("Got listener exception: %s", exception)
def myGotError(self, errorString = "No error string passed in."):
self.errorCallbackCalled = True
log.info("myGotError got error: %s", errorString)
def init_instrument_simulator(self):
"""
Start up a TCP server that we can use as an instrument simulator.
"""
self._instrument_simulator = TCPSimulatorServer()
self.addCleanup(self._instrument_simulator.close)
# Wait for the simulator to bind to a port
timeout = time.time() + 10
while (timeout > time.time()):
if (self._instrument_simulator.port > 0):
log.debug("Instrument simulator initialized on port %s" % self._instrument_simulator.port)
return
log.debug("waiting for simulator to bind. sleeping")
time.sleep(1)
raise IDKException("Timeout waiting for simulator to bind")
def init_port_agent(self):
"""
@brief Launch the port agent process. This is used in the
integration and qualification tests. The port agent abstracts the physical
interface with the instrument.
@retval the data port the port agent is listening on
"""
if (self.port_agent):
log.error("Port agent already initialized")
return
log.debug("Startup Port Agent")
#comm_config = self.get_comm_config()
config = self.port_agent_config()
log.debug("port agent config: %s" % config)
port_agent = PortAgentProcess.launch_process(config, timeout = 60, test_mode = True)
port = port_agent.get_data_port()
pid = port_agent.get_pid()
log.info('Started port agent pid %s listening at port %s' % (pid, port))
self.addCleanup(self.stop_port_agent)
self.port_agent = port_agent
return port
def port_agent_config(self):
"""
Overload the default port agent configuration so that
it connects to a simulated TCP connection.
"""
config = {
'device_addr' : self.ipaddr,
'device_port' : self.device_port,
'command_port': self.cmd_port,
'data_port': self.data_port,
'process_type': PortAgentProcessType.UNIX,
'log_level': 5,
'heartbeat_interval': 3
}
# Override the instrument connection information.
config['device_addr'] = 'localhost'
config['device_port'] = self._instrument_simulator.port
return config
def test_paClient_retry(self):
"""
Test that the port agent client will not continually try to recover
when the port agent closes the connection gracefully because it has
another client connected.
"""
exceptionRaised = False
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
time.sleep(2)
"""
Start a TCP client that will connect to the data port; this sets up the
situation where the Port Agent will immediately close the connection
because it already has one
"""
self.tcp_client = TcpClient("localhost", self.data_port)
time.sleep(2)
paClient = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
try:
paClient.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
except InstrumentConnectionException as e:
exceptionRaised = True
"""
Give it some time to retry
"""
time.sleep(4)
self.assertTrue(exceptionRaised)
def test_paClient_rx_heartbeat(self):
"""
Test that the port agent can send heartbeats when the paClient has
a heartbeat_interval of 0. The port_agent_config() method above
sets the heartbeat interval.
"""
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
time.sleep(5)
paClient = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
paClient.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
time.sleep(10)
self.assertFalse(self.errorCallbackCalled)
def test_start_paClient_no_port_agent(self):
self.resetTestVars()
paClient = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
self.assertRaises(InstrumentConnectionException,
paClient.init_comms,
self.myGotData, self.myGotRaw,
self.myGotListenerError, self.myGotError)
self.assertFalse(self.errorCallbackCalled)
def test_start_paClient_with_port_agent(self):
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
paClient = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
try:
paClient.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % (e))
exceptionCaught = True
else:
exceptionCaught = False
data = "this is a great big test"
paClient.send(data)
time.sleep(1)
self._instrument_simulator.send(data)
time.sleep(5)
paClient.stop_comms()
"""
Assert that the error_callback was not called, that an exception was not
caught, and that the data and raw callbacks were called.
"""
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(exceptionCaught)
self.assertTrue(self.rawCallbackCalled)
self.assertTrue(self.dataCallbackCalled)
def test_start_paClient_no_port_agent_big_data(self):
self.resetTestVars()
logging.getLogger('mi.core.instrument.port_agent_client').setLevel(logging.DEBUG)
# I put this in here because PortAgentPacket cannot make a new packet
# with a valid checksum.
def makepacket(msgtype, timestamp, data):
from struct import Struct
SYNC = (0xA3, 0x9D, 0x7A)
HEADER_FORMAT = "!BBBBHHd"
header_struct = Struct(HEADER_FORMAT)
HEADER_SIZE = header_struct.size
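# Header layout per HEADER_FORMAT "!BBBBHHd" (16 bytes, network byte order):
# three sync bytes (0xA3 0x9D 0x7A), message type, packet size (uint16),
# checksum (uint16), and an NTP timestamp packed as a double.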
def calculateChecksum(data, seed=0):
n = seed
for datum in data:
n ^= datum
return n
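# e.g. calculateChecksum([0x01, 0x03]) == 0x02; XOR-ing a packet whose
# checksum field is still zero yields the value to write back into that field.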
def pack_header(buf, msgtype, pktsize, checksum, timestamp):
sync1, sync2, sync3 = SYNC
header_struct.pack_into(buf, 0, sync1, sync2, sync3, msgtype, pktsize,
checksum, timestamp)
pktsize = HEADER_SIZE + len(data)
pkt = bytearray(pktsize)
pack_header(pkt, msgtype, pktsize, 0, timestamp)
pkt[HEADER_SIZE:] = data
checksum = calculateChecksum(pkt)
pack_header(pkt, msgtype, pktsize, checksum, timestamp)
return pkt
# Make a BIG packet
data = "A" * (2**16 - HEADER_SIZE - 1)
txpkt = makepacket(PortAgentPacket.DATA_FROM_INSTRUMENT, 0.0, data)
def handle(sock, addr):
# Send it in pieces
sock.sendall(txpkt[:1500])
time.sleep(1)
sock.sendall(txpkt[1500:])
time.sleep(10)
import gevent.server
dataserver = gevent.server.StreamServer((self.ipaddr, self.data_port), handle)
cmdserver = gevent.server.StreamServer((self.ipaddr, self.cmd_port), lambda x, y: None)
paClient = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
try:
dataserver.start()
cmdserver.start()
paClient.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % (e))
raise
else:
time.sleep(5)
finally:
paClient.stop_comms()
dataserver.kill()
cmdserver.kill()
"""
Assert that the error_callback was not called, that an exception was not
caught, and that the data and raw callbacks were called.
"""
self.assertFalse(self.errorCallbackCalled)
self.assertTrue(self.rawCallbackCalled)
self.assertTrue(self.dataCallbackCalled)
self.assertEquals(self.paPacket.get_data_length(), len(data))
self.assertEquals(len(self.paPacket.get_data()), len(data))
# don't use assertEquals b/c it will print 64kb
self.assert_(self.paPacket.get_data() == data)
def test_start_paClient_lost_port_agent_tx_rx(self):
"""
This test starts the port agent and the instrument_simulator and
tests that data is sent and received first; then it stops the port
agent and tests that the error_callback was called.
"""
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
paClient = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
paClient.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
"""
Now send some data; there should be no errors.
"""
try:
data = "this is a great big test"
paClient.send(data)
time.sleep(1)
self._instrument_simulator.send(data)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % (e))
exceptionCaught = True
else:
exceptionCaught = False
time.sleep(1)
"""
Assert that the error_callback was NOT called, that an exception was NOT
caught, and that the data and raw callbacks WERE called.
"""
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(exceptionCaught)
self.assertTrue(self.rawCallbackCalled)
self.assertTrue(self.dataCallbackCalled)
"""
Now reset the test variables and try again; this time after stopping
the port agent. There should be errors this time.
"""
self.resetTestVars()
try:
self.stop_port_agent()
log.debug("Port agent stopped")
data = "this is another great big test"
paClient.send(data)
time.sleep(1)
log.debug("Sending from simulator")
self._instrument_simulator.send(data)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % (e))
exceptionCaught = True
else:
exceptionCaught = False
time.sleep(5)
"""
Assert that the error_callback WAS called. The listener usually
sees the error first, and that path does not raise an exception, so
only assert that the error callback was called.
"""
self.assertTrue(self.errorCallbackCalled)
def test_start_paClient_lost_port_agent_rx(self):
"""
This test starts the port agent and then stops the port agent and
verifies that the error callback was called (because the listener
is the only one that will see the error, since there is no send
operation).
"""
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
paClient = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
paClient.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
try:
self.stop_port_agent()
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % (e))
exceptionCaught = True
else:
exceptionCaught = False
time.sleep(5)
"""
Assert that the error_callback was called. At this moment the listener
sees the error first, and that path does not raise an exception, so
don't test for that yet.
"""
self.assertTrue(self.errorCallbackCalled)
@unittest.skip('Skip; this test does not work consistently.')
def test_start_paClient_lost_port_agent_tx(self):
"""
This test starts the port agent and then starts the port agent client
in a special way that will not start the listener thread. This will
guarantee that the send context is the one that sees the error.
"""
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
paClient = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
"""
Give the port agent time to initialize
"""
time.sleep(5)
paClient.init_comms(self.myGotData, self.myGotRaw, self.myGotError, self.myGotListenerError, start_listener = False)
try:
self.stop_port_agent()
data = "this big ol' test should cause send context to fail"
paClient.send(data)
time.sleep(1)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % (e))
exceptionCaught = True
else:
exceptionCaught = False
time.sleep(5)
"""
Assert that the error_callback was called. For this test the listener
should not be running, so the send context should see the error, and that
should throw an exception. Assert that the callback WAS called and that
an exception WAS thrown.
"""
self.assertTrue(self.errorCallbackCalled)
self.assertTrue(exceptionCaught)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_managed_cluster_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
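# For illustration (hypothetical argument values):
# build_list_by_managed_cluster_request("00000000-0000-0000-0000-000000000000",
# "my-rg", "my-cluster") produces a GET request for
# .../subscriptions/<sub>/resourceGroups/my-rg/providers/Microsoft.ContainerService/
# managedClusters/my-cluster/maintenanceConfigurations with api-version=2021-08-01
# in the query string.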
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
config_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"configName": _SERIALIZER.url("config_name", config_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
config_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"configName": _SERIALIZER.url("config_name", config_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
config_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"configName": _SERIALIZER.url("config_name", config_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class MaintenanceConfigurationsOperations(object):
"""MaintenanceConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2021_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_managed_cluster(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> Iterable["_models.MaintenanceConfigurationListResult"]:
"""Gets a list of maintenance configurations in the specified managed cluster.
Gets a list of maintenance configurations in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MaintenanceConfigurationListResult or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_08_01.models.MaintenanceConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MaintenanceConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_managed_cluster_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_by_managed_cluster.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_managed_cluster_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("MaintenanceConfigurationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_managed_cluster.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations'} # type: ignore
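# Hedged usage sketch (the client construction below is an assumption, not part
# of this module): given a ContainerServiceClient built with a credential and
# subscription id, the paged call typically looks like
#
#   for config in client.maintenance_configurations.list_by_managed_cluster(
#           resource_group_name="my-rg", resource_name="my-cluster"):
#       print(config.name)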
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
**kwargs: Any
) -> "_models.MaintenanceConfiguration":
"""Gets the specified maintenance configuration of a managed cluster.
Gets the specified maintenance configuration of a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param config_name: The name of the maintenance configuration.
:type config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_08_01.models.MaintenanceConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MaintenanceConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('MaintenanceConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: "_models.MaintenanceConfiguration",
**kwargs: Any
) -> "_models.MaintenanceConfiguration":
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param config_name: The name of the maintenance configuration.
:type config_name: str
:param parameters: The maintenance configuration to create or update.
:type parameters: ~azure.mgmt.containerservice.v2021_08_01.models.MaintenanceConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_08_01.models.MaintenanceConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MaintenanceConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'MaintenanceConfiguration')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('MaintenanceConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
**kwargs: Any
) -> None:
"""Deletes a maintenance configuration.
Deletes a maintenance configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param config_name: The name of the maintenance configuration.
:type config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}'} # type: ignore
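# Illustrative usage sketch (not part of the generated module). It assumes the
# operations class above is exposed as the `maintenance_configurations`
# attribute of a ContainerServiceClient; the credential setup and resource
# names below are placeholders.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.containerservice import ContainerServiceClient
#
#   client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
#   ops = client.maintenance_configurations
#   for mc in ops.list_by_managed_cluster("my-rg", "my-aks"):
#       print(mc.name)
#   cfg = ops.get("my-rg", "my-aks", "default")
#   ops.delete("my-rg", "my-aks", "default")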
|
|
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Data extraction/preprocessing for processing wiki history dumps for GEC.
We use a set of heuristics to distill prose from the wikipedia xml. We produce
source-target pairs of text reflecting wikipedia edits.
WikiRevision problem - fragment of older revision -> fragment of newer revision.
This implements data extraction from wikipedia as described in the paper,
Weakly Supervised Grammatical Error Correction using Iterative Decoding
(https://arxiv.org/pdf/1811.01710.pdf).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from absl import flags
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators import wiki_revision_utils
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_integer("wiki_revision_num_train_shards", 50,
"Set the number of training shards to be output.")
flags.DEFINE_integer("wiki_revision_num_dev_shards", 1,
"Set the number of dev shards to be output.")
flags.DEFINE_string(
"wiki_revision_data_prefix", "",
"Specify the prefix for input data. Expects 7z compressed Wikipedia XML "
"files, available at https://dumps.wikimedia.org/enwiki/latest/.")
flags.DEFINE_string(
"wiki_revision_vocab_file", "",
"Specify a wordpieces vocabulary with which to encode the text. Will "
"generate one from data if not specified.")
flags.DEFINE_integer(
"wiki_revision_max_examples_per_shard", 0,
"Use this to set a cap on examples per shard. "
"0 is no cap.")
# Data filtration heuristics:
flags.DEFINE_integer("wiki_revision_max_page_size_exp", 26,
"Exponent for 2**X byte cap on page size.")
flags.DEFINE_float(
"wiki_revision_max_equal_to_diff_ratio", 0,
"Max ratio between count of equal, diff chars for generated "
"examples. Ratio of 1 means examples with more diff chars "
"than equal chars will be tossed out.")
flags.DEFINE_float(
"wiki_revision_revision_skip_factor", 1.5,
"If >1, process only logarithmically many revisions. "
"This avoids blowup in runtime due to many-revision pages. "
"See wiki_revision_utils.include_revision for details.")
flags.DEFINE_float("wiki_revision_percent_identical_examples", 0.04,
"Percent of generated examples for which source == target.")
flags.DEFINE_bool(
"wiki_revision_introduce_errors", True, "Add errors to the data."
"See wiki_revision_utils.introduce_errors for details.")
@registry.register_problem
class WikiRevision(text_problems.Text2TextProblem):
"""Old segment -> revised segment.
Data filtration heuristics:
wiki_revision_max_page_size_exp:
pages above this # of bytes are thrown out
wiki_revision_revision_skip_factor:
rate of logarithmic downsampling of revision history list
wiki_revision_percent_identical_examples:
how many identical examples to admit, as a percent of total examples
wiki_revision_introduce_errors:
whether or not to introduce spelling-type errors on the source side
wiki_revision_max_equal_to_diff_ratio:
max ratio between the counts of equal and diff chars for admitted examples
Vocab size=32k
Maximum input/target length = 1024 wordpiece tokens
"""
num_identity_examples = 0
num_total_examples = 0
num_pages = 0
num_revisions_total = 0
num_revisions_admitted = 0
num_examples_thrown_out_identity = 0
num_examples_thrown_out_too_long = 0
num_examples_thrown_out_edit_distance = 0
num_examples_with_introduced_error = 0
num_introduced_errors = 0
num_source_tokens = 0
num_target_tokens = 0
corpus_files = None
@property
def approx_vocab_size(self):
return 2**15 # 32K
@property
def strip(self):
"""Whether to strip wikipedia-stuff to get plain text."""
return True
@property
def wiki_revision_skip_factor(self):
"""If this value is >1.0, process only logarithmically many revisions."""
return FLAGS.wiki_revision_revision_skip_factor
@property
def max_segment_length(self):
"""Maximum number of input/target wordpiece tokens."""
return 256
@property
def max_examples_per_shard(self):
"""Maximum number of examples to generate per shard. 0=unlimited."""
return FLAGS.wiki_revision_max_examples_per_shard
def aggregate_job_stats(self):
# Aggregate job stats for output.
stat = []
# Run stats.
stat.append("Flags for job:\n"
"Dev shards: {}\n"
"Train shards: {}\n"
"Revision skip factor: {}\n"
"Max page size: 2**{}\n"
"Introduce errors: {}\n"
"Max edit ratio: {}\n"
"Percent Identical Examples: {}\n"
"".format(FLAGS.wiki_revision_num_dev_shards,
FLAGS.wiki_revision_num_train_shards,
FLAGS.wiki_revision_revision_skip_factor,
FLAGS.wiki_revision_max_page_size_exp,
FLAGS.wiki_revision_introduce_errors,
FLAGS.wiki_revision_max_equal_to_diff_ratio,
FLAGS.wiki_revision_percent_identical_examples))
# File stats.
stat.append("corpus files: {}\n"
"\tnames: {}\n"
"\tpages per input file: {:.1f}\n"
"".format(
len(self.corpus_files), self.corpus_files,
(0 if not self.corpus_files else
self.num_pages / len(self.corpus_files))))
# Page stats.
stat.append(
"pages processed: {}\n"
"\trevisions per page: {:.2f}, total: {}\n"
"\trevisions admitted per page: {:.2f}, percent of total: {:.2f}\n"
"".format(
self.num_pages, (0 if not self.num_pages else
self.num_revisions_total / self.num_pages),
self.num_revisions_total,
(0 if not self.num_pages else
self.num_revisions_admitted / self.num_pages),
(0 if not self.num_revisions_total else
100 * self.num_revisions_admitted / self.num_revisions_total)))
# Revision stats.
stat.append(
"revisions admitted: {}\n"
"\texamples generated per revision: {:.2f}\n"
"".format(self.num_revisions_admitted,
(0 if not self.num_revisions_admitted else
self.num_total_examples / self.num_revisions_admitted)))
# Example stats.
stat.append(
"examples generated: {}\n"
"\twith error introduced: {}, percent of total: {:.2f}\n"
"\ttotal errors introduced: {}, errors per errorred example: {:.2f}\n"
"\texamples thrown out: {}\n"
"\t\ttoo long: {}\n"
"\t\tidentity: {}\n"
"\t\tedit distance: {}\n"
"\tremaining identity examples: {}\n"
"\tratio identity (actual, desired): {:.3f}, {}\n"
"".format(
self.num_total_examples, self.num_examples_with_introduced_error,
(0 if not self.num_total_examples else 100 *
self.num_examples_with_introduced_error / self.num_total_examples),
self.num_introduced_errors,
(0 if not self.num_examples_with_introduced_error else
self.num_introduced_errors /
self.num_examples_with_introduced_error),
self.num_examples_thrown_out_too_long +
self.num_examples_thrown_out_identity +
self.num_examples_thrown_out_edit_distance,
self.num_examples_thrown_out_too_long,
self.num_examples_thrown_out_identity,
self.num_examples_thrown_out_edit_distance,
self.num_identity_examples,
(0 if not self.num_total_examples else
self.num_identity_examples / self.num_total_examples),
FLAGS.wiki_revision_percent_identical_examples))
# Token stats.
stat.append("tokens generated: {}\n"
"\tsource: {}\n"
"\ttarget: {}\n"
"\tper example: {:.2f}\n"
"\t\tsource: {:.2f}\n"
"\t\ttarget: {:.2f}\n"
"".format(self.num_source_tokens + self.num_target_tokens,
self.num_source_tokens, self.num_target_tokens,
(0 if not self.num_total_examples else
(self.num_source_tokens + self.num_target_tokens) /
self.num_total_examples),
(0 if not self.num_total_examples else
self.num_source_tokens / self.num_total_examples),
(0 if not self.num_total_examples else
self.num_target_tokens / self.num_total_examples)))
return "\n".join(stat)
def generate_data(self, data_dir, tmp_dir, task_id=-1):
if task_id == -1 or task_id is None:
for i in range(FLAGS.wiki_revision_num_train_shards +
FLAGS.wiki_revision_num_dev_shards):
self.generate_data(data_dir, tmp_dir, i)
return
tf.logging.info(
"Flags for job (task_id {}): "
"Dev shards: {}, Train shards: {}, "
"Revision skip factor: {}, Max page size: 2**{}, Introduce errors: {},"
"Percent Identical Examples: {}"
"".format(task_id, FLAGS.wiki_revision_num_dev_shards,
FLAGS.wiki_revision_num_train_shards,
FLAGS.wiki_revision_revision_skip_factor,
FLAGS.wiki_revision_max_page_size_exp,
FLAGS.wiki_revision_introduce_errors,
FLAGS.wiki_revision_percent_identical_examples))
if FLAGS.wiki_revision_vocab_file:
encoder = wiki_revision_utils.get_encoder_from_vocab(
FLAGS.wiki_revision_vocab_file)
else:
encoder = wiki_revision_utils.get_or_generate_vocabulary(
data_dir, tmp_dir, FLAGS.wiki_revision_data_prefix,
FLAGS.wiki_revision_max_page_size_exp, self.approx_vocab_size,
self.strip)
random.seed(123)
if task_id < FLAGS.wiki_revision_num_train_shards:
out_file = self.training_filepaths(
data_dir, FLAGS.wiki_revision_num_train_shards,
shuffled=False)[task_id]
else:
out_file = self.dev_filepaths(
data_dir, FLAGS.wiki_revision_num_dev_shards,
shuffled=False)[task_id - FLAGS.wiki_revision_num_train_shards]
tf.logging.info("Generating files for path: %s", out_file)
self.corpus_files = wiki_revision_utils.corpus_files_for_shard(
task_id, FLAGS.wiki_revision_num_train_shards,
FLAGS.wiki_revision_num_dev_shards, FLAGS.wiki_revision_data_prefix)
example_generator = self.generator(encoder, self.corpus_files, tmp_dir)
packed_example_generator = self._maybe_pack_examples(example_generator)
generator_utils.generate_files(packed_example_generator, [out_file])
generator_utils.shuffle_dataset([out_file])
tf.logging.info(
"Job stats: identity examples: {}, total examples {}, ratio: {}".format(
self.num_identity_examples, self.num_total_examples,
(1 + self.num_identity_examples) / (1 + self.num_total_examples)))
job_stats_string = self.aggregate_job_stats()
out_dir, filename = out_file.replace("-unshuffled", "").rsplit("/", 1)
stats_prefix = "/stats_"
stats_file_path = "".join([out_dir, stats_prefix, filename])
if tf.gfile.Exists(
stats_file_path) and tf.gfile.Open(stats_file_path).size() != 0:
tf.logging.info("Skipping writing stats because output file exists.")
else:
with tf.gfile.Open(stats_file_path, "w") as out:
tf.logging.info("Writing job stats to {}".format(stats_file_path))
out.write(job_stats_string)
tf.logging.info(job_stats_string)
def generator(self, encoder, corpus_files, tmp_dir):
for page in wiki_revision_utils.corpus_page_generator(
corpus_files, tmp_dir, FLAGS.wiki_revision_max_page_size_exp):
self.num_pages += 1
examples = self.page_to_examples(page, encoder)
for x in examples:
yield x
if self.num_total_examples % 100000 == 0:
tf.logging.info(
u"page count={} num_total_examples={} id={} title={}".format(
self.num_pages, self.num_total_examples, page["id"],
page["title"]))
if (self.max_examples_per_shard and
self.num_total_examples >= self.max_examples_per_shard):
tf.logging.info(
"Examples per shard {} >= max_examples_per_shard {}. Shutting down."
.format(self.num_total_examples, self.max_examples_per_shard))
break
tf.logging.info(
"Total pages: {}, total examples: {}, examples per page: {}".format(
self.num_pages, self.num_total_examples, 0 if not self.num_pages
else self.num_total_examples / self.num_pages))
def page_to_examples(self, page, encoder):
revisions = page["revisions"]
self.num_revisions_total += len(revisions)
if len(revisions) < 2:
return []
revisions = [
wiki_revision_utils.get_text(r)
for n, r in enumerate(revisions)
if wiki_revision_utils.include_revision(
n, self.wiki_revision_skip_factor) or n + 1 == len(revisions)
]
self.num_revisions_admitted += len(revisions)
ret = []
for i in range(len(revisions) - 1):
old_revision = revisions[i]
new_revision = revisions[i + 1]
if FLAGS.wiki_revision_introduce_errors:
old_revision_text, num_added_err = wiki_revision_utils.introduce_errors(
revisions[i])
if num_added_err:
self.num_introduced_errors += num_added_err
self.num_examples_with_introduced_error += 1
else:
old_revision_text = revisions[i]
new_revision_text = revisions[i + 1]
if encoder:
# Encode text into list of ids, if a text encoder is present.
old_revision = encoder.encode(old_revision_text)
new_revision = encoder.encode(new_revision_text)
else:
# Retain text (as list of characters), if a text encoder is not present.
old_revision = old_revision_text
new_revision = new_revision_text
ret.extend(
self.make_examples(
encoder,
old_revision,
new_revision,
max_length=self.max_segment_length,
percent_identical_examples=FLAGS
.wiki_revision_percent_identical_examples))
return ret
def make_examples(self,
encoder,
old_snapshot,
new_snapshot,
max_length=1024,
percent_identical_examples=0.01,
max_length_distance=0):
"""Produce training examples based on a pair of snapshots.
Aligns the snapshots, then chops at a random subset of the alignment points
to create (old snippet -> new snippet) examples.
Most negative examples (those with no changes) are discarded, but we
keep some of them, maintaining a proportion in the final data
determined by percent_identical_examples.
Args:
encoder: the subword text encoder
old_snapshot: a list of ids
new_snapshot: a list of ids
max_length: an integer. Maximum length of "inputs" and "targets".
percent_identical_examples: a float
max_length_distance: an integer. Max token edit dist for admitted examples
Returns:
a list of feature dictionaries. The dictionaries have
"inputs" and "targets" populated. text_encoder.EOS is appended to both.
"""
ret = []
eos_sequence = [text_encoder.EOS_ID]
# Pick a per-token cut probability with a log-uniform distribution between
# 1/4 and 1/(max_length / 2)
bound1 = -math.log(4.0)
bound2 = -math.log(max_length / 2.0)
cut_prob = math.exp(random.random() * (bound2 - bound1) + bound1)
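# For example, with max_length=256 (this problem's max_segment_length) the
# draw above is log-uniform over [1/128, 1/4], i.e. roughly one cut every
# 4 to 128 tokens of unchanged text.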
opcodes = wiki_revision_utils.fast_match_sequences(old_snapshot,
new_snapshot)
cut_points = [(0, 0)]
for tag, i1, i2, j1, j2 in opcodes:
if tag == "equal":
for i in range(i1, i2 + 1):
if random.random() < cut_prob:
cut_points.append((i, i + j1 - i1))
cut_points.append((len(old_snapshot), len(new_snapshot)))
src_tgt_pairs = []
for cut_number in range(len(cut_points) - 1):
i1, j1 = cut_points[cut_number]
i2, j2 = cut_points[cut_number + 1]
old_segment = old_snapshot[i1:i2]
new_segment = new_snapshot[j1:j2]
src_tgt_pairs.append((old_segment, new_segment))
src_tgt_pairs, thrown_edit_count = wiki_revision_utils.edit_distance_filter(
wiki_revision_utils.throw_empty_pairs(src_tgt_pairs),
FLAGS.wiki_revision_max_equal_to_diff_ratio)
self.num_examples_thrown_out_edit_distance += thrown_edit_count
for source, target in src_tgt_pairs:
# Add EOS segment.
old_segment = source + eos_sequence
new_segment = target + eos_sequence
if len(old_segment) <= max_length and len(new_segment) <= max_length:
if max_length_distance and (abs(len(old_segment) - len(new_segment)) >
max_length_distance):
self.num_examples_thrown_out_edit_distance += 1
continue
if old_segment == new_segment:
# If the current proportion of identity examples already exceeds the target
# percent_identical_examples, or a 50% coin flip fails, throw this identical
# example out; otherwise keep it. The random roll preserves nondeterminism
# in selecting which identity examples are kept.
if (((self.num_identity_examples) / (1 + self.num_total_examples)) >
percent_identical_examples) or random.random() > 0.5:
self.num_examples_thrown_out_identity += 1
continue
else:
self.num_identity_examples += 1
self.num_total_examples += 1
self.num_source_tokens += len(old_segment) - 1
self.num_target_tokens += len(new_segment) - 1
ret.append({"inputs": old_segment, "targets": new_segment})
else:
self.num_examples_thrown_out_too_long += 1
return ret
def eval_metrics(self):
return [
metrics.Metrics.ACC,
metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ,
metrics.Metrics.NEG_LOG_PERPLEXITY,
]
@property
def invert_prob(self):
"""Ratio of e^2 positive forward to backward examples."""
return 1.0 / (1.0 + math.exp(2.0))
@registry.register_problem
class WikiRevisionPacked1k(WikiRevision):
"""Packed version for TPU."""
@property
def packed_length(self):
return 1024
@registry.register_problem
class WikiRevisionPacked256(WikiRevision):
"""Packed version for TPU."""
@property
def packed_length(self):
return 256
@property
def max_segment_length(self):
return 256
|
|
from functools import partial
from itertools import permutations
import numpy as np
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba import jit, njit, from_dtype, typeof
from numba.core.errors import TypingError
from numba.core import types, errors
from numba.tests.support import (TestCase, MemoryLeakMixin, CompilationCache,
tag)
enable_pyobj_flags = Flags()
enable_pyobj_flags.enable_pyobject = True
no_pyobj_flags = Flags()
no_pyobj_flags.nrt = True
def from_generic(pyfuncs_to_use):
"""Decorator for generic check functions.
Iterates over 'pyfuncs_to_use', calling 'func' with the iterated
item as first argument. Example:
@from_generic(numpy_array_reshape, array_reshape)
def check_only_shape(pyfunc, arr, shape, expected_shape):
# Only check Numba result to avoid Numpy bugs
self.memory_leak_setup()
got = generic_run(pyfunc, arr, shape)
self.assertEqual(got.shape, expected_shape)
self.assertEqual(got.size, arr.size)
del got
self.memory_leak_teardown()
"""
def decorator(func):
def result(*args, **kwargs):
return [func(pyfunc, *args, **kwargs) for pyfunc in pyfuncs_to_use]
return result
return decorator
def array_reshape(arr, newshape):
return arr.reshape(newshape)
def numpy_array_reshape(arr, newshape):
return np.reshape(arr, newshape)
def numpy_broadcast_to(arr, shape):
return np.broadcast_to(arr, shape)
def numpy_broadcast_to_indexing(arr, shape, idx):
return np.broadcast_to(arr, shape)[idx]
def flatten_array(a):
return a.flatten()
def ravel_array(a):
return a.ravel()
def ravel_array_size(a):
return a.ravel().size
def numpy_ravel_array(a):
return np.ravel(a)
def transpose_array(a):
return a.transpose()
def numpy_transpose_array(a):
return np.transpose(a)
def numpy_transpose_array_axes_kwarg(arr, axes):
return np.transpose(arr, axes=axes)
def numpy_transpose_array_axes_kwarg_copy(arr, axes):
return np.transpose(arr, axes=axes).copy()
def array_transpose_axes(arr, axes):
return arr.transpose(axes)
def array_transpose_axes_copy(arr, axes):
return arr.transpose(axes).copy()
def transpose_issue_4708(m, n):
r1 = np.reshape(np.arange(m * n * 3), (m, 3, n))
r2 = np.reshape(np.arange(n * 3), (n, 3))
r_dif = (r1 - r2.T).T
r_dif = np.transpose(r_dif, (2, 0, 1))
z = r_dif + 1
return z
def squeeze_array(a):
return a.squeeze()
def expand_dims(a, axis):
return np.expand_dims(a, axis)
def atleast_1d(*args):
return np.atleast_1d(*args)
def atleast_2d(*args):
return np.atleast_2d(*args)
def atleast_3d(*args):
return np.atleast_3d(*args)
def as_strided1(a):
# as_strided() with implicit shape
strides = (a.strides[0] // 2,) + a.strides[1:]
return np.lib.stride_tricks.as_strided(a, strides=strides)
def as_strided2(a):
# Rolling window example as in https://github.com/numba/numba/issues/1884
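# e.g. for a 1-D a = np.arange(5) this returns the (3, 3) view
# [[0, 1, 2], [1, 2, 3], [2, 3, 4]] without copying any data.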
window = 3
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def add_axis2(a):
return a[np.newaxis, :]
def bad_index(arr, arr2d):
x = arr.x,
y = arr.y
# note that `x` is a tuple, which causes a new axis to be created.
arr2d[x, y] = 1.0
def bad_float_index(arr):
# 2D index required for this function because 1D index
# fails typing
return arr[1, 2.0]
def numpy_fill_diagonal(arr, val, wrap=False):
return np.fill_diagonal(arr, val, wrap)
def numpy_shape(arr):
return np.shape(arr)
def numpy_flatnonzero(a):
return np.flatnonzero(a)
def numpy_argwhere(a):
return np.argwhere(a)
class TestArrayManipulation(MemoryLeakMixin, TestCase):
"""
Check shape-changing operations on arrays.
"""
def setUp(self):
super(TestArrayManipulation, self).setUp()
self.ccache = CompilationCache()
def test_array_reshape(self):
pyfuncs_to_use = [array_reshape, numpy_array_reshape]
def generic_run(pyfunc, arr, shape):
cres = compile_isolated(pyfunc, (typeof(arr), typeof(shape)))
return cres.entry_point(arr, shape)
@from_generic(pyfuncs_to_use)
def check(pyfunc, arr, shape):
expected = pyfunc(arr, shape)
self.memory_leak_setup()
got = generic_run(pyfunc, arr, shape)
self.assertPreciseEqual(got, expected)
del got
self.memory_leak_teardown()
@from_generic(pyfuncs_to_use)
def check_only_shape(pyfunc, arr, shape, expected_shape):
# Only check Numba result to avoid Numpy bugs
self.memory_leak_setup()
got = generic_run(pyfunc, arr, shape)
self.assertEqual(got.shape, expected_shape)
self.assertEqual(got.size, arr.size)
del got
self.memory_leak_teardown()
@from_generic(pyfuncs_to_use)
def check_err_shape(pyfunc, arr, shape):
with self.assertRaises(NotImplementedError) as raises:
generic_run(pyfunc, arr, shape)
self.assertEqual(str(raises.exception),
"incompatible shape for array")
@from_generic(pyfuncs_to_use)
def check_err_size(pyfunc, arr, shape):
with self.assertRaises(ValueError) as raises:
generic_run(pyfunc, arr, shape)
self.assertEqual(str(raises.exception),
"total size of new array must be unchanged")
@from_generic(pyfuncs_to_use)
def check_err_multiple_negative(pyfunc, arr, shape):
with self.assertRaises(ValueError) as raises:
generic_run(pyfunc, arr, shape)
self.assertEqual(str(raises.exception),
"multiple negative shape values")
# C-contiguous
arr = np.arange(24)
check(arr, (24,))
check(arr, (4, 6))
check(arr, (8, 3))
check(arr, (8, 1, 3))
check(arr, (1, 8, 1, 1, 3, 1))
arr = np.arange(24).reshape((2, 3, 4))
check(arr, (24,))
check(arr, (4, 6))
check(arr, (8, 3))
check(arr, (8, 1, 3))
check(arr, (1, 8, 1, 1, 3, 1))
check_err_size(arr, ())
check_err_size(arr, (25,))
check_err_size(arr, (8, 4))
arr = np.arange(24).reshape((1, 8, 1, 1, 3, 1))
check(arr, (24,))
check(arr, (4, 6))
check(arr, (8, 3))
check(arr, (8, 1, 3))
# F-contiguous
arr = np.arange(24).reshape((2, 3, 4)).T
check(arr, (4, 3, 2))
check(arr, (1, 4, 1, 3, 1, 2, 1))
check_err_shape(arr, (2, 3, 4))
check_err_shape(arr, (6, 4))
check_err_shape(arr, (2, 12))
# Test negative shape value
arr = np.arange(25).reshape(5,5)
check(arr, -1)
check(arr, (-1,))
check(arr, (-1, 5))
check(arr, (5, -1, 5))
check(arr, (5, 5, -1))
check_err_size(arr, (-1, 4))
check_err_multiple_negative(arr, (-1, -2, 5, 5))
check_err_multiple_negative(arr, (5, 5, -1, -1))
# 0-sized arrays
def check_empty(arr):
check(arr, 0)
check(arr, (0,))
check(arr, (1, 0, 2))
check(arr, (0, 55, 1, 0, 2))
# -1 is buggy in Numpy with 0-sized arrays
check_only_shape(arr, -1, (0,))
check_only_shape(arr, (-1,), (0,))
check_only_shape(arr, (0, -1), (0, 0))
check_only_shape(arr, (4, -1), (4, 0))
check_only_shape(arr, (-1, 0, 4), (0, 0, 4))
check_err_size(arr, ())
check_err_size(arr, 1)
check_err_size(arr, (1, 2))
arr = np.array([])
check_empty(arr)
check_empty(arr.reshape((3, 2, 0)))
# Exceptions leak references
self.disable_leak_check()
def test_array_transpose_axes(self):
pyfuncs_to_use = [numpy_transpose_array_axes_kwarg,
numpy_transpose_array_axes_kwarg_copy,
array_transpose_axes,
array_transpose_axes_copy]
def run(pyfunc, arr, axes):
cres = self.ccache.compile(pyfunc, (typeof(arr), typeof(axes)))
return cres.entry_point(arr, axes)
@from_generic(pyfuncs_to_use)
def check(pyfunc, arr, axes):
expected = pyfunc(arr, axes)
got = run(pyfunc, arr, axes)
self.assertPreciseEqual(got, expected)
self.assertEqual(got.flags.f_contiguous,
expected.flags.f_contiguous)
self.assertEqual(got.flags.c_contiguous,
expected.flags.c_contiguous)
@from_generic(pyfuncs_to_use)
def check_err_axis_repeated(pyfunc, arr, axes):
with self.assertRaises(ValueError) as raises:
run(pyfunc, arr, axes)
self.assertEqual(str(raises.exception),
"repeated axis in transpose")
@from_generic(pyfuncs_to_use)
def check_err_axis_oob(pyfunc, arr, axes):
with self.assertRaises(ValueError) as raises:
run(pyfunc, arr, axes)
self.assertEqual(str(raises.exception),
"axis is out of bounds for array of given dimension")
@from_generic(pyfuncs_to_use)
def check_err_invalid_args(pyfunc, arr, axes):
with self.assertRaises((TypeError, TypingError)):
run(pyfunc, arr, axes)
arrs = [np.arange(24),
np.arange(24).reshape(4, 6),
np.arange(24).reshape(2, 3, 4),
np.arange(24).reshape(1, 2, 3, 4),
np.arange(64).reshape(8, 4, 2)[::3,::2,:]]
for i in range(len(arrs)):
# First check `None`, the default, which is to reverse dims
check(arrs[i], None)
# Check supplied axis permutations
for axes in permutations(tuple(range(arrs[i].ndim))):
ndim = len(axes)
neg_axes = tuple([x - ndim for x in axes])
check(arrs[i], axes)
check(arrs[i], neg_axes)
@from_generic([transpose_issue_4708])
def check_issue_4708(pyfunc, m, n):
expected = pyfunc(m, n)
got = njit(pyfunc)(m, n)
# values in the arrays are equal,
# but stronger assertions do not hold (layout and strides equality)
np.testing.assert_equal(got, expected)
check_issue_4708(3, 2)
check_issue_4708(2, 3)
check_issue_4708(5, 4)
# Exceptions leak references
self.disable_leak_check()
check_err_invalid_args(arrs[1], "foo")
check_err_invalid_args(arrs[1], ("foo",))
check_err_invalid_args(arrs[1], 5.3)
check_err_invalid_args(arrs[2], (1.2, 5))
check_err_axis_repeated(arrs[1], (0, 0))
check_err_axis_repeated(arrs[2], (2, 0, 0))
check_err_axis_repeated(arrs[3], (3, 2, 1, 1))
check_err_axis_oob(arrs[0], (1,))
check_err_axis_oob(arrs[0], (-2,))
check_err_axis_oob(arrs[1], (0, 2))
check_err_axis_oob(arrs[1], (-3, 2))
check_err_axis_oob(arrs[1], (0, -3))
check_err_axis_oob(arrs[2], (3, 1, 2))
check_err_axis_oob(arrs[2], (-4, 1, 2))
check_err_axis_oob(arrs[3], (3, 1, 2, 5))
check_err_axis_oob(arrs[3], (3, 1, 2, -5))
with self.assertRaises(TypingError) as e:
jit(nopython=True)(numpy_transpose_array)((np.array([0, 1]),))
self.assertIn("np.transpose does not accept tuples",
str(e.exception))
def test_expand_dims(self):
pyfunc = expand_dims
def run(arr, axis):
cres = self.ccache.compile(pyfunc, (typeof(arr), typeof(axis)))
return cres.entry_point(arr, axis)
def check(arr, axis):
expected = pyfunc(arr, axis)
self.memory_leak_setup()
got = run(arr, axis)
self.assertPreciseEqual(got, expected)
del got
self.memory_leak_teardown()
def check_all_axes(arr):
for axis in range(-arr.ndim - 1, arr.ndim + 1):
check(arr, axis)
# 1d
arr = np.arange(5)
check_all_axes(arr)
# 3d (C, F, A)
arr = np.arange(24).reshape((2, 3, 4))
check_all_axes(arr)
check_all_axes(arr.T)
check_all_axes(arr[::-1])
# 0d
arr = np.array(42)
check_all_axes(arr)
def check_atleast_nd(self, pyfunc, cfunc):
def check_result(got, expected):
# We would like to check the result has the same contiguity,
# but we can't rely on the "flags" attribute when there are
# 1-sized dimensions.
self.assertStridesEqual(got, expected)
self.assertPreciseEqual(got.flatten(), expected.flatten())
def check_single(arg):
check_result(cfunc(arg), pyfunc(arg))
def check_tuple(*args):
expected_tuple = pyfunc(*args)
got_tuple = cfunc(*args)
self.assertEqual(len(got_tuple), len(expected_tuple))
for got, expected in zip(got_tuple, expected_tuple):
check_result(got, expected)
# 0d
a1 = np.array(42)
a2 = np.array(5j)
check_single(a1)
check_tuple(a1, a2)
# 1d
b1 = np.arange(5)
b2 = np.arange(6) + 1j
b3 = b1[::-1]
check_single(b1)
check_tuple(b1, b2, b3)
# 2d
c1 = np.arange(6).reshape((2, 3))
c2 = c1.T
c3 = c1[::-1]
check_single(c1)
check_tuple(c1, c2, c3)
# 3d
d1 = np.arange(24).reshape((2, 3, 4))
d2 = d1.T
d3 = d1[::-1]
check_single(d1)
check_tuple(d1, d2, d3)
# 4d
e = np.arange(16).reshape((2, 2, 2, 2))
check_single(e)
# mixed dimensions
check_tuple(a1, b2, c3, d2)
def test_atleast_1d(self):
pyfunc = atleast_1d
cfunc = jit(nopython=True)(pyfunc)
self.check_atleast_nd(pyfunc, cfunc)
def test_atleast_2d(self):
pyfunc = atleast_2d
cfunc = jit(nopython=True)(pyfunc)
self.check_atleast_nd(pyfunc, cfunc)
def test_atleast_3d(self):
pyfunc = atleast_3d
cfunc = jit(nopython=True)(pyfunc)
self.check_atleast_nd(pyfunc, cfunc)
def check_as_strided(self, pyfunc):
def run(arr):
cres = self.ccache.compile(pyfunc, (typeof(arr),))
return cres.entry_point(arr)
def check(arr):
expected = pyfunc(arr)
got = run(arr)
self.assertPreciseEqual(got, expected)
arr = np.arange(24)
check(arr)
check(arr.reshape((6, 4)))
check(arr.reshape((4, 1, 6)))
def test_as_strided(self):
self.check_as_strided(as_strided1)
self.check_as_strided(as_strided2)
def test_flatten_array(self, flags=enable_pyobj_flags, layout='C'):
a = np.arange(9).reshape(3, 3)
if layout == 'F':
a = a.T
pyfunc = flatten_array
arraytype1 = typeof(a)
if layout == 'A':
# Force A layout
arraytype1 = arraytype1.copy(layout='A')
self.assertEqual(arraytype1.layout, layout)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_flatten_array_npm(self):
self.test_flatten_array(flags=no_pyobj_flags)
self.test_flatten_array(flags=no_pyobj_flags, layout='F')
self.test_flatten_array(flags=no_pyobj_flags, layout='A')
def test_ravel_array(self, flags=enable_pyobj_flags):
def generic_check(pyfunc, a, assume_layout):
# compile
arraytype1 = typeof(a)
self.assertEqual(arraytype1.layout, assume_layout)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
# Check result matches
np.testing.assert_equal(expected, got)
# Check copying behavior
py_copied = (a.ctypes.data != expected.ctypes.data)
nb_copied = (a.ctypes.data != got.ctypes.data)
self.assertEqual(py_copied, assume_layout != 'C')
self.assertEqual(py_copied, nb_copied)
check_method = partial(generic_check, ravel_array)
check_function = partial(generic_check, numpy_ravel_array)
def check(*args, **kwargs):
check_method(*args, **kwargs)
check_function(*args, **kwargs)
# Check 2D
check(np.arange(9).reshape(3, 3), assume_layout='C')
check(np.arange(9).reshape(3, 3, order='F'), assume_layout='F')
check(np.arange(18).reshape(3, 3, 2)[:, :, 0], assume_layout='A')
# Check 3D
check(np.arange(18).reshape(2, 3, 3), assume_layout='C')
check(np.arange(18).reshape(2, 3, 3, order='F'), assume_layout='F')
check(np.arange(36).reshape(2, 3, 3, 2)[:, :, :, 0], assume_layout='A')
def test_ravel_array_size(self, flags=enable_pyobj_flags):
a = np.arange(9).reshape(3, 3)
pyfunc = ravel_array_size
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_ravel_array_npm(self):
self.test_ravel_array(flags=no_pyobj_flags)
def test_ravel_array_size_npm(self):
self.test_ravel_array_size(flags=no_pyobj_flags)
def test_transpose_array(self, flags=enable_pyobj_flags):
@from_generic([transpose_array, numpy_transpose_array])
def check(pyfunc):
a = np.arange(9).reshape(3, 3)
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
check()
def test_transpose_array_npm(self):
self.test_transpose_array(flags=no_pyobj_flags)
def test_squeeze_array(self, flags=enable_pyobj_flags):
a = np.arange(2 * 1 * 3 * 1 * 4).reshape(2, 1, 3, 1, 4)
pyfunc = squeeze_array
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_squeeze_array_npm(self):
with self.assertRaises(errors.TypingError) as raises:
self.test_squeeze_array(flags=no_pyobj_flags)
self.assertIn("squeeze", str(raises.exception))
def test_add_axis2(self, flags=enable_pyobj_flags):
a = np.arange(9).reshape(3, 3)
pyfunc = add_axis2
arraytype1 = typeof(a)
cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
cfunc = cr.entry_point
expected = pyfunc(a)
got = cfunc(a)
np.testing.assert_equal(expected, got)
def test_add_axis2_npm(self):
with self.assertTypingError() as raises:
self.test_add_axis2(flags=no_pyobj_flags)
self.assertIn("unsupported array index type none in",
str(raises.exception))
def test_bad_index_npm(self):
with self.assertTypingError() as raises:
arraytype1 = from_dtype(np.dtype([('x', np.int32),
('y', np.int32)]))
arraytype2 = types.Array(types.int32, 2, 'C')
compile_isolated(bad_index, (arraytype1, arraytype2),
flags=no_pyobj_flags)
self.assertIn('unsupported array index type', str(raises.exception))
def test_bad_float_index_npm(self):
with self.assertTypingError() as raises:
compile_isolated(bad_float_index,
(types.Array(types.float64, 2, 'C'),))
self.assertIn('unsupported array index type float64',
str(raises.exception))
def test_fill_diagonal_basic(self):
pyfunc = numpy_fill_diagonal
cfunc = jit(nopython=True)(pyfunc)
def _shape_variations(n):
# square
yield (n, n)
# tall and thin
yield (2 * n, n)
# short and fat
yield (n, 2 * n)
# a bit taller than wide; odd numbers of rows and cols
yield ((2 * n + 1), (2 * n - 1))
# 4d, all dimensions same
yield (n, n, n, n)
# weird edge case
yield (1, 1, 1)
def _val_variations():
yield 1
yield 3.142
yield np.nan
yield -np.inf
yield True
yield np.arange(4)
yield (4,)
yield [8, 9]
yield np.arange(54).reshape(9, 3, 2, 1) # contiguous C
yield np.asfortranarray(np.arange(9).reshape(3, 3)) # contiguous F
yield np.arange(9).reshape(3, 3)[::-1] # non-contiguous
# contiguous arrays
def _multi_dimensional_array_variations(n):
for shape in _shape_variations(n):
yield np.zeros(shape, dtype=np.float64)
yield np.asfortranarray(np.ones(shape, dtype=np.float64))
# non-contiguous arrays
def _multi_dimensional_array_variations_strided(n):
for shape in _shape_variations(n):
tmp = np.zeros(tuple([x * 2 for x in shape]), dtype=np.float64)
slicer = tuple(slice(0, x * 2, 2) for x in shape)
yield tmp[slicer]
def _check_fill_diagonal(arr, val):
for wrap in None, True, False:
a = arr.copy()
b = arr.copy()
if wrap is None:
params = {}
else:
params = {'wrap': wrap}
pyfunc(a, val, **params)
cfunc(b, val, **params)
self.assertPreciseEqual(a, b)
for arr in _multi_dimensional_array_variations(3):
for val in _val_variations():
_check_fill_diagonal(arr, val)
for arr in _multi_dimensional_array_variations_strided(3):
for val in _val_variations():
_check_fill_diagonal(arr, val)
# non-numeric input arrays
arr = np.array([True] * 9).reshape(3, 3)
_check_fill_diagonal(arr, False)
_check_fill_diagonal(arr, [False, True, False])
_check_fill_diagonal(arr, np.array([True, False, True]))
def test_fill_diagonal_exception_cases(self):
pyfunc = numpy_fill_diagonal
cfunc = jit(nopython=True)(pyfunc)
val = 1
# Exceptions leak references
self.disable_leak_check()
# first argument unsupported number of dimensions
for a in np.array([]), np.ones(5):
with self.assertRaises(TypingError) as raises:
cfunc(a, val)
assert "The first argument must be at least 2-D" in str(raises.exception)
# multi-dimensional input where dimensions are not all equal
with self.assertRaises(ValueError) as raises:
a = np.zeros((3, 3, 4))
cfunc(a, val)
self.assertEqual("All dimensions of input must be of equal length", str(raises.exception))
# cases where val has incompatible type / value
def _assert_raises(arr, val):
with self.assertRaises(ValueError) as raises:
cfunc(arr, val)
self.assertEqual("Unable to safely conform val to a.dtype", str(raises.exception))
arr = np.zeros((3, 3), dtype=np.int32)
val = np.nan
_assert_raises(arr, val)
val = [3.3, np.inf]
_assert_raises(arr, val)
val = np.array([1, 2, 1e10], dtype=np.int64)
_assert_raises(arr, val)
arr = np.zeros((3, 3), dtype=np.float32)
val = [1.4, 2.6, -1e100]
_assert_raises(arr, val)
val = 1.1e100
_assert_raises(arr, val)
val = np.array([-1e100])
_assert_raises(arr, val)
def test_broadcast_to(self):
pyfunc = numpy_broadcast_to
cfunc = jit(nopython=True)(pyfunc)
# Tests taken from
# https://github.com/numpy/numpy/blob/75f852edf94a7293e7982ad516bee314d7187c2d/numpy/lib/tests/test_stride_tricks.py#L234-L257 # noqa: E501
data = [
[np.array(0), (0,)],
[np.array(0), (1,)],
[np.array(0), (3,)],
[np.ones(1), (1,)],
[np.ones(1), (2,)],
[np.ones(1), (1, 2, 3)],
[np.arange(3), (3,)],
[np.arange(3), (1, 3)],
[np.arange(3), (2, 3)],
# test if shape is not a tuple
[np.ones(0), 0],
[np.ones(1), 1],
[np.ones(1), 2],
# these cases with size 0 are strange, but they reproduce the behavior
# of broadcasting with ufuncs
[np.ones(1), (0,)],
[np.ones((1, 2)), (0, 2)],
[np.ones((2, 1)), (2, 0)],
# numpy accepts scalar values as first argument to np.broadcast_to
[2, (2, 2)],
# tuple input
[(1, 2), (2, 2)],
]
for input_array, shape in data:
expected = pyfunc(input_array, shape)
got = cfunc(input_array, shape)
self.assertPreciseEqual(got, expected)
def test_broadcast_to_raises(self):
pyfunc = numpy_broadcast_to
cfunc = jit(nopython=True)(pyfunc)
# Tests taken from
# https://github.com/numpy/numpy/blob/75f852edf94a7293e7982ad516bee314d7187c2d/numpy/lib/tests/test_stride_tricks.py#L260-L276 # noqa: E501
data = [
[np.zeros((0,)), (), TypingError,
'The argument "shape" must be a tuple or an integer.'],
[np.zeros((1,)), (), TypingError,
'The argument "shape" must be a tuple or an integer.'],
[np.zeros((3,)), (), TypingError,
'The argument "shape" must be a tuple or an integer.'],
[np.zeros((3,)), (1,), ValueError,
'operands could not be broadcast together with remapped shapes'],
[np.zeros((3,)), (2,), ValueError,
'operands could not be broadcast together with remapped shapes'],
[np.zeros((3,)), (4,), ValueError,
'operands could not be broadcast together with remapped shapes'],
[np.zeros((1, 2)), (2, 1), ValueError,
'operands could not be broadcast together with remapped shapes'],
[np.zeros((1, 1)), (1,), ValueError,
'input operand has more dimensions than allowed by the axis remapping'],
[np.zeros((2, 2)), (3,), ValueError,
'input operand has more dimensions than allowed by the axis remapping'],
[np.zeros((1,)), -1, ValueError,
'all elements of broadcast shape must be non-negative'],
[np.zeros((1,)), (-1,), ValueError,
'all elements of broadcast shape must be non-negative'],
[np.zeros((1, 2)), (-1, 2), ValueError,
'all elements of broadcast shape must be non-negative'],
[np.zeros((1, 2)), (1.1, 2.2), TypingError,
'The second argument "shape" must be a tuple of integers'],
['hello', (3,), TypingError,
'The first argument "array" must be array-like'],
]
self.disable_leak_check()
for arr, target_shape, err, msg in data:
with self.assertRaises(err) as raises:
cfunc(arr, target_shape)
self.assertIn(msg, str(raises.exception))
def test_broadcast_to_change_view(self):
pyfunc = numpy_broadcast_to
cfunc = jit(nopython=True)(pyfunc)
input_array = np.zeros(2, dtype=np.int32)
shape = (2, 2)
view = cfunc(input_array, shape)
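# The broadcast result is a view onto input_array, so the single write of 10
# below is visible in both rows of the (2, 2) view (sums of 10 and 20).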
input_array[0] = 10
self.assertEqual(input_array.sum(), 10)
self.assertEqual(view.sum(), 20)
def test_broadcast_to_indexing(self):
pyfunc = numpy_broadcast_to_indexing
cfunc = jit(nopython=True)(pyfunc)
data = [
[np.ones(2), (2, 2), (1,)],
]
for input_array, shape, idx in data:
expected = pyfunc(input_array, shape, idx)
got = cfunc(input_array, shape, idx)
self.assertPreciseEqual(got, expected)
def test_shape(self):
pyfunc = numpy_shape
cfunc = jit(nopython=True)(pyfunc)
def check(x):
expected = pyfunc(x)
got = cfunc(x)
self.assertPreciseEqual(got, expected)
# check arrays
for t in [(), (1,), (2, 3,), (4, 5, 6)]:
arr = np.empty(t)
check(arr)
# check some types that go via asarray
for t in [1, False, [1,], [[1, 2,],[3, 4]], (1,), (1, 2, 3)]:
check(t)
with self.assertRaises(TypingError) as raises:
cfunc('a')
self.assertIn("The argument to np.shape must be array-like",
str(raises.exception))
def test_flatnonzero_basic(self):
pyfunc = numpy_flatnonzero
cfunc = jit(nopython=True)(pyfunc)
def a_variations():
yield np.arange(-5, 5)
yield np.full(5, fill_value=0)
yield np.array([])
a = self.random.randn(100)
a[np.abs(a) > 0.2] = 0.0
yield a
yield a.reshape(5, 5, 4)
yield a.reshape(50, 2, order='F')
yield a.reshape(25, 4)[1::2]
yield a * 1j
for a in a_variations():
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
def test_argwhere_basic(self):
pyfunc = numpy_argwhere
cfunc = jit(nopython=True)(pyfunc)
def a_variations():
yield np.arange(-5, 5) > 2
yield np.full(5, fill_value=0)
yield np.full(5, fill_value=1)
yield np.array([])
yield np.array([-1.0, 0.0, 1.0])
a = self.random.randn(100)
yield a > 0.2
yield a.reshape(5, 5, 4) > 0.5
yield a.reshape(50, 2, order='F') > 0.5
yield a.reshape(25, 4)[1::2] > 0.5
yield a == a - 1
yield a > -a
for a in a_variations():
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
@staticmethod
def array_like_variations():
yield ((1.1, 2.2), (3.3, 4.4), (5.5, 6.6))
yield (0.0, 1.0, 0.0, -6.0)
yield ([0, 1], [2, 3])
yield ()
yield np.nan
yield 0
yield 1
yield False
yield True
yield (True, False, True)
yield 2 + 1j
# the following are not array-like, but NumPy does not raise
yield None
yield 'a_string'
yield ''
def test_flatnonzero_array_like(self):
pyfunc = numpy_flatnonzero
cfunc = jit(nopython=True)(pyfunc)
for a in self.array_like_variations():
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
def test_argwhere_array_like(self):
pyfunc = numpy_argwhere
cfunc = jit(nopython=True)(pyfunc)
for a in self.array_like_variations():
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
import sys
import argparse
import logging
import time
import config
import netcon
# Global variables
TENSOR_NAMES = []
TENSOR_MATH_NAMES = []
BOND_NAMES = []
BOND_DIMS = []
VECTORS = []
FINAL_ORDER = None
class Tensor:
def __init__(self,name=None,bonds=[]):
if name==None:
self.name = []
elif isinstance(name, list):
self.name = name[:]
else:
self.name = [name]
self.bonds = bonds[:]
def __repr__(self):
return "Tensor(" + str(self.name) + ", " + str(self.bonds) +")"
def __str__(self):
return str(self.name) + ", " + str(self.bonds)
class Bond:
def __init__(self,t0=-1,t1=-1):
self.t0 = t0
self.t1 = t1
def __str__(self):
return "({0},{1})".format(self.t0,self.t1)
def isFree(self):
return (self.t0 < 0 or self.t1 < 0)
def connect(self,tensor_index):
assert self.isFree(), "edge already connected to two tensors"
if self.t0<0:
self.t0 = tensor_index
else:
assert not self.t0==tensor_index, "edge connects to the same tensor"
self.t1 = tensor_index
def has(self,tensor_index):
return (self.t0==tensor_index or self.t1==tensor_index)
class TensorNetwork:
def __init__(self):
self.tensors = []
self.bonds = []
self.total_memory = 0.0
self.max_memory = 0.0
self.cpu_cost = 0.0
def __str__(self):
s = ""
for i,t in enumerate(self.tensors):
s += "tensor {0} : {1}\n".format(i,t)
for i,b in enumerate(self.bonds):
s += "bond {0} : {1}, {2} {3}\n".format(i,BOND_NAMES[i],b,BOND_DIMS[i])
s += "memory : {0}\n".format(self.total_memory)
s += "cpu : {0}\n".format(self.cpu_cost)
return s
def clone(self):
tn = TensorNetwork()
tn.total_memory = self.total_memory
tn.max_memory = self.max_memory
tn.cpu_cost = self.cpu_cost
tn.bonds = [ Bond(b.t0,b.t1) for b in self.bonds ]
tn.tensors = [ Tensor(t.name,t.bonds) for t in self.tensors ]
return tn
def output_log(self,prefix=""):
if not prefix=="": prefix += " "
for i,t in enumerate(self.tensors):
logging.info(prefix + "tensor{0} : {1} {2}".format(i,TENSOR_NAMES[i],t.bonds))
for i,b in enumerate(self.bonds):
logging.info(prefix + "bond{0} : {1} {2} {3}".format(i,BOND_NAMES[i],b,BOND_DIMS[i]))
def add_tensor(self, t_name, b_names):
t_index = len(self.tensors)
b_indexs = []
for b in b_names:
if b not in BOND_NAMES:
self.bonds.append(Bond())
BOND_NAMES.append(b)
BOND_DIMS.append(config.DEFAULT_BOND_DIM)
i = BOND_NAMES.index(b)
self.bonds[i].connect(t_index)
b_indexs.append(i)
TENSOR_NAMES.append(t_name)
self.tensors.append(Tensor(t_index,b_indexs))
def find_bonds(self, tensor_a, tensor_b):
bonds_a = self.tensors[tensor_a].bonds
bonds_b = self.tensors[tensor_b].bonds
contract = [ b for b in bonds_a if b in bonds_b]
replaced_a = [ b for b in bonds_a if b not in bonds_b ]
replaced_b = [ b for b in bonds_b if b not in bonds_a ]
return contract, replaced_a, replaced_b
def contract(self, t0, t1, bc, br0, br1):
tn = self.clone()
# create the contracted tensor
t_new = tn.tensors[t0]
## change names of tensors using Reverse Polish Notation
t_new.name = self.tensors[t0].name+self.tensors[t1].name+[-1]
## remove contracted bonds
for b in bc: t_new.bonds.remove(b)
## add bonds from deleted tensor
for b in br1: t_new.bonds.append(b)
# clear the removed tensor
tn.tensors[t1] = Tensor()
# update bonds
bonds = tn.bonds
## remove contracted bonds from the bond list
for b in bc: bonds[b].t0 = bonds[b].t1 = -1
## change bond connections
old_idx = t1
new_idx = t0
for b in br1:
if bonds[b].t0==old_idx: bonds[b].t0=new_idx
elif bonds[b].t1==old_idx: bonds[b].t1=new_idx
return tn
def get_memory(tn_orig,rpn):
"""Caluculate memory cost for contractions from Reverse Polish Notation"""
tn = tn_orig.clone()
cost = []
for item in rpn:
if item==-1:
c1 = cost.pop()
c0 = cost.pop()
index1 = c1[0]
index0 = c0[0]
t0 = tn.tensors[index0]
t1 = tn.tensors[index1]
bc, br0, br1 = tn.find_bonds(index0, index1)
mem_start = c0[2] + c1[2]
mem_end = 1.0
for b in br0 + br1: mem_end *= BOND_DIMS[b]
mem_req = max(c0[1]+c1[2], c0[1]+c1[3], c0[2]+c1[1], c0[3]+c1[1], mem_end+c0[3]+c1[3])
tn = tn.contract(index0, index1, bc, br0, br1)
cost.append( (index0, mem_req, mem_start, mem_end) )
else:
t = tn.tensors[item]
val = 1.0
for b in t.bonds: val *= BOND_DIMS[b]
cost.append( (item, val, val, val) ) # (index, mem_req, mem_start, mem_end)
return cost[0][1]
def get_math(rpn):
"""Generate mathematical formula from Reverse Polish Notation"""
stack = []
for c in rpn:
if c==-1:
t1 = stack.pop()
t0 = stack.pop()
new_name = "("+t0+"*"+t1+")"
stack.append( new_name )
else:
stack.append(TENSOR_MATH_NAMES[c])
return stack[0]
def get_script(tn_orig,rpn):
"""Generate tensordot script from Reverse Polish Notation"""
tn = tn_orig.clone()
index = []
name = []
for c in rpn:
if c==-1:
index1 = index.pop()
index0 = index.pop()
name1 = name.pop()
name0 = name.pop()
t0 = tn.tensors[index0]
t1 = tn.tensors[index1]
bc, br0, br1 = tn.find_bonds(index0, index1)
axes0 = [ t0.bonds.index(b) for b in bc]
axes1 = [ t1.bonds.index(b) for b in bc]
tn = tn.contract(index0, index1, bc, br0, br1)
trace = (len(br0)==0 and len(br1)==0)
new_name = tensordot_script(name0,name1,axes0,axes1,trace)
index.append(index0)
name.append(new_name)
else:
index.append(c)
name.append([TENSOR_NAMES[c]])
bond_order = tn.tensors[index.pop()].bonds
return name.pop(), bond_order
def tensordot_script(name0,name1,axes0,axes1,trace=False):
if config.STYLE == "numpy":
func_name = config.NUMPY+".tensordot"
axes = "(" + str(axes0) + ", " + str(axes1) + ")"
elif config.STYLE == "mptensor":
func_name = "trace" if trace else "tensordot"
str_axes0 = str(tuple(axes0)) if len(axes0)>1 else "("+str(axes0[0])+")"
str_axes1 = str(tuple(axes1)) if len(axes1)>1 else "("+str(axes1[0])+")"
axes = "Axes" + str_axes0 + ", " + "Axes" + str_axes1
script = []
script.append( func_name + "(" )
for l in name0: script.append(config.INDENT + l)
script[-1] += ", " + name1[0]
for i in range(1,len(name1)): script.append(config.INDENT + name1[i])
script[-1] += ", " + axes
script.append( ")" )
return script
def transpose_script(name,axes):
if config.STYLE == "numpy":
func_name = config.NUMPY+".transpose"
axes = str(axes)
elif config.STYLE == "mptensor":
func_name = "transpose"
str_axes = str(tuple(axes)) if len(axes)>1 else "("+str(axes[0])+")"
axes = "Axes" + str_axes
script = []
script.append( func_name + "(" )
for l in name: script.append(config.INDENT + l)
script[-1] += ", " + axes
script.append( ")" )
return script
def multiply_vector_script(name,vec_list,rank):
if config.STYLE == "numpy":
newaxis = ","+config.NUMPY+".newaxis"
script = "("+name
for axis,vec_name in vec_list:
if axis==rank-1:
script += "*"+vec_name
else:
script += "*"+vec_name+"[:"+newaxis*(rank-axis-1)+"]"
script += ")"
if config.STYLE == "mptensor":
arg = []
for axis,vec_name in vec_list:
arg.append(vec_name)
arg.append(str(axis))
script = name + ".multiply_vector(" + ",".join(arg)+ ")"
math = "("+name
for axis,vec_name in vec_list: math += "*"+vec_name
math += ")"
return script, math
def add_transpose(tn,script,bond_order):
if FINAL_ORDER == None: return script, bond_order
f_order = [ BOND_NAMES.index(b) for b in FINAL_ORDER ]
if not sorted(f_order)==sorted(bond_order):
logging.warning("The final bond order is invalid. It is ignored.")
return script,bond_order
elif f_order == bond_order:
logging.info("The final bond order was requested, but Transpose is not necessary.")
return script, bond_order
axes = [ bond_order.index(b) for b in f_order ]
return transpose_script(script,axes), f_order
def add_multiply_vector(tn):
"""Change names of tensors by vector multiplications"""
if len(VECTORS)==0: return
mod_list = [[] for _ in TENSOR_NAMES]
for v_name, b_name in VECTORS:
assert (b_name in BOND_NAMES), "Vector ({0}) is multiplied onto a non-existent bond.".format(v_name)
b_index = BOND_NAMES.index(b_name)
bond = tn.bonds[b_index]
t0, t1 = bond.t0, bond.t1
# find a smaller tensor
if t0>-1 and t1>-1:
mem0 = mem1 = 1.0
for b in tn.tensors[t0].bonds: mem0 *= BOND_DIMS[b]
for b in tn.tensors[t1].bonds: mem1 *= BOND_DIMS[b]
t = t0 if mem0<mem1 else t1
else:
t = max(t0, t1)
axis = tn.tensors[t].bonds.index(b_index)
mod_list[t].append((axis,v_name))
logging.debug("vector: "+v_name+" on bond"+str(b_index)+" -> tensor"+str(t))
for i,l in enumerate(mod_list):
if len(l)==0: continue
rank = len(tn.tensors[i].bonds)
new_name, new_math = multiply_vector_script(TENSOR_NAMES[i],sorted(l),rank)
logging.info("vector: "+TENSOR_NAMES[i]+" -> "+new_math)
TENSOR_NAMES[i] = new_name
TENSOR_MATH_NAMES[i] = new_math
def read_file(infile, tn):
"""Read input file"""
global FINAL_ORDER
for line in infile:
data = line.split()
if data==[]: continue
command = data[0].lower()
if command=="style":
set_style(data[1].lower())
elif command=="numpy":
config.NUMPY = data[1]
elif command=="indent":
config.INDENT = " " * int(data[1])
elif command=="default_dimension":
# Should be set at the top of the input file.
config.DEFAULT_BOND_DIM = int(data[1])
elif command=="debug" or command=="verbose":
config.LOGGING_LEVEL = logging.DEBUG
elif command=="tensor":
tn.add_tensor(data[1], data[2:])
elif command=="bond":
for b in data[1:-1]: set_bond_dim(b, int(data[-1]))
elif command=="bond_dim":
for b in data[2:]: set_bond_dim(b, int(data[1]))
elif command=="order":
FINAL_ORDER = data[1:]
elif command=="vector":
VECTORS.append((data[1], data[2]))
infile.close()
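# Illustrative input-file sketch (hypothetical tensor names and dimensions,
# using the commands parsed above):
#
#   style    numpy
#   tensor   A  a b
#   tensor   B  b c
#   bond     a c 10
#   bond_dim 20 b
#   order    a c
#   vector   v  b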
def set_bond_dim(bond_name, dim):
BOND_DIMS[ BOND_NAMES.index(bond_name) ] = dim
def set_style(style):
if style=="numpy":
config.STYLE = "numpy"
config.COMMENT_PREFIX = "#"
elif style=="mptensor":
config.STYLE = "mptensor"
config.COMMENT_PREFIX = "//"
def check_bond_order(tn):
return FINAL_ORDER == None or \
frozenset(FINAL_ORDER) == frozenset( BOND_NAMES[i] for i,b in enumerate(tn.bonds) if b.isFree() )
def check_vector():
for v in VECTORS:
if v[1] not in BOND_NAMES: return False
return True
def output_result(outfile,script,math_script,cpu,mem,bond_order,input_file):
final_bonds = "(" + ", ".join([BOND_NAMES[b] for b in bond_order]) + ")"
BR = "\n"
SP = " "
output = [config.COMMENT_PREFIX*30,
config.COMMENT_PREFIX + SP + input_file,
config.COMMENT_PREFIX*30,
config.COMMENT_PREFIX + SP + math_script,
config.COMMENT_PREFIX + SP + "cpu_cost= {0:g} memory= {1:g}".format(cpu, mem),
config.COMMENT_PREFIX + SP + "final_bond_order " + final_bonds,
config.COMMENT_PREFIX*30]
output += script
outfile.write(BR.join(output) + BR)
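# output_result writes a comment header followed by the generated script; for
# the numpy style the output looks roughly like this (placeholder values):
#   ##############################
#   # input.dat
#   ##############################
#   # (A*B)
#   # cpu_cost= 2e+06 memory= 40000
#   # final_bond_order (a, b, d, e)
#   ##############################
#   ...generated contraction code...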
def parse_args():
    parser = argparse.ArgumentParser(description="Code generator for tensor contraction")
parser.add_argument('-s', metavar='style', dest='style',
type=str, default=None,
choices=['numpy', 'mptensor'],
help='set output style ("numpy" or "mptensor")')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
help='verbose mode')
parser.add_argument('-o', metavar='outfile', dest='outfile',
type=argparse.FileType('w'), default=sys.stdout,
help='write the result to outfile')
parser.add_argument('infile',
type=argparse.FileType('r'),
help='tensor-network definition file')
return parser.parse_args()
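# Typical invocation (the script and file names here are assumptions):
#   python tensordot.py -s numpy -o contract.py input.dat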
if __name__ == "__main__":
args = parse_args()
tn = TensorNetwork()
# Read input file
read_file(args.infile, tn)
# Overwrite by command-line option
set_style(args.style)
if args.verbose:
config.LOGGING_LEVEL = logging.DEBUG
assert len(tn.tensors)>0, "No tensor."
assert len(tn.bonds)>0, "No bond."
assert check_bond_order(tn), "Final bond order is invalid."
assert check_vector(), "Vectors will be put on non-existent bond."
logging.basicConfig(format="%(levelname)s:%(message)s", level=config.LOGGING_LEVEL)
tn.output_log("input")
rpn, cpu = netcon.NetconOptimizer(tn.tensors, BOND_DIMS).optimize()
mem = get_memory(tn, rpn)
TENSOR_MATH_NAMES = TENSOR_NAMES[:]
add_multiply_vector(tn)
script, bond_order = get_script(tn, rpn)
script, bond_order = add_transpose(tn, script, bond_order)
output_result(args.outfile,
script,get_math(rpn),cpu,mem,bond_order,
args.infile.name)
|
|
"""Creates ACME accounts for server."""
import logging
import os
import re
import configobj
import zope.component
from letsencrypt.acme import messages2
from letsencrypt.client import crypto_util
from letsencrypt.client import errors
from letsencrypt.client import interfaces
from letsencrypt.client import le_util
from letsencrypt.client.display import util as display_util
class Account(object):
"""ACME protocol registration.
:ivar config: Client configuration object
:type config: :class:`~letsencrypt.client.interfaces.IConfig`
:ivar key: Account/Authorized Key
:type key: :class:`~letsencrypt.client.le_util.Key`
:ivar str email: Client's email address
:ivar str phone: Client's phone number
:ivar regr: Registration Resource
:type regr: :class:`~letsencrypt.acme.messages2.RegistrationResource`
"""
# Just make sure we don't get pwned
    # Note: the address also must not start with a period or contain two
    # consecutive periods; safe_email() performs those checks in addition to the regex.
EMAIL_REGEX = re.compile("[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+$")
def __init__(self, config, key, email=None, phone=None, regr=None):
le_util.make_or_verify_dir(
config.accounts_dir, 0o700, os.geteuid())
self.key = key
self.config = config
if email is not None and self.safe_email(email):
self.email = email
else:
self.email = None
self.phone = phone
self.regr = regr
@property
def uri(self):
"""URI link for new registrations."""
if self.regr is not None:
return self.regr.uri
else:
return None
@property
def new_authzr_uri(self): # pylint: disable=missing-docstring
if self.regr is not None:
return self.regr.new_authzr_uri
else:
return None
@property
def terms_of_service(self): # pylint: disable=missing-docstring
if self.regr is not None:
return self.regr.terms_of_service
else:
return None
@property
def recovery_token(self): # pylint: disable=missing-docstring
if self.regr is not None and self.regr.body is not None:
return self.regr.body.recovery_token
else:
return None
def save(self):
"""Save account to disk."""
le_util.make_or_verify_dir(
self.config.accounts_dir, 0o700, os.geteuid())
acc_config = configobj.ConfigObj()
acc_config.filename = os.path.join(
self.config.accounts_dir, self._get_config_filename(self.email))
acc_config.initial_comment = [
"DO NOT EDIT THIS FILE",
"Account information for %s under %s" % (
self._get_config_filename(self.email), self.config.server),
]
acc_config["key"] = self.key.file
acc_config["phone"] = self.phone
if self.regr is not None:
acc_config["RegistrationResource"] = {}
acc_config["RegistrationResource"]["uri"] = self.uri
acc_config["RegistrationResource"]["new_authzr_uri"] = (
self.new_authzr_uri)
acc_config["RegistrationResource"]["terms_of_service"] = (
self.terms_of_service)
regr_dict = self.regr.body.to_json()
acc_config["RegistrationResource"]["body"] = regr_dict
acc_config.write()
@classmethod
def _get_config_filename(cls, email):
        return email if email else "default"
@classmethod
def from_existing_account(cls, config, email=None):
"""Populate an account from an existing email."""
config_fp = os.path.join(
config.accounts_dir, cls._get_config_filename(email))
return cls._from_config_fp(config, config_fp)
@classmethod
def _from_config_fp(cls, config, config_fp):
try:
acc_config = configobj.ConfigObj(
infile=config_fp, file_error=True, create_empty=False)
except IOError:
raise errors.LetsEncryptClientError(
"Account for %s does not exist" % os.path.basename(config_fp))
if os.path.basename(config_fp) != "default":
email = os.path.basename(config_fp)
else:
email = None
phone = acc_config["phone"] if acc_config["phone"] != "None" else None
with open(acc_config["key"]) as key_file:
key = le_util.Key(acc_config["key"], key_file.read())
if "RegistrationResource" in acc_config:
acc_config_rr = acc_config["RegistrationResource"]
regr = messages2.RegistrationResource(
uri=acc_config_rr["uri"],
new_authzr_uri=acc_config_rr["new_authzr_uri"],
terms_of_service=acc_config_rr["terms_of_service"],
body=messages2.Registration.from_json(acc_config_rr["body"]))
else:
regr = None
return cls(config, key, email, phone, regr)
@classmethod
def get_accounts(cls, config):
"""Return all current accounts.
:param config: Configuration
:type config: :class:`letsencrypt.client.interfaces.IConfig`
"""
try:
filenames = os.listdir(config.accounts_dir)
except OSError:
return []
accounts = []
for name in filenames:
            # Skip anything that is not a file (e.g. the keys directory)
config_fp = os.path.join(config.accounts_dir, name)
if os.path.isfile(config_fp):
accounts.append(cls._from_config_fp(config, config_fp))
return accounts
@classmethod
def from_prompts(cls, config):
"""Generate an account from prompted user input.
:param config: Configuration
:type config: :class:`letsencrypt.client.interfaces.IConfig`
:returns: Account or None
:rtype: :class:`letsencrypt.client.account.Account`
"""
while True:
code, email = zope.component.getUtility(interfaces.IDisplay).input(
"Enter email address (optional, press Enter to skip)")
if code == display_util.OK:
try:
return cls.from_email(config, email)
except errors.LetsEncryptClientError:
continue
else:
return None
@classmethod
def from_email(cls, config, email):
"""Generate a new account from an email address.
:param config: Configuration
:type config: :class:`letsencrypt.client.interfaces.IConfig`
:param str email: Email address
:raises letsencrypt.client.errors.LetsEncryptClientError: If invalid
email address is given.
"""
if not email or cls.safe_email(email):
email = email if email else None
le_util.make_or_verify_dir(
config.account_keys_dir, 0o700, os.geteuid())
key = crypto_util.init_save_key(
config.rsa_key_size, config.account_keys_dir,
cls._get_config_filename(email))
return cls(config, key, email)
raise errors.LetsEncryptClientError("Invalid email address.")
@classmethod
def safe_email(cls, email):
"""Scrub email address before using it."""
if cls.EMAIL_REGEX.match(email):
return not email.startswith(".") and ".." not in email
else:
            logging.warning("Invalid email address.")
return False
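# Rough usage sketch (assumes an object implementing IConfig named `config`;
# not part of this module):
#
#   acc = Account.from_email(config, "admin@example.org")  # new key + account
#   acc.save()                                             # persist under accounts_dir
#   accounts = Account.get_accounts(config)                # reload saved accounts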
|