| repo_name | ref | path | copies | content |
|---|---|---|---|---|
phiroict/spotify-longlist | refs/heads/master | APITest/MainTest.py | 1 | __author__ = 'phiro'
import http.client
import Constants
import json
class APITest:
keys = {}
def readKeys(self):
with open("../licensekeys.properties") as reader:
for line in reader:
line = line.rstrip() #removes trailing whitespace and '\n' chars
if "=" not in line: continue #skips blanks and comments w/o =
if line.startswith("#"): continue #skips comments which contain =
k, v = line.split("=", 1)
self.keys[k] = v
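# The parser above expects a plain "key=value" properties file, e.g.
# (key names below are hypothetical, only the format is assumed):
#
#     # Spotify API credentials
#     client_id=abc123
#     client_secret=def456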
def searchArtist(self,query="weezer"):
h = http.client.HTTPSConnection(Constants.base_url)
request = Constants.search_url.format("{}&type=artist".format(query))
print("Calling {}".format(request))
h.request("GET", request)
r = h.getresponse()
obj = json.loads(r.read().decode('utf-8'))
print(str(obj["artists"]["offset"]))
def searchZeroPopulair(self,query="weezer"):
h = http.client.HTTPSConnection(Constants.base_url)
request = Constants.search_url.format("{}&type=artist".format(query))
print("Calling {}".format(request))
h.request("GET", request)
r = h.getresponse()
obj = json.loads(r.read().decode('utf-8'))
print(str(obj["artists"]["offset"]))
# Test entry
a = APITest()
a.readKeys()
a.searchArtist()
|
frifri/django-tastypie | refs/heads/master | tests/namespaced/api/resources.py | 44 | from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource, NamespacedModelResource
from tastypie.authorization import Authorization
from basic.models import Note
class NamespacedUserResource(NamespacedModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
authorization = Authorization()
class NamespacedNoteResource(NamespacedModelResource):
user = fields.ForeignKey(NamespacedUserResource, 'user')
class Meta:
resource_name = 'notes'
queryset = Note.objects.all()
authorization = Authorization()
|
cgre-aachen/gempy | refs/heads/master | gempy/plot/__init__.py | 1 |
if __name__ == '__main__':
pass
|
xamfoo/thumbor-docker | refs/heads/master | setup/thumbor/tests/fixtures/mongo_storage_conf.py | 14 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
STORAGE = 'thumbor.storages.mongo_storage'
MONGO_STORAGE_SERVER_HOST = 'localhost'
MONGO_STORAGE_SERVER_PORT = 27017
MONGO_STORAGE_SERVER_DB = 'thumbor_test'
MONGO_STORAGE_SERVER_COLLECTION = 'images'
|
htcondor/htcondor | refs/heads/master | src/condor_tests/test_htcondor_dags/__init__.py | 5 | # Copyright 2020 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
ValentinPearce/The-Shades | refs/heads/master | The-Shades.py | 2 | #==============
#INITIALISATION
#==============
import Game
import termios, sys
description = ''
def init(): # Sets up the game elements.
global description
description = Game.init()
def display(): # Displays some game elements and the text for the last action performed.
global description
Game.display(description,36)
def interact(): # Captures any player interaction.
global description
description = Game.getAction()
def run():
while 1: # Simulation loop.
Game.checkHealth()
Game.checkTime(36)
display()
interact()
def main(): # Main function
init()
run()
#=========
#EXECUTION
#=========
main()
|
stanmoore1/lammps | refs/heads/master | unittest/python/python-open.py | 5 |
import sys,os,unittest
from lammps import lammps
has_mpi=False
has_mpi4py=False
has_exceptions=False
try:
from mpi4py import __version__ as mpi4py_version
# tested to work with mpi4py versions 2 and 3
has_mpi4py = mpi4py_version.split('.')[0] in ['2','3']
except:
pass
try:
if 'LAMMPS_MACHINE_NAME' in os.environ:
machine = os.environ['LAMMPS_MACHINE_NAME']
else:
machine = ""
lmp = lammps(name=machine)
has_mpi = lmp.has_mpi_support
has_exceptions = lmp.has_exceptions
lmp.close()
except:
pass
class PythonOpen(unittest.TestCase):
def setUp(self):
self.machine=None
if 'LAMMPS_MACHINE_NAME' in os.environ:
self.machine=os.environ['LAMMPS_MACHINE_NAME']
def testNoArgs(self):
"""Create LAMMPS instance without any arguments"""
lmp=lammps(name=self.machine)
self.assertIsNot(lmp.lmp,None)
self.assertEqual(lmp.opened,1)
self.assertEqual(has_mpi and has_mpi4py,lmp.has_mpi4py)
self.assertEqual(has_mpi,lmp.has_mpi_support)
lmp.close()
self.assertIsNone(lmp.lmp)
self.assertEqual(lmp.opened,0)
def testWithArgs(self):
"""Create LAMMPS instance with a few arguments"""
lmp=lammps(name=self.machine,
cmdargs=['-nocite','-sf','opt','-log','none'])
self.assertIsNot(lmp.lmp,None)
self.assertEqual(lmp.opened,1)
def testContextManager(self):
"""Automatically clean up LAMMPS instance"""
with lammps(name=self.machine) as lmp:
self.assertIsNot(lmp.lmp,None)
self.assertEqual(lmp.opened,1)
self.assertEqual(has_mpi and has_mpi4py,lmp.has_mpi4py)
self.assertEqual(has_mpi,lmp.has_mpi_support)
self.assertIsNone(lmp.lmp)
self.assertEqual(lmp.opened,0)
@unittest.skipIf(not (has_mpi and has_mpi4py),"Skipping MPI test since LAMMPS is not parallel or mpi4py is not found")
def testWithMPI(self):
from mpi4py import MPI
mycomm=MPI.Comm.Split(MPI.COMM_WORLD, 0, 1)
lmp=lammps(name=self.machine,comm=mycomm)
self.assertIsNot(lmp.lmp,None)
self.assertEqual(lmp.opened,1)
lmp.close()
@unittest.skipIf(not has_exceptions,"Skipping death test since LAMMPS isn't compiled with exception support")
def testUnknownCommand(self):
lmp = lammps(name=self.machine)
with self.assertRaisesRegex(Exception, "ERROR: Unknown command: write_paper"):
lmp.command("write_paper")
lmp.close()
@unittest.skipIf(not has_exceptions,"Skipping death test since LAMMPS isn't compiled with exception support")
def testUnknownCommandInList(self):
lmp = lammps(name=self.machine)
with self.assertRaisesRegex(Exception, "ERROR: Unknown command: write_paper"):
lmp.commands_list(["write_paper"])
lmp.close()
@unittest.skipIf(not has_exceptions,"Skipping death test since LAMMPS isn't compiled with exception support")
def testUnknownCommandInString(self):
lmp = lammps(name=self.machine)
with self.assertRaisesRegex(Exception, "ERROR: Unknown command: write_paper"):
lmp.commands_string("write_paper")
lmp.close()
if __name__ == "__main__":
unittest.main()
|
mayconvm/phonetooth | refs/heads/master | phonetooth/mergecontactsdialog.py | 3 | # Copyright (C) 2008 Dirk Vanden Boer <dirk.vdb@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import gobject
import gtk
import gtk.glade
import threading
from phonetooth import contacts
from gettext import gettext as _
class MergeContactsDialog:
def __init__(self, widgetTree, parent = None):
self.__mergeContactsDialog = widgetTree.get_widget('mergeContactsDialog')
self.__collisionView = widgetTree.get_widget('collisionView')
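# ListStore columns (derived from the column bindings below):
# 0 = contact name, 1 = keep existing number (bool), 2 = existing number,
# 3 = keep imported number (bool), 4 = imported number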
self.collisionlistStore = gtk.ListStore(str, bool, str, bool, str)
nameRenderer = gtk.CellRendererText()
nameRenderer.set_property('editable', False)
nrRenderer = gtk.CellRendererText()
nrRenderer.set_property('editable', False)
checkRenderer = gtk.CellRendererToggle()
checkRenderer.set_property('activatable', True)
checkRenderer.connect('toggled', self.__numberToggledCb)
self.__nameColumn = gtk.TreeViewColumn(_("Name"), nameRenderer, text = 0)
self.__nrColumn1 = gtk.TreeViewColumn(_("Existing number"), nrRenderer, text = 2)
self.__nrColumn2 = gtk.TreeViewColumn(_("Imported number"), nrRenderer, text = 4)
self.__checkColumn1 = gtk.TreeViewColumn(None, checkRenderer, active = 1)
self.__checkColumn2 = gtk.TreeViewColumn(None, checkRenderer, active = 3)
self.__collisionView.set_model(self.collisionlistStore)
self.__collisionView.append_column(self.__nameColumn)
self.__collisionView.append_column(self.__checkColumn1)
self.__collisionView.append_column(self.__nrColumn1)
self.__collisionView.append_column(self.__checkColumn2)
self.__collisionView.append_column(self.__nrColumn2)
self.__mergeContactsDialog.set_transient_for(parent)
dic = {
'onOverwriteToggled' : self.__overwriteModeToggled,
'onSkipModeToggled' : self.__skipModeSkipToggled
}
widgetTree.signal_autoconnect(dic)
def run(self, collisions):
resolvedCollisions = None
self.__updateListStore(collisions)
if self.__mergeContactsDialog.run() == 1:
resolvedCollisions = self.__getResolvedContacts()
self.__mergeContactsDialog.hide()
return resolvedCollisions
def __updateListStore(self, collisions):
self.collisionlistStore.clear()
for collision in collisions:
self.collisionlistStore.append((collision.name, False, collision.phoneNumber1, False, collision.phoneNumber2))
self.__applySkipMode()
def __numberToggledCb(self, cellrenderer, path):
iter = self.collisionlistStore.iter_nth_child(None, int(path))
number1Enabled = self.collisionlistStore.get_value(iter, 1)
self.collisionlistStore.set_value(iter, 1, not number1Enabled)
self.collisionlistStore.set_value(iter, 3, number1Enabled)
def __applyOverwriteMode(self):
iter = self.collisionlistStore.get_iter_first()
while iter != None:
self.collisionlistStore.set_value(iter, 1, False)
self.collisionlistStore.set_value(iter, 3, True)
iter = self.collisionlistStore.iter_next(iter)
def __applySkipMode(self):
iter = self.collisionlistStore.get_iter_first()
while iter != None:
self.collisionlistStore.set_value(iter, 1, True)
self.collisionlistStore.set_value(iter, 3, False)
iter = self.collisionlistStore.iter_next(iter)
def __overwriteModeToggled(self, togglebutton):
if togglebutton.get_active():
self.__applyOverwriteMode()
def __skipModeSkipToggled(self, togglebutton):
if togglebutton.get_active():
self.__applySkipMode()
def __getResolvedContacts(self):
resolvedContacts = contacts.ContactList()
iter = self.collisionlistStore.get_iter_first()
while iter != None:
name = self.collisionlistStore.get_value(iter, 0)
if self.collisionlistStore.get_value(iter, 1) == True:
phoneNumber = self.collisionlistStore.get_value(iter, 2)
else:
phoneNumber = self.collisionlistStore.get_value(iter, 4)
resolvedContacts.addContact(contacts.Contact(name, phoneNumber))
iter = self.collisionlistStore.iter_next(iter)
return resolvedContacts
|
cechrist/cardoon | refs/heads/master | cardoon/devices/mosExt.py | 1 | """
:mod:`mosExt` -- Extrinsic MOSFET model
---------------------------------------
.. module:: mosExt
.. moduleauthor:: Carlos Christoffersen
This module add the extrinsic part to an intrinsic mosfet model.
Requirements for intrinsic model classes:
1. Control voltages: VDB, VGB, VSB
2. Output currents: IDS, IDB, ISB
3. Output charges: QD, QG, QS
4. csOutPorts, controlPorts and qsOutPorts may be overwritten here
5. linearVCCS and linearVCQS handled by this module
6. Intrinsic device name must not include ``mos`` and must end with
``_i``. Extrinsic device name is: ``mos<intrinsic minus _i>``
7. Make sure parameter names do not collide
8. Noise ports/model not implemented here yet.
Usage::
import mosExt
EKV = mosExt.extrinsic_mos(IntEKV)
"""
import numpy as np
import cardoon.circuit as cir
from cardoon.globalVars import const, glVar
import cppaddev as ad
from diode import Junction
def extrinsic_mos(IMOS):
class ExtMOS(IMOS):
"""
Extrinsic Silicon MOSFET
------------------------
Extrinsic Internal Topology
+++++++++++++++++++++++++++
The model adds the following to the intrinsic model (for NMOS)::
o D (0)
|
\
Cgdo / Rd Drain/source area plus
\ sidewall model
|| |-----------,-----,
,------||------------| | |
| || | ----- -----
| ||--- ----- / \
| || | -----
G (1) o---+----------------||<-------------+-----+------o B (3)
| || | -----
| ||--- ----- \ /
| || | ----- -----
`------||------------| | |
|| |-----------'-----'
\
Cgso / Rs
\
|
o S (2)
Note 1: electrothermal implementation (if any) does not account for
the power dissipation in Rd and Rs. Use external thermal resistors
if that is needed.
Note 2: operating point information is given for just one
intrinsic device even if ``m > 1``.
"""
# devtype is the 'model' name: remove the '_i' from intrinsic name
devType = 'mos' + IMOS.devType.split('_i')[0]
# Additional documentation
extraDoc = """
Netlist examples
++++++++++++++++
The model accepts extrinsic plus intrinsic parameters (only
extrinsic parameters shown in example)::
{0}:m1 2 3 4 gnd w=10u l=1u asrc=4e-12 ps=8e-12 model=nch
{0}:m2 4 5 6 6 w=30e-6 l=1e-6 pd=8u ps=16u type=p
.model nch {0} (type=n js=1e-3 cj=2e-4 cjsw=1n)
Intrinsic model
+++++++++++++++
See **{1}** intrinsic model documentation.
""".format(devType, IMOS.devType)
paramDict = dict(
IMOS.paramDict.items(),
m = ('Parallel multiplier', '', float, 1.),
cgdo = ('Gate-drain overlap capacitance per meter channel width',
'F/m', float, 0.),
cgso = ('Gate-source overlap capacitance per meter channel width',
'F/m', float, 0.),
cgbo = ('Gate-bulk overlap capacitance per meter channel length',
'F/m', float, 0.),
rsh = ('Drain and source diffusion sheet resistance',
'Ohm/square', float, 0.),
js = ('Source drain junction current density', 'A/m^2', float, 0.),
jssw = ('Source drain sidewall junction current density',
'A/m', float, 0.),
pb = ('Built in potential of source drain junction',
'V', float, .8),
mj = ('Grading coefficient of source drain junction',
'', float, .5),
pbsw = ('Built in potential of source, drain junction sidewall',
'V', float, .8),
mjsw = ('Grading coefficient of source drain junction sidewall',
'', float, .33),
cj = ('Source drain junction capacitance per unit area',
'F/m^2', float, 0.),
cjsw = (
'Source drain junction sidewall capacitance per unit length',
'F/m', float, 0.),
ad = ('Drain area', 'm^2', float, 0.),
asrc = ('Source area', 'm^2', float, 0.),
pd = ('Drain perimeter', 'm', float, 0.),
ps = ('Source perimeter', 'm', float, 0.),
nrd = ('Number of squares in drain', 'squares', float, 1.),
nrs = ('Number of squares in source', 'squares', float, 1.),
fc = ('Coefficient for forward-bias depletion capacitances', ' ',
float, .5),
xti = ('Junction saturation current temperature exponent', '',
float, 3.),
eg0 = ('Energy bandgap', 'eV', float, 1.11)
)
def __init__(self, instanceName):
IMOS.__init__(self, instanceName)
self.__doc__ += IMOS.__doc__
def process_params(self, thermal = False):
# Remove tape if present
ad.delete_tape(self)
# Remove internal terminals (there should be none created
# by intrinsic model)
self.clean_internal_terms()
# Tell autothermal (if used) to re-generate thermal ports
self.__addThermalPorts = True
# By default drain and source are terminals 0 and 2
self.__di = 0
self.__si = 2
# Resistances
extraVCCS = list()
if self.rsh != 0.:
if self.nrd != 0.:
# Drain resistor
self.__di = self.add_internal_term('di', 'V')
extraVCCS += [((0, self.__di), (0, self.__di),
1. / self.rsh / self.nrd)]
if self.nrs != 0.:
# Source resistor
self.__si = self.add_internal_term('si', 'V')
extraVCCS += [((2, self.__si), (2, self.__si),
1. / self.rsh / self.nrs)]
# Linear capacitances
extraVCQS = list()
if self.cgdo != 0.:
# Gate-drain overlap cap
extraVCQS += [((1, self.__di), (1, self.__di),
self.cgdo * self.w)]
if self.cgso != 0.:
# Gate-source overlap cap
extraVCQS += [((1, self.__si), (1, self.__si),
self.cgso * self.w)]
if self.cgbo != 0.:
# Gate-bulk overlap cap
extraVCQS += [((1, 3), (1, 3),
self.cgbo * self.l)]
# Add extra linear resistors/caps (if any)
self.linearVCCS = extraVCCS
self.linearVCQS = extraVCQS
# Override nonlinear port specs if needed
if extraVCCS:
# Ids, Idb, Isb
self.csOutPorts = [(self.__di, self.__si), (self.__di, 3),
(self.__si, 3)]
# Controling voltages are DB, GB and SB
self.controlPorts = [(self.__di, 3), (1, 3), (self.__si, 3)]
# One charge source connected to each D, G, S
self.qsOutPorts = [(self.__di, 3), (1, 3), (self.__si, 3)]
# Calculate some variables (that may also be calculated in
# intrinsic model)
self.__Tnabs = const.T0 + self.tnom
self.__egapn = self.eg0 - .000702 * (self.__Tnabs**2) \
/ (self.__Tnabs + 1108.)
# Initialize variables in junctions
if self.ad != 0.:
self.dj = Junction()
self.dj.process_params(isat = self.js * self.ad,
cj0 = self.cj * self.ad,
vj = self.pb, m = self.mj,
n = 1., fc = self.fc,
xti = self.xti, eg0 = self.eg0,
Tnomabs = self.__Tnabs)
if self.asrc != 0.:
self.sj = Junction()
self.sj.process_params(isat = self.js * self.asrc,
cj0 = self.cj * self.asrc,
vj = self.pb, m = self.mj,
n = 1., fc = self.fc,
xti = self.xti, eg0 = self.eg0,
Tnomabs = self.__Tnabs)
if self.pd != 0.:
self.djsw = Junction()
self.djsw.process_params(isat = self.jssw * self.pd,
cj0 = self.cjsw * self.pd,
vj = self.pbsw, m = self.mjsw,
n = 1., fc = self.fc,
xti = self.xti, eg0 = self.eg0,
Tnomabs = self.__Tnabs)
if self.ps != 0.:
self.sjsw = Junction()
self.sjsw.process_params(isat = self.jssw * self.ps,
cj0 = self.cjsw * self.ps,
vj = self.pbsw, m = self.mjsw,
n = 1., fc = self.fc,
xti = self.xti, eg0 = self.eg0,
Tnomabs = self.__Tnabs)
# Process parameters from intrinsic device:
# set_temp_vars() called there
IMOS.process_params(self)
def set_temp_vars(self, temp):
"""
Calculate temperature-dependent variables, given temp in deg. C
"""
# Remove tape if present
ad.delete_tape(self)
# First calculate variables from base class
IMOS.set_temp_vars(self, temp)
# Absolute temperature
Tabs = temp + const.T0
# Temperature-adjusted egap
egap_t = self.eg0 - .000702 * (Tabs**2) / (Tabs + 1108.)
# Thermal voltage
Vt = const.k * Tabs / const.q
# Adjust junction temperatures
if self.ad != 0.:
self.dj.set_temp_vars(Tabs, self.__Tnabs, Vt,
self.__egapn, egap_t)
if self.pd != 0.:
self.djsw.set_temp_vars(Tabs, self.__Tnabs, Vt,
self.__egapn, egap_t)
if self.asrc != 0.:
self.sj.set_temp_vars(Tabs, self.__Tnabs, Vt,
self.__egapn, egap_t)
if self.ps != 0.:
self.sjsw.set_temp_vars(Tabs, self.__Tnabs, Vt,
self.__egapn, egap_t)
def eval_cqs(self, vPort, getOP = False):
"""
vPort is a vector with control voltages
"""
# calculate currents and charges in base class
if getOP:
# More operating point info could be added here: for
# now just return intrinsic model info. Note: this
# info is just for 1 intrinsic device, even if m>1
return IMOS.eval_cqs(self, vPort, True)
(iVec, qVec) = IMOS.eval_cqs(self, vPort)
# Add contribution drain diode
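# (self._tf is assumed to be the intrinsic model's type sign factor,
# +1 for NMOS and -1 for PMOS, so the junctions see the correct
# polarity for both device types)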
v1 = -vPort[0] * self._tf
if self.ad != 0.:
# subtract from idb
iVec[1] -= self.dj.get_id(v1) * self._tf
if self.cj != 0.:
# subtract from qd
qVec[0] -= self.dj.get_qd(v1) * self._tf
if self.pd != 0.:
# subtract from idb
iVec[1] -= self.djsw.get_id(v1) * self._tf
if self.cjsw != 0.:
qVec[0] -= self.djsw.get_qd(v1) * self._tf
# Add contribution source diode
v1 = -vPort[2] * self._tf
if self.asrc != 0.:
# subtract from isb
iVec[2] -= self.sj.get_id(v1) * self._tf
if self.cj != 0.:
# subtract from qs
qVec[2] -= self.sj.get_qd(v1) * self._tf
if self.ps != 0.:
# subtract from isb
iVec[2] -= self.sjsw.get_id(v1) * self._tf
if self.cjsw:
qVec[2] -= self.sjsw.get_qd(v1) * self._tf
# Apply parallel multiplier
iVec *= self.m
qVec *= self.m
return (iVec, qVec)
# Create these using the AD facility
eval_and_deriv = ad.eval_and_deriv
eval = ad.eval
def power(self, vPort, currV):
"""
Calculate total instantaneous power
Power in Rd, Rs not considered.
Input: control voltages as in eval_cqs() and currents
returned by eval_cqs()
"""
pout = IMOS.power(self, vPort, currV)
return pout
# Return template class
return ExtMOS
#------------------------------------------------------------------------
|
glwu/python-for-android | refs/heads/master | python-modules/twisted/twisted/web/iweb.py | 53 | # -*- test-case-name: twisted.web.test -*-
# Copyright (c) 2008-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interface definitions for L{twisted.web}.
@var UNKNOWN_LENGTH: An opaque object which may be used as the value of
L{IBodyProducer.length} to indicate that the length of the entity
body is not known in advance.
"""
from zope.interface import Interface, Attribute
from twisted.internet.interfaces import IPushProducer
from twisted.cred.credentials import IUsernameDigestHash
class IRequest(Interface):
"""
An HTTP request.
@since: 9.0
"""
method = Attribute("A C{str} giving the HTTP method that was used.")
uri = Attribute(
"A C{str} giving the full encoded URI which was requested (including "
"query arguments).")
path = Attribute(
"A C{str} giving the encoded query path of the request URI.")
args = Attribute(
"A mapping of decoded query argument names as C{str} to "
"corresponding query argument values as C{list}s of C{str}. "
"For example, for a URI with C{'foo=bar&foo=baz&quux=spam'} "
"for its query part, C{args} will be C{{'foo': ['bar', 'baz'], "
"'quux': ['spam']}}.")
received_headers = Attribute(
"Backwards-compatibility access to C{requestHeaders}. Use "
"C{requestHeaders} instead. C{received_headers} behaves mostly "
"like a C{dict} and does not provide access to all header values.")
requestHeaders = Attribute(
"A L{http_headers.Headers} instance giving all received HTTP request "
"headers.")
headers = Attribute(
"Backwards-compatibility access to C{responseHeaders}. Use"
"C{responseHeaders} instead. C{headers} behaves mostly like a "
"C{dict} and does not provide access to all header values nor "
"does it allow multiple values for one header to be set.")
responseHeaders = Attribute(
"A L{http_headers.Headers} instance holding all HTTP response "
"headers to be sent.")
def getHeader(key):
"""
Get an HTTP request header.
@type key: C{str}
@param key: The name of the header to get the value of.
@rtype: C{str} or C{NoneType}
@return: The value of the specified header, or C{None} if that header
was not present in the request.
"""
def getCookie(key):
"""
Get a cookie that was sent from the network.
"""
def getAllHeaders():
"""
Return dictionary mapping the names of all received headers to the last
value received for each.
Since this method does not return all header information,
C{requestHeaders.getAllRawHeaders()} may be preferred.
"""
def getRequestHostname():
"""
Get the hostname that the user passed in to the request.
This will either use the Host: header (if it is available) or the
host we are listening on if the header is unavailable.
@returns: the requested hostname
@rtype: C{str}
"""
def getHost():
"""
Get my originally requesting transport's host.
@return: An L{IAddress}.
"""
def getClientIP():
"""
Return the IP address of the client who submitted this request.
@returns: the client IP address or C{None} if the request was submitted
over a transport where IP addresses do not make sense.
@rtype: C{str} or L{NoneType}
"""
def getClient():
"""
Return the hostname of the IP address of the client who submitted this
request, if possible.
This method is B{deprecated}. See L{getClientIP} instead.
@rtype: L{NoneType} or L{str}
@return: The canonical hostname of the client, as determined by
performing a name lookup on the IP address of the client.
"""
def getUser():
"""
Return the HTTP user sent with this request, if any.
If no user was supplied, return the empty string.
@returns: the HTTP user, if any
@rtype: C{str}
"""
def getPassword():
"""
Return the HTTP password sent with this request, if any.
If no password was supplied, return the empty string.
@returns: the HTTP password, if any
@rtype: C{str}
"""
def isSecure():
"""
Return True if this request is using a secure transport.
Normally this method returns True if this request's HTTPChannel
instance is using a transport that implements ISSLTransport.
This will also return True if setHost() has been called
with ssl=True.
@returns: True if this request is secure
@rtype: C{bool}
"""
def getSession(sessionInterface=None):
"""
Look up the session associated with this request or create a new one if
there is not one.
@return: The L{Session} instance identified by the session cookie in
the request, or the C{sessionInterface} component of that session
if C{sessionInterface} is specified.
"""
def URLPath():
"""
@return: A L{URLPath} instance which identifies the URL for which this
request is.
"""
def prePathURL():
"""
@return: At any time during resource traversal, a L{str} giving an
absolute URL to the most nested resource which has yet been
reached.
"""
def rememberRootURL():
"""
Remember the currently-processed part of the URL for later
recalling.
"""
def getRootURL():
"""
Get a previously-remembered URL.
"""
# Methods for outgoing response
def finish():
"""
Indicate that the response to this request is complete.
"""
def write(data):
"""
Write some data to the body of the response to this request. Response
headers are written the first time this method is called, after which
new response headers may not be added.
"""
def addCookie(k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""
Set an outgoing HTTP cookie.
In general, you should consider using sessions instead of cookies, see
L{twisted.web.server.Request.getSession} and the
L{twisted.web.server.Session} class for details.
"""
def setResponseCode(code, message=None):
"""
Set the HTTP response code.
"""
def setHeader(k, v):
"""
Set an HTTP response header. Overrides any previously set values for
this header.
@type name: C{str}
@param name: The name of the header for which to set the value.
@type value: C{str}
@param value: The value to set for the named header.
"""
def redirect(url):
"""
Utility function that does a redirect.
The request should have finish() called after this.
"""
def setLastModified(when):
"""
Set the C{Last-Modified} time for the response to this request.
If I am called more than once, I ignore attempts to set Last-Modified
earlier, only replacing the Last-Modified time if it is to a later
value.
If I am a conditional request, I may modify my response code to
L{NOT_MODIFIED} if appropriate for the time given.
@param when: The last time the resource being returned was modified, in
seconds since the epoch.
@type when: C{int}, C{long} or C{float}
@return: If I am a C{If-Modified-Since} conditional request and the
time given is not newer than the condition, I return
L{http.CACHED<CACHED>} to indicate that you should write no body.
Otherwise, I return a false value.
"""
def setETag(etag):
"""
Set an C{entity tag} for the outgoing response.
That's "entity tag" as in the HTTP/1.1 C{ETag} header, "used for
comparing two or more entities from the same requested resource."
If I am a conditional request, I may modify my response code to
L{NOT_MODIFIED} or L{PRECONDITION_FAILED}, if appropriate for the tag
given.
@param etag: The entity tag for the resource being returned.
@type etag: C{str}
@return: If I am a C{If-None-Match} conditional request and the tag
matches one in the request, I return L{http.CACHED<CACHED>} to
indicate that you should write no body. Otherwise, I return a
false value.
"""
def setHost(host, port, ssl=0):
"""
Change the host and port the request thinks it's using.
This method is useful for working with reverse HTTP proxies (e.g. both
Squid and Apache's mod_proxy can do this), when the address the HTTP
client is using is different than the one we're listening on.
For example, Apache may be listening on https://www.example.com, and
then forwarding requests to http://localhost:8080, but we don't want
HTML produced by Twisted to say 'http://localhost:8080', they should
say 'https://www.example.com', so we do::
request.setHost('www.example.com', 443, ssl=1)
"""
class ICredentialFactory(Interface):
"""
A credential factory defines a way to generate a particular kind of
authentication challenge and a way to interpret the responses to these
challenges. It creates L{ICredentials} providers from responses. These
objects will be used with L{twisted.cred} to authenticate and authorize
requests.
"""
scheme = Attribute(
"A C{str} giving the name of the authentication scheme with which "
"this factory is associated. For example, C{'basic'} or C{'digest'}.")
def getChallenge(request):
"""
Generate a new challenge to be sent to a client.
@type request: L{twisted.web.http.Request}
@param request: The request whose response will include this
challenge.
@rtype: C{dict}
@return: A mapping from C{str} challenge fields to associated C{str}
values.
"""
def decode(response, request):
"""
Create a credentials object from the given response.
@type response: C{str}
@param response: scheme specific response string
@type request: L{twisted.web.http.Request}
@param request: The request being processed (from which the response
was taken).
@raise twisted.cred.error.LoginFailed: If the response is invalid.
@rtype: L{twisted.cred.credentials.ICredentials} provider
@return: The credentials represented by the given response.
"""
class IBodyProducer(IPushProducer):
"""
Objects which provide L{IBodyProducer} write bytes to an object which
provides L{IConsumer} by calling its C{write} method repeatedly.
L{IBodyProducer} providers may start producing as soon as they have
an L{IConsumer} provider. That is, they should not wait for a
C{resumeProducing} call to begin writing data.
L{IConsumer.unregisterProducer} must not be called. Instead, the
L{Deferred} returned from C{startProducing} must be fired when all bytes
have been written.
L{IConsumer.write} may synchronously invoke any of C{pauseProducing},
C{resumeProducing}, or C{stopProducing}. These methods must be implemented
with this in mind.
@since: 9.0
"""
# Despite the restrictions above and the additional requirements of
# stopProducing documented below, this interface still needs to be an
# IPushProducer subclass. Providers of it will be passed to IConsumer
# providers which only know about IPushProducer and IPullProducer, not
# about this interface. This interface needs to remain close enough to one
# of those interfaces for consumers to work with it.
length = Attribute(
"""
C{length} is a C{int} indicating how many bytes in total this
L{IBodyProducer} will write to the consumer or L{UNKNOWN_LENGTH}
if this is not known in advance.
""")
def startProducing(consumer):
"""
Start producing to the given L{IConsumer} provider.
@return: A L{Deferred} which fires with C{None} when all bytes have
been produced or with a L{Failure} if there is any problem before
all bytes have been produced.
"""
def stopProducing():
"""
In addition to the standard behavior of L{IProducer.stopProducing}
(stop producing data), make sure the L{Deferred} returned by
C{startProducing} is never fired.
"""
UNKNOWN_LENGTH = u"twisted.web.iweb.UNKNOWN_LENGTH"
__all__ = [
"IUsernameDigestHash", "ICredentialFactory", "IRequest",
"IBodyProducer",
"UNKNOWN_LENGTH"]
|
neurodata/ndstore | refs/heads/master | scripts/convertRamon.py | 2 | # Copyright 2016 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import argparse
import MySQLdb
from contextlib import closing
sys.path += [os.path.abspath('../django')]
import ND.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'ND.settings'
import django
from django.conf import settings
django.setup()
from nduser.models import Project, Channel
from ndproject import NDProject, NDChannel
import annotation
import mysqlramondb
import ramondb
class ConvertRamon:
""" Converts an annotation project from the old RAMON format (spread across many tables) to the new RAMON format (consolidated in a single table) """
def __init__(self, project, channel):
try:
self.pr = Project.objects.get( project_name = project )
except Project.DoesNotExist:
raise
self.ch = Channel.objects.get( channel_name = channel, project = self.pr )
# pr and ch are django objects. proj and chan are NDStore objects
self.proj = NDProject(self.pr.project_name)
self.chan = NDChannel(self.proj, self.ch.channel_name)
self.annodb = mysqlramondb.MySQLRamonDB(self.proj)
self.ramondb = ramondb.RamonDB(self.proj)
self.getAllAnnoIDs()
def createRAMONTables(self):
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
try:
ramonTableName = '{}_ramon'.format(self.ch.channel_name)
cursor.execute("CREATE TABLE {} ( annoid BIGINT, kv_key VARCHAR(255), kv_value VARCHAR(20000), INDEX ( annoid, kv_key ) USING BTREE)".format(ramonTableName))
# Committing at the end
conn.commit()
except MySQLdb.Error, e:
print "Error: Failed to create new RAMON table: {}".format(e)
sys.exit(1)
def processAnnos(self):
for id in self.ids:
self.processExistingAnnotationByID(id)
def getAllAnnoIDs(self):
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
idTableName = "{}_ids".format( self.ch.channel_name )
sql = "SELECT id FROM {}".format( idTableName )
try:
cursor.execute( sql )
tmpids = cursor.fetchall()
except MySQLdb.Error, e:
print "Error: Failed to fetch existing RAMON IDs: {}".format(e)
sys.exit(1)
self.ids = [x[0] for x in tmpids]
def _getAnnoType(self, id):
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
annoTableName = "{}_annotations".format( self.ch.channel_name )
sql = "SELECT type FROM {} WHERE annoid={}".format( annoTableName, id )
try:
cursor.execute( sql )
res = cursor.fetchall()
return res[0]
except MySQLdb.Error, e:
print "Error: Failed to get annotation type for RAMON object with ID {}: {}".format(id, e)
sys.exit(1)
def _readAnnoMetadata(self, id):
# reads the basic metadata in for all annotation types (from annotation_annotations table)
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
annoTableName = "{}_annotations".format( self.ch.channel_name )
sql = "SELECT confidence, status FROM {} WHERE annoid={}".format( annoTableName, id )
try:
cursor.execute( sql )
res = cursor.fetchall()
return res[0]
except MySQLdb.Error, e:
print "Error: Failed to get annotation type for RAMON object with ID {}: {}".format(id, e)
sys.exit(1)
def _readKVPairs(self, id):
# reads and returns KV Pairs from the kvpairs table
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
kvTableName = "{}_kvpairs".format( self.ch.channel_name )
sql = "SELECT kv_key, kv_value FROM {} WHERE annoid={}".format( kvTableName, id )
try:
cursor.execute( sql )
res = cursor.fetchall()
except MySQLdb.Error, e:
print "Error: Failed to get annotation type for RAMON object with ID {}: {}".format(id, e)
sys.exit(1)
kvpairs = {}
for row in res:
kvpairs[ row[0] ] = row[1]
return kvpairs
def _readSynapse(self, id):
# create a new synapse
anno = annotation.AnnSynapse( self.annodb, self.ch )
# set the synapse ID
anno.setField('annid', id)
# fill in the fields
# basic metadata first
[confidence, status] = self._readAnnoMetadata(id)
anno.setField('status', status)
anno.setField('confidence', confidence)
# synapse related metadata
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
synapseTableName = "{}_synapses".format( self.ch.channel_name )
sql = "SELECT synapse_type, weight FROM {} WHERE annoid={}".format( synapseTableName, id )
try:
cursor.execute( sql )
res = cursor.fetchall()[0]
except MySQLdb.Error, e:
print "Error: Failed to get annotation type for RAMON object with ID {}: {}".format(id, e)
sys.exit(1)
anno.setField('synapse_type', res[0])
anno.setField('weight', res[1])
# parse kvpairs
kvpairs = self._readKVPairs(id)
for key in kvpairs.keys():
value = kvpairs[key]
if key == 'ann_author':
anno.setField('author', value)
elif key == 'organelles':
continue
elif key == 'synapses':
continue
else:
anno.setField(key, value)
# return newly created anno object
return anno
def _readSeed(self, id):
# create a new seed
anno = annotation.AnnSeed( self.annodb, self.ch )
# set the anno ID
anno.setField('annid', id)
# fill in the fields
# basic metadata first
[confidence, status] = self._readAnnoMetadata(id)
anno.setField('status', status)
anno.setField('confidence', confidence)
# segment related metadata
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
seedTableName = "{}_seeds".format( self.ch.channel_name )
sql = "SELECT parentid, sourceid, cube_location, positionx, positiony, positionz FROM {} WHERE annoid={}".format( seedTableName, id )
try:
cursor.execute( sql )
res = cursor.fetchall()[0]
except MySQLdb.Error, e:
print "Error: Failed to get annotation type for RAMON object with ID {}: {}".format(id, e)
sys.exit(1)
anno.setField('parent', res[0])
anno.setField('source', res[1])
anno.setField('cubelocation', res[2])
anno.setField('position', "{},{},{}".format( res[3], res[4], res[5] ))
# parse kvpairs
kvpairs = self._readKVPairs(id)
for key in kvpairs.keys():
value = kvpairs[key]
if key == 'ann_author':
anno.setField('author', value)
else:
anno.setField(key, value)
# return newly completed anno object
return anno
def _readSegment(self, id):
# create a new segment
anno = annotation.AnnSegment( self.annodb, self.ch )
# set the segment ID
anno.setField('annid', id)
# fill in the fields
# basic metadata first
[confidence, status] = self._readAnnoMetadata(id)
anno.setField('status', status)
anno.setField('confidence', confidence)
# segment related metadata
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
segTableName = "{}_segments".format( self.ch.channel_name )
sql = "SELECT segmentclass, parentseed, neuron FROM {} WHERE annoid={}".format( segTableName, id )
try:
cursor.execute( sql )
res = cursor.fetchall()[0]
except MySQLdb.Error, e:
print "Error: Failed to get annotation type for RAMON object with ID {}: {}".format(id, e)
sys.exit(1)
anno.setField('segmentclass', res[0])
anno.setField('parentseed', res[1])
anno.setField('neuron', res[2])
# parse kvpairs
kvpairs = self._readKVPairs(id)
for key in kvpairs.keys():
value = kvpairs[key]
if key == 'ann_author':
anno.setField('author', value)
elif key == 'organelles':
continue
elif key == 'synapses':
continue
else:
anno.setField(key, value)
# return newly completed anno object
return anno
def _readNeuron(self, id):
# create a new neuron
anno = annotation.AnnNeuron( self.annodb, self.ch )
# set the anno ID
anno.setField('annid', id)
# fill in the fields
# basic metadata first
[confidence, status] = self._readAnnoMetadata(id)
anno.setField('status', status)
anno.setField('confidence', confidence)
# parse kvpairs
kvpairs = self._readKVPairs(id)
for key in kvpairs.keys():
value = kvpairs[key]
if key == 'ann_author':
anno.setField('author', value)
elif key == 'segments':
continue
else:
anno.setField(key, value)
# return newly completed anno object
return anno
def _readOrganelle(self, id):
# create a new organelle
anno = annotation.AnnOrganelle( self.annodb, self.ch )
# set the organelle ID
anno.setField('annid', id)
# fill in the fields
# basic metadata first
[confidence, status] = self._readAnnoMetadata(id)
anno.setField('status', status)
anno.setField('confidence', confidence)
# segment related metadata
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
orgTableName = "{}_organelles".format( self.ch.channel_name )
sql = "SELECT organelleclass, parentseed, centroidx, centroidy, centroidz FROM {} WHERE annoid={}".format( orgTableName, id )
try:
cursor.execute( sql )
res = cursor.fetchall()[0]
except MySQLdb.Error, e:
print "Error: Failed to get annotation type for RAMON object with ID {}: {}".format(id, e)
sys.exit(1)
anno.setField('organelleclass', res[0])
anno.setField('parentseed', res[1])
anno.setField('centroid', "{},{},{}".format(res[2], res[3], res[4]))
# parse kvpairs
kvpairs = self._readKVPairs(id)
for key in kvpairs.keys():
value = kvpairs[key]
if key == 'ann_author':
anno.setField('author', value)
else:
anno.setField(key, value)
# return newly completed anno object
return anno
def _readNode(self, id):
# create a new node
anno = annotation.AnnNode( self.annodb, self.ch )
# set the node ID
anno.setField('annid', id)
# fill in the fields
# basic metadata first
[confidence, status] = self._readAnnoMetadata(id)
anno.setField('status', status)
anno.setField('confidence', confidence)
# segment related metadata
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
nodeTableName = "{}_nodes".format( self.ch.channel_name )
sql = "SELECT skeletonid, nodetype, parentid, locationx, locationy, locationz, radius FROM {} WHERE annoid={}".format( nodeTableName, id )
try:
cursor.execute( sql )
res = cursor.fetchall()[0]
except MySQLdb.Error, e:
print "Error: Failed to get annotation type for RAMON object with ID {}: {}".format(id, e)
sys.exit(1)
anno.setField('skeleton', res[0])
anno.setField('nodetype', res[1])
anno.setField('parent', res[2])
anno.setField('location', "{},{},{}".format(res[3], res[4], res[5]))
anno.setField('radius', res[6])
# parse kvpairs
kvpairs = self._readKVPairs(id)
for key in kvpairs.keys():
value = kvpairs[key]
if key == 'ann_author':
anno.setField('author', value)
elif key == 'children':
continue
else:
anno.setField(key, value)
# return newly completed anno object
return anno
def _readSkeleton(self, id):
# create a new skeleton
anno = annotation.AnnSkeleton( self.annodb, self.ch )
# set the skeleton ID
anno.setField('annid', id)
# fill in the fields
# basic metadata first
[confidence, status] = self._readAnnoMetadata(id)
anno.setField('status', status)
anno.setField('confidence', confidence)
# segment related metadata
with closing(MySQLdb.connect(host = self.proj.getDBHost(), user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = self.proj.getDBName(), connect_timeout=1)) as conn:
with closing(conn.cursor()) as cursor:
skeletonTableName = "{}_skeletons".format( self.ch.channel_name )
sql = "SELECT skeletontype, rootnode FROM {} WHERE annoid={}".format( skeletonTableName, id )
try:
cursor.execute( sql )
res = cursor.fetchall()[0]
except MySQLdb.Error, e:
print "Error: Failed to get annotation type for RAMON object with ID {}: {}".format(id, e)
sys.exit(1)
anno.setField('skeletontype', res[0])
anno.setField('rootnode', res[1])
# parse kvpairs
kvpairs = self._readKVPairs(id)
for key in kvpairs.keys():
value = kvpairs[key]
if key == 'ann_author':
anno.setField('author', value)
elif key == 'skeletonnodes':
continue
else:
anno.setField(key, value)
# return newly completed anno object
return anno
def _readAnnotation(self, id):
# create a new annotation
anno = annotation.Annotation( self.annodb, self.ch )
# set the segment ID
anno.setField('annid', id)
# fill in the fields
# basic metadata first
[confidence, status] = self._readAnnoMetadata(id)
anno.setField('status', status)
anno.setField('confidence', confidence)
# parse kvpairs
kvpairs = self._readKVPairs(id)
for key in kvpairs.keys():
value = kvpairs[key]
if key == 'ann_author':
anno.setField('author', value)
else:
anno.setField(key, value)
# return newly completed anno object
return anno
def processExistingAnnotationByID(self, id):
# get the annotation type
anntype = self._getAnnoType(id)[0]
if anntype == annotation.ANNO_SYNAPSE:
anno = self._readSynapse(id)
elif anntype == annotation.ANNO_SEED:
anno = self._readSeed(id)
elif anntype == annotation.ANNO_SEGMENT:
anno = self._readSegment(id)
elif anntype == annotation.ANNO_NEURON:
anno = self._readNeuron(id)
elif anntype == annotation.ANNO_ORGANELLE:
anno = self._readOrganelle(id)
elif anntype == annotation.ANNO_NODE:
anno = self._readNode(id)
elif anntype == annotation.ANNO_SKELETON:
anno = self._readSkeleton(id)
elif anntype == annotation.ANNO_ANNOTATION:
anno = self._readAnnotation(id)
else:
print "Unknown annotation type: {}".format(anntype)
sys.exit(1)
try:
self.ramondb.putAnnotation( self.chan, anno )
except Exception, e:
print e
import pdb; pdb.set_trace()
def main():
parser = argparse.ArgumentParser(description='Convert a channel from the old RAMON format (multiple tables) to the new RAMON format (a single table)')
parser.add_argument('project', action='store', help='Project (not token) name')
parser.add_argument('channel', action='store', help='Channel name')
parser.add_argument('--skip-table', action='store_true', help='Skip creating the new annotation table.')
result = parser.parse_args()
cr = ConvertRamon(result.project, result.channel)
if not result.skip_table:
cr.createRAMONTables()
cr.processAnnos()
if __name__ == '__main__':
main()
|
basho/otp | refs/heads/basho | lib/asn1/test/asn1_SUITE_data/XSeq.py | 97 | XSeq DEFINITIONS ::=
BEGIN
-- F.2.10.2
-- Use a sequence type to model a collection of variables whose
-- types are the same,
-- whose number is known and modest, and whose order is significant,
-- provided that the
-- makeup of the collection is unlikely to change from one version
-- of the protocol to the next.
-- EXAMPLE
NamesOfOfficers ::= SEQUENCE {
president VisibleString,
vicePresident VisibleString,
secretary VisibleString}
acmeCorp NamesOfOfficers ::= {
president "Jane Doe",
vicePresident "John Doe",
secretary "Joe Doe"}
-- F.2.10.3
-- Use a sequence type to model a collection of variables whose types differ,
-- whose number is known and modest, and whose order is significant,
-- provided that
-- the makeup of the collection is unlikely to change from one version
-- of the protocol to the next.
-- EXAMPLE
Credentials ::= SEQUENCE {
userName VisibleString,
password VisibleString,
accountNumber INTEGER}
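-- A sample value of the Credentials type, mirroring the acmeCorp example
-- above (illustrative addition, not part of the original F.2.10.3 text):
joeCreds Credentials ::= {
userName "jdoe",
password "opensesame",
accountNumber 42}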
-- An empty SEQUENCE is pointless, but it is included here just for test purposes
BasicCallCategories ::= SEQUENCE
{
... -- So far, no specific categories identified
}
END
|
azaghal/ansible | refs/heads/devel | test/support/integration/plugins/modules/x509_crl_info.py | 36 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: x509_crl_info
version_added: "2.10"
short_description: Retrieve information on Certificate Revocation Lists (CRLs)
description:
- This module allows one to retrieve information on Certificate Revocation Lists (CRLs).
requirements:
- cryptography >= 1.2
author:
- Felix Fontein (@felixfontein)
options:
path:
description:
- Remote absolute path where the generated CRL file should be created or is already located.
- Either I(path) or I(content) must be specified, but not both.
type: path
content:
description:
- Content of the X.509 certificate in PEM format.
- Either I(path) or I(content) must be specified, but not both.
type: str
notes:
- All timestamp values are provided in ASN.1 TIME format, i.e. following the C(YYYYMMDDHHMMSSZ) pattern.
They are all in UTC.
seealso:
- module: x509_crl
'''
EXAMPLES = r'''
- name: Get information on CRL
x509_crl_info:
path: /etc/ssl/my-ca.crl
register: result
- debug:
msg: "{{ result }}"
'''
RETURN = r'''
issuer:
description:
- The CRL's issuer.
- Note that for repeated values, only the last one will be returned.
returned: success
type: dict
sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
issuer_ordered:
description: The CRL's issuer as an ordered list of tuples.
returned: success
type: list
elements: list
sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
last_update:
description: The point in time from which this CRL can be trusted as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
next_update:
description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
digest:
description: The signature algorithm used to sign the CRL.
returned: success
type: str
sample: sha256WithRSAEncryption
revoked_certificates:
description: List of certificates to be revoked.
returned: success
type: list
elements: dict
contains:
serial_number:
description: Serial number of the certificate.
type: int
sample: 1234
revocation_date:
description: The point in time the certificate was revoked as ASN.1 TIME.
type: str
sample: 20190413202428Z
issuer:
description: The certificate's issuer.
type: list
elements: str
sample: '["DNS:ca.example.org"]'
issuer_critical:
description: Whether the certificate issuer extension is critical.
type: bool
sample: no
reason:
description:
- The value for the revocation reason extension.
- One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
C(remove_from_crl).
type: str
sample: key_compromise
reason_critical:
description: Whether the revocation reason extension is critical.
type: bool
sample: no
invalidity_date:
description: |
The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid as ASN.1 TIME.
type: str
sample: 20190413202428Z
invalidity_date_critical:
description: Whether the invalidity date extension is critical.
type: bool
sample: no
'''
import traceback
from distutils.version import LooseVersion
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography import x509
from cryptography.hazmat.backends import default_backend
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
class CRLError(crypto_utils.OpenSSLObjectError):
pass
class CRLInfo(crypto_utils.OpenSSLObject):
"""The main module implementation."""
def __init__(self, module):
super(CRLInfo, self).__init__(
module.params['path'] or '',
'present',
False,
module.check_mode
)
self.content = module.params['content']
self.module = module
self.crl = None
if self.content is None:
try:
with open(self.path, 'rb') as f:
data = f.read()
except Exception as e:
self.module.fail_json(msg='Error while reading CRL file from disk: {0}'.format(e))
else:
data = self.content.encode('utf-8')
try:
self.crl = x509.load_pem_x509_crl(data, default_backend())
except Exception as e:
self.module.fail_json(msg='Error while decoding CRL: {0}'.format(e))
def _dump_revoked(self, entry):
return {
'serial_number': entry['serial_number'],
'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
'issuer':
[crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
if entry['issuer'] is not None else None,
'issuer_critical': entry['issuer_critical'],
'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
'reason_critical': entry['reason_critical'],
'invalidity_date':
entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
if entry['invalidity_date'] is not None else None,
'invalidity_date_critical': entry['invalidity_date_critical'],
}
def get_info(self):
result = {
'changed': False,
'last_update': None,
'next_update': None,
'digest': None,
'issuer_ordered': None,
'issuer': None,
'revoked_certificates': [],
}
result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
try:
result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
except AttributeError:
# Older cryptography versions don't have signature_algorithm_oid yet
dotted = crypto_utils._obj2txt(
self.crl._backend._lib,
self.crl._backend._ffi,
self.crl._x509_crl.sig_alg.algorithm
)
oid = x509.oid.ObjectIdentifier(dotted)
result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
issuer = []
for attribute in self.crl.issuer:
issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
result['issuer_ordered'] = issuer
result['issuer'] = {}
for k, v in issuer:
result['issuer'][k] = v
result['revoked_certificates'] = []
for cert in self.crl:
entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
result['revoked_certificates'].append(self._dump_revoked(entry))
return result
def generate(self):
# Empty method because crypto_utils.OpenSSLObject wants this
pass
def dump(self):
# Empty method because crypto_utils.OpenSSLObject wants this
pass
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path'),
content=dict(type='str'),
),
required_one_of=(
['path', 'content'],
),
mutually_exclusive=(
['path', 'content'],
),
supports_check_mode=True,
)
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
try:
crl = CRLInfo(module)
result = crl.get_info()
module.exit_json(**result)
except crypto_utils.OpenSSLObjectError as e:
module.fail_json(msg=to_native(e))
if __name__ == "__main__":
main()
|
orablu/heroku-buildpack | refs/heads/master | vendor/setuptools-0.9.7/setuptools/command/easy_install.py | 63 | #!python
"""\
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
from glob import glob
import pkg_resources
from setuptools import Command, _dont_write_bytecode
from setuptools.sandbox import run_setup
from distutils import log, dir_util
try:
# Python 2.7 or >=3.2
from sysconfig import get_config_vars, get_path
def _get_platlib():
return get_path("platlib")
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_config_vars, get_python_lib
def _get_platlib():
return get_python_lib(True)
def _get_purelib():
return get_python_lib(False)
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, xrange, basestring, unicode,
reraise)
from pkg_resources import yield_lines, normalize_path, resource_string, \
ensure_directory, get_distribution, find_distributions, \
Environment, Requirement, Distribution, \
PathMetadata, EggMetadata, WorkingSet, \
DistributionNotFound, VersionConflict, \
DEVELOP_DIST
if '__VENV_LAUNCHER__' in os.environ:
sys_executable = os.environ['__VENV_LAUNCHER__']
else:
sys_executable = os.path.normpath(sys.executable)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
import site
HAS_USER_SITE = not sys.version < "2.6" and site.ENABLE_USER_SITE
import struct
def is_64bit():
return struct.calcsize("P") == 8
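# Editor's note: quick illustration. struct.calcsize("P") is the byte
# width of a C pointer in the running interpreter, so:
#
#   >>> struct.calcsize("P")
#   8           # on a 64-bit Python; 4 on a 32-bit one
#   >>> is_64bit()
#   True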
def samefile(p1,p2):
if hasattr(os.path,'samefile') and (
os.path.exists(p1) and os.path.exists(p2)
):
return os.path.samefile(p1,p2)
return (
os.path.normpath(os.path.normcase(p1)) ==
os.path.normpath(os.path.normcase(p2))
)
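# Editor's note: illustrative usage sketch (paths hypothetical). Where
# os.path.samefile() exists and both paths do, symlinked duplicates
# compare equal; otherwise the normalized-path fallback is used:
#
#   >>> samefile('/tmp/eggs/./Foo-1.0.egg', '/tmp/eggs/Foo-1.0.egg')
#   True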
if sys.version_info <= (3,):
def _to_ascii(s):
return s
def isascii(s):
try:
unicode(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("delete-conflicting", "D", "no longer needed; don't use this"),
("ignore-conflicts-at-my-risk", None,
"no longer needed; don't use this"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=','S',"list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l', "allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'delete-conflicting', 'ignore-conflicts-at-my-risk', 'editable',
'no-deps', 'local-snapshots-ok', 'version'
]
if HAS_USER_SITE:
user_options.append(('user', None,
"install in user site-package '%s'" % site.USER_SITE))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
if HAS_USER_SITE:
whereami = os.path.abspath(__file__)
self.user = whereami.startswith(site.USER_SITE)
else:
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if HAS_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.delete_conflicting = None
self.ignore_conflicts_at_my_risk = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if os.path.isdir(filename) and not os.path.islink(filename):
rmtree(filename)
else:
os.unlink(filename)
def finalize_options(self):
if self.version:
print('setuptools %s' % get_distribution('setuptools').version)
sys.exit()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if HAS_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
# fix the install_dir if "--user" was used
#XXX: duplicate of the code in the setup command
if self.user and HAS_USER_SITE:
self.create_home_path()
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
if os.name == 'posix':
self.select_scheme("unix_user")
else:
self.select_scheme(os.name + "_user")
self.expand_basedirs()
self.expand_dirs()
self._expand('install_dir','script_dir','build_directory','site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options('install_lib',
('install_dir','install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options('install_scripts',
('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = list(map(normalize_path, sys.path)) # list() so the Py3 map iterator survives repeated membership tests
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d+" (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable: self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path = self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path+sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path+sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize','optimize'))
if not isinstance(self.optimize,int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2): raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.delete_conflicting and self.ignore_conflicts_at_my_risk:
raise DistutilsOptionError(
"Can't use both --delete-conflicting and "
"--ignore-conflicts-at-my-risk at the same time"
)
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data',])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in xrange(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except Exception:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
if self.delete_conflicting or self.ignore_conflicts_at_my_risk:
log.warn(
"Note: The -D, --delete-conflicting and"
" --ignore-conflicts-at-my-risk no longer have any purpose"
" and should not be used."
)
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir,'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname()+'.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists: os.unlink(testfile)
open(testfile,'w').close()
os.unlink(testfile)
except (OSError,IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH','').split(os.pathsep)
if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
def cant_write_to_target(self):
msg = """can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""" % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += """
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
else:
msg += """
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
"""
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname()+".pth"
ok_file = pth_file+'.ok'
ok_exists = os.path.exists(ok_file)
try:
if ok_exists: os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(pth_file,'w')
except (OSError,IOError):
self.cant_write_to_target()
else:
try:
f.write("import os; f = open(%r, 'w'); f.write('OK'); f.close()\n" % (ok_file,))
f.close(); f=None
executable = sys.executable
if os.name=='nt':
dirname,basename = os.path.split(executable)
alt = os.path.join(dirname,'pythonw.exe')
if basename.lower()=='python.exe' and os.path.exists(alt):
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable,'-E','-c','pass'],0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f: f.close()
if os.path.exists(ok_file): os.unlink(ok_file)
if os.path.exists(pth_file): os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/'+script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base,filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self,spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
download = None
if not self.editable: self.install_site_py()
try:
if not isinstance(spec,Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
download = self.package_index.download(spec, tmpdir)
return self.install_item(None, download, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable, not self.always_copy,
self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg+=" (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence==DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if file in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location==download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.check_conflicts(self.egg_distribution(download))]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = requirement or distreq
requirement = Requirement(
distreq.project_name, distreq.specs, requirement.extras
)
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound:
e = sys.exc_info()[1]
raise DistutilsError(
"Could not find required distribution %s" % e.args
)
except VersionConflict:
e = sys.exc_info()[1]
raise DistutilsError(
"Installed distribution %s conflicts with requirement %s"
% e.args
)
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
log.warn(
"%r already exists in %s; build directory %s will not be kept",
spec.key, self.build_directory, setup_base
)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename)==setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents)==1:
dist_filename = os.path.join(setup_base,contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst); shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if not self.exclude_scripts:
for args in get_script_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
def get_template(filename):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
These templates use triple-quotes to escape variable
substitutions so the scripts get the 2to3 treatment when built
on Python 3. The templates cannot use triple-quotes naturally.
"""
raw_bytes = resource_string('setuptools', filename)
template_str = raw_bytes.decode('utf-8')
clean_template = template_str.replace('"""', '')
return clean_template
if is_script:
template_name = 'script template.py'
if dev_path:
template_name = template_name.replace('.py', ' (dev).py')
script_text = (get_script_header(script_text) +
get_template(template_name) % locals())
self.write_script(script_name, _to_ascii(script_text), 'b')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir,x) for x in blockers])
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0x1FF-mask) # 0777
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None
):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" % os.path.abspath(dist_filename)
)
if len(setups)>1:
raise DistutilsError(
"Multiple setup scripts in %s" % os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path,metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(self.install_dir,os.path.basename(egg_path))
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
self.check_conflicts(dist)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(os.unlink,(destination,),"Removing "+destination)
uncache_zipdir(destination)
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f,m = self.unpack_and_compile, "Extracting"
elif egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copy2, "Copying"
self.execute(f, (egg_path, destination),
(m+" %s to %s") %
(os.path.basename(egg_path),os.path.dirname(destination)))
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(None,
project_name=cfg.get('metadata','name'),
version=cfg.get('metadata','version'), platform=get_platform()
)
# Convert the .exe to an unpacked egg
egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg')
egg_tmp = egg_path+'.tmp'
egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf,'w')
f.write('Metadata-Version: 1.0\n')
for k,v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_','-').title(), v))
f.close()
script_dir = os.path.join(egg_info,'scripts')
self.delete_blockers( # delete entry-point scripts to avoid duping
[os.path.join(script_dir,args[0]) for args in get_script_args(dist)]
)
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src,dst):
s = src.lower()
for old,new in prefixes:
if s.startswith(old):
src = new+src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old!='SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1])+'.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile); stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level','native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt')
if not os.path.exists(txt):
f = open(txt,'w')
f.write('\n'.join(locals()[name])+'\n')
f.close()
def check_conflicts(self, dist):
"""Verify that there are no conflicting "old-style" packages"""
return dist # XXX temporarily disable until new strategy is stable
from imp import find_module, get_suffixes
from glob import glob
blockers = []
names = dict.fromkeys(dist._get_metadata('top_level.txt')) # XXX private attr
exts = {'.pyc':1, '.pyo':1} # get_suffixes() might leave one out
for ext,mode,typ in get_suffixes():
exts[ext] = 1
for path,files in expand_paths([self.install_dir]+self.all_site_dirs):
for filename in files:
base,ext = os.path.splitext(filename)
if base in names:
if not ext:
# no extension, check for package
try:
f, filename, descr = find_module(base, [path])
except ImportError:
continue
else:
if f: f.close()
if filename not in blockers:
blockers.append(filename)
elif ext in exts and base!='site': # XXX ugh
blockers.append(os.path.join(path,filename))
if blockers:
self.found_conflicts(dist, blockers)
return dist
def found_conflicts(self, dist, blockers):
if self.delete_conflicting:
log.warn("Attempting to delete conflicting packages:")
return self.delete_blockers(blockers)
msg = """\
-------------------------------------------------------------------------
CONFLICT WARNING:
The following modules or packages have the same names as modules or
packages being installed, and will be *before* the installed packages in
Python's search path. You MUST remove all of the relevant files and
directories before you will be able to use the package(s) you are
installing:
%s
""" % '\n '.join(blockers)
if self.ignore_conflicts_at_my_risk:
msg += """\
(Note: you can run EasyInstall on '%s' with the
--delete-conflicting option to attempt deletion of the above files
and/or directories.)
""" % dist.project_name
else:
msg += """\
Note: you can attempt this installation again with EasyInstall, and use
either the --delete-conflicting (-D) option or the
--ignore-conflicts-at-my-risk option, to either delete the above files
and directories, or to ignore the conflicts, respectively. Note that if
you ignore the conflicts, the installed package(s) may not work.
"""
msg += """\
-------------------------------------------------------------------------
"""
sys.stderr.write(msg)
sys.stderr.flush()
if not self.ignore_conflicts_at_my_risk:
raise DistutilsError("Installation aborted due to conflicts")
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
"""
if self.install_dir not in map(normalize_path,sys.path):
msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose>2:
v = 'v' * (self.verbose - 1)
args.insert(0,'-'+v)
elif self.verbose<2:
args.insert(0,'-q')
if self.dry_run:
args.insert(0,'-n')
log.info(
"Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit:
v = sys.exc_info()[1]
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives: continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self,dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key=='setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir,'setuptools.pth')
if os.path.islink(filename): os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location)+'\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []; to_chmod = []
def pf(src,dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src,dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0x16D) & 0xFED # 0555, 07755
chmod(f, mode)
def byte_compile(self, to_compile):
if _dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
def no_default_version_msg(self):
return """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""" % (
self.install_dir, os.environ.get('PYTHONPATH','')
)
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy,'rb')
current = f.read()
# we want str, not bytes
if sys.version_info >= (3,):
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy,'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0700)" % path)
os.makedirs(path, 0x1C0) # 0700
INSTALL_SCHEMES = dict(
posix = dict(
install_dir = '$base/lib/python$py_version_short/site-packages',
script_dir = '$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir = '$base/Lib/site-packages',
script_dir = '$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME)
for attr,val in scheme.items():
if getattr(self,attr,None) is None:
setattr(self,attr,val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
'').split(os.pathsep) if _f]
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")])
else:
sitedirs.extend(
[prefix, os.path.join(prefix, "lib", "site-packages")]
)
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for site_lib in (_get_purelib(), _get_platlib()):
if site_lib not in sitedirs: sitedirs.append(site_lib)
if HAS_USER_SITE:
sitedirs.append(site.USER_SITE)
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth','setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname,name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
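# Editor's note: a hedged usage sketch (directory name hypothetical).
# expand_paths() yields each existing input directory plus any extra
# directories referenced from third-party .pth files found inside it:
#
#   for dirname, files in expand_paths(['/usr/lib/python2.7/site-packages']):
#       pass  # e.g. check `files` for modules that would shadow an install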
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a ConfigParser.RawConfigParser, or None
"""
f = open(dist_filename,'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended-12)
from setuptools.compat import StringIO, ConfigParser
import struct
tag, cfglen, bmlen = struct.unpack("<iii",f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended-(12+cfglen))
cfg = ConfigParser.RawConfigParser({'version':'','target_version':''})
try:
part = f.read(cfglen)
# part is in bytes, but we need to read up to the first null
# byte.
if sys.version_info >= (2,6):
null_byte = bytes([0])
else:
null_byte = chr(0)
config = part.split(null_byte, 1)[0]
# Now the config is in bytes, but on Python 3, it must be
# unicode for the RawConfigParser, so decode it. Is this the
# right encoding?
config = config.decode('ascii')
cfg.readfp(StringIO(config))
except ConfigParser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
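# Editor's note: illustrative sketch with a hypothetical installer
# name; extract_wininst_cfg() returns None for anything that is not a
# valid bdist_wininst .exe:
#
#   cfg = extract_wininst_cfg('Fizzle-1.0.win32.exe')  # hypothetical file
#   if cfg is not None:
#       name = cfg.get('metadata', 'name')        # e.g. 'Fizzle'
#       version = cfg.get('metadata', 'version')  # e.g. '1.0'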
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts)==3 and parts[2]=='PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB','PLATLIB'):
contents = z.read(name)
if sys.version_info >= (3,):
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\','/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0],pth)), ''))
finally:
z.close()
prefixes = [(x.lower(),y) for x, y in prefixes]
prefixes.sort(); prefixes.reverse()
return prefixes
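# Editor's note: hedged sketch of how the returned (old, new) pairs
# translate wininst archive paths into egg-relative ones (member names
# hypothetical; matching is against the lower-cased source path, as in
# exe_to_egg()'s process() callback above):
#
#   'purelib/pkg/mod.py' -> 'pkg/mod.py'
#   'scripts/runner.py'  -> 'EGG-INFO/scripts/runner.py'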
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load(); Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename,'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir,path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative,self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename,'wt')
f.write(data); f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
def add(self,dist):
"""Add `dist` to the distribution map"""
if (dist.location not in self.paths and (
dist.location not in self.sitedirs or
dist.location == os.getcwd() #account for '.' being in PYTHONPATH
)):
self.paths.append(dist.location)
self.dirty = True
Environment.add(self,dist)
def remove(self,dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location); self.dirty = True
Environment.remove(self,dist)
def make_relative(self,path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = '/' if os.altsep == '/' else os.sep
while len(npath)>=baselen:
if npath==self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
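# Editor's note: illustrative sketch (POSIX paths hypothetical).
# Locations under the .pth file's own directory become './'-relative
# entries; anything outside it is returned unchanged:
#
#   pth = PthDistributions('/srv/site-packages/easy-install.pth')
#   pth.make_relative('/srv/site-packages/Foo-1.0.egg')  # -> './Foo-1.0.egg'
#   pth.make_relative('/opt/other/Bar-2.0.egg')          # -> '/opt/other/Bar-2.0.egg'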
def get_script_header(script_text, executable=sys_executable, wininst=False):
"""Create a #! line, getting options (if any) from script_text"""
from distutils.command.build_scripts import first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
if not isinstance(first_line_re.pattern, str):
first_line_re = re.compile(first_line_re.pattern.decode())
first = (script_text+'\n').splitlines()[0]
match = first_line_re.match(first)
options = ''
if match:
options = match.group(1) or ''
if options: options = ' '+options
if wininst:
executable = "python.exe"
else:
executable = nt_quote_arg(executable)
hdr = "#!%(executable)s%(options)s\n" % locals()
if not isascii(hdr):
# Non-ascii path to sys.executable, use -x to prevent warnings
if options:
if options.strip().startswith('-'):
options = ' -x'+options.strip()[1:]
# else: punt, we can't do it, let the warning happen anyway
else:
options = ' -x'
executable = fix_jython_executable(executable, options)
hdr = "#!%(executable)s%(options)s\n" % locals()
return hdr
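# Editor's note: hedged usage sketch. The header combines the given
# interpreter with any options found on the script's own #! line:
#
#   >>> get_script_header('print("hi")', executable='/opt/py/bin/python')
#   '#!/opt/py/bin/python\n'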
def auto_chmod(func, arg, exc):
if func is os.remove and os.name=='nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
reraise(et, (ev[0], ev[1] + (" %s %s" % (func,arg))))
def uncache_zipdir(path):
"""Ensure that the importer caches dont have stale info for `path`"""
from zipimport import _zip_directory_cache as zdc
_uncache(path, zdc)
_uncache(path, sys.path_importer_cache)
def _uncache(path, cache):
if path in cache:
del cache[path]
else:
path = normalize_path(path)
for p in cache:
if normalize_path(p)==path:
del cache[p]
return
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
except (OSError,IOError): return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
result = []
nb = 0
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
nb += 1
elif c == '"':
# double preceding backslashes, then add a \"
result.append('\\' * (nb*2) + '\\"')
nb = 0
else:
if nb:
result.append('\\' * nb)
nb = 0
result.append(c)
if nb:
result.append('\\' * nb)
if needquote:
result.append('\\' * nb) # double the trailing backslashes
result.append('"')
return ''.join(result)
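# Editor's note: illustrative examples (paths hypothetical):
#
#   >>> nt_quote_arg(r'C:\Python27\python.exe')
#   'C:\\Python27\\python.exe'         # no whitespace: returned unchanged
#   >>> nt_quote_arg(r'C:\Program Files\python.exe')
#   '"C:\\Program Files\\python.exe"'  # whitespace forces surrounding quotes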
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args): pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error:
e = sys.exc_info()[1]
log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
# The Jython workaround is not needed on Linux systems.
import java
if java.lang.System.getProperty("os.name") == "Linux":
return executable
# Workaround Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn("WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable
def get_script_args(dist, executable=sys_executable, wininst=False):
"""Yield write_script() argument tuples for a distribution's entrypoints"""
spec = str(dist.as_requirement())
header = get_script_header("", executable, wininst)
for group in 'console_scripts', 'gui_scripts':
for name, ep in dist.get_entry_map(group).items():
script_text = (
"# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r\n"
"__requires__ = %(spec)r\n"
"import sys\n"
"from pkg_resources import load_entry_point\n"
"\n"
"if __name__ == '__main__':"
"\n"
" sys.exit(\n"
" load_entry_point(%(spec)r, %(group)r, %(name)r)()\n"
" )\n"
) % locals()
if sys.platform=='win32' or wininst:
# On Windows/wininst, add a .py extension and an .exe launcher
if group=='gui_scripts':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
new_header = re.sub('(?i)python.exe','pythonw.exe',header)
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py','.pyc','.pyo']
new_header = re.sub('(?i)pythonw.exe','python.exe',header)
if os.path.exists(new_header[2:-1].strip('"')) or sys.platform!='win32':
hdr = new_header
else:
hdr = header
yield (name+ext, hdr+script_text, 't', [name+x for x in old])
yield (
name+'.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
else:
# On other platforms, we assume the right thing to do is to
# just write the stub with no extension.
yield (name, header+script_text)
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if platform.machine().lower()=='arm':
launcher_fn = launcher_fn.replace(".", "-arm.")
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
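# Editor's note: a sketch of the resource names this resolves to
# (assuming a non-ARM build host):
#
#   get_win_launcher('cli')  # -> 'cli-64.exe' on 64-bit, else 'cli-32.exe'
#   get_win_launcher('gui')  # -> 'gui-64.exe' / 'gui-32.exe' accordingly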
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if sys.version_info[0] < 3:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
"""Recursively delete a directory tree.
This code is taken from the Python 2.4 version of 'shutil', because
the 2.3 version doesn't really work right.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def current_umask():
tmp = os.umask(0x12) # 022
os.umask(tmp)
return tmp
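# Editor's note: current_umask() reads the process umask without
# lastingly changing it (it is swapped to 0o022 and immediately
# restored). With the common umask of 0o022, write_script() above
# therefore ends up doing chmod(target, 0x1FF - 0o022), i.e. 0o755.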
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools; argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0; sys.argv.append(argv0); main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
import distutils.core
USAGE = """\
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
"""
def gen_usage (script_name):
script = os.path.basename(script_name)
return USAGE % vars()
def with_ei_usage(f):
old_gen_usage = distutils.core.gen_usage
try:
distutils.core.gen_usage = gen_usage
return f()
finally:
distutils.core.gen_usage = old_gen_usage
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self,*args,**kw):
with_ei_usage(lambda: Distribution._show_help(self,*args,**kw))
if argv is None:
argv = sys.argv[1:]
with_ei_usage(lambda:
setup(
script_args = ['-q','easy_install', '-v']+argv,
script_name = sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands, **kw
)
)
|
Andrey-Pavlov/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/cpp.py | 113 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009, 2010, 2012 Google Inc. All rights reserved.
# Copyright (C) 2009 Torch Mobile Inc.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is the modified version of Google's cpplint. The original code is
# http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
"""Support for check-webkit-style."""
import codecs
import math # for log
import os
import os.path
import re
import sre_compile
import string
import sys
import unicodedata
from webkitpy.common.memoized import memoized
# The key to use to provide a class to fake loading a header file.
INCLUDE_IO_INJECTION_KEY = 'include_header_io'
# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h',
'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
'utility', 'vector', 'vector.h',
])
# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h',
'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h',
'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h',
'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# These constants define types of headers for use with
# _IncludeState.check_next_include_order().
_CONFIG_HEADER = 0
_PRIMARY_HEADER = 1
_OTHER_HEADER = 2
_MOC_HEADER = 3
# A dictionary of items customize behavior for unit test. For example,
# INCLUDE_IO_INJECTION_KEY allows providing a custom io class which allows
# for faking a header file.
_unit_test_config = {}
# The regexp compilation caching is inlined in all regexp functions for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
_regexp_compile_cache = {}
def match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
def sub(pattern, replacement, s):
"""Substitutes occurrences of a pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(replacement, s)
def subn(pattern, replacement, s):
"""Substitutes occurrences of a pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].subn(replacement, s)
def iteratively_replace_matches_with_char(pattern, char_replacement, s):
"""Returns the string with replacement done.
Every character in the match is replaced with char_replacement.
Due to the iterative nature, pattern should not match char or
there will be an infinite loop.
Example:
pattern = r'<[^<>]*>' # template parameters
char_replacement = '_'
s = 'A<B<C, D>>'
Returns 'A_________'
Args:
pattern: The regex to match.
char_replacement: The character to put in place of every
character of the match.
s: The string on which to do the replacements.
Returns:
The string with every match replaced by char_replacement.
"""
while True:
matched = search(pattern, s)
if not matched:
return s
start_match_index = matched.start(0)
end_match_index = matched.end(0)
match_length = end_match_index - start_match_index
s = s[:start_match_index] + char_replacement * match_length + s[end_match_index:]
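# Illustrative call, mirroring the docstring above (assumed output):
# iteratively_replace_matches_with_char(r'<[^<>]*>', '_', 'A<B<C, D>>')
# -> 'A_________' (the inner '<C, D>' collapses first, then the outer pair)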
def _rfind_in_lines(regex, lines, start_position, not_found_position):
"""Does a reverse find starting at start position and going backwards until
a match is found.
Returns the position where the regex ended.
"""
# Put the regex in a group and precede it with a greedy expression that
# matches anything to ensure that we get the last possible match in a line.
last_in_line_regex = r'.*(' + regex + ')'
current_row = start_position.row
# Start with the given row and trim off everything past what may be matched.
current_line = lines[start_position.row][:start_position.column]
while True:
found_match = match(last_in_line_regex, current_line)
if found_match:
return Position(current_row, found_match.end(1))
# A match was not found so continue backward.
current_row -= 1
if current_row < 0:
return not_found_position
current_line = lines[current_row]
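# A small sketch with hypothetical inputs (Position is defined below):
# _rfind_in_lines(r';', ['int a;', 'int b'], Position(1, 5), Position(0, 0))
# -> Position(0, 6), i.e. just past the last ';' found searching backwards.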
def _convert_to_lower_with_underscores(text):
"""Converts all text strings in camelCase or PascalCase to lowers with underscores."""
# First add underscores before any capital letter followed by a lower case letter
# as long as it is in a word.
# (This puts an underscore before Password but not before P and A in WPAPassword).
text = sub(r'(?<=[A-Za-z0-9])([A-Z])(?=[a-z])', r'_\1', text)
# Next add underscores before capitals at the end of words if it was
# preceded by a lower case letter or number.
# (This puts an underscore before A in isA but not A in CBA).
text = sub(r'(?<=[a-z0-9])([A-Z])(?=\b)', r'_\1', text)
# Next add underscores when you have a capital letter which is followed by a capital letter
# but is not preceded by one. (This puts an underscore before A in 'WordADay').
text = sub(r'(?<=[a-z0-9])([A-Z][A-Z_])', r'_\1', text)
return text.lower()
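# A few assumed conversions, following the three substitutions above:
# _convert_to_lower_with_underscores('WPAPassword') -> 'wpa_password'
# _convert_to_lower_with_underscores('isA') -> 'is_a'
# _convert_to_lower_with_underscores('WordADay') -> 'word_a_day'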
def _create_acronym(text):
"""Creates an acronym for the given text."""
# Removes all lower case letters except those starting words.
text = sub(r'(?<!\b)[a-z]', '', text)
return text.upper()
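# Assumed example: _create_acronym('ExceptionCode') -> 'EC', which is how a
# parameter named 'ec' can later be matched against its type.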
def up_to_unmatched_closing_paren(s):
"""Splits a string into two parts up to first unmatched ')'.
Args:
s: a string which is a substring of line after '('
(e.g., "a == (b + c))").
Returns:
A pair of strings (prefix before first unmatched ')',
remainder of s after first unmatched ')'), e.g.,
up_to_unmatched_closing_paren("a == (b + c)) { ")
returns "a == (b + c)", " {".
Returns None, None if there is no unmatched ')'
"""
i = 1
for pos, c in enumerate(s):
if c == '(':
i += 1
elif c == ')':
i -= 1
if i == 0:
return s[:pos], s[pos + 1:]
return None, None
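# Sketch mirroring the docstring (assumed behavior):
# up_to_unmatched_closing_paren('a == (b + c)) { ') -> ('a == (b + c)', ' { ')
# up_to_unmatched_closing_paren('(a)') -> (None, None) # nothing unmatched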
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call check_next_include_order() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, check_next_include_order will raise an error.
_INITIAL_SECTION = 0
_CONFIG_SECTION = 1
_PRIMARY_SECTION = 2
_OTHER_SECTION = 3
_TYPE_NAMES = {
_CONFIG_HEADER: 'WebCore config.h',
_PRIMARY_HEADER: 'header this file implements',
_OTHER_HEADER: 'other header',
_MOC_HEADER: 'moc file',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing.",
_CONFIG_SECTION: "WebCore config.h.",
_PRIMARY_SECTION: 'a header this file implements.',
_OTHER_SECTION: 'other header.',
}
def __init__(self):
dict.__init__(self)
self._section = self._INITIAL_SECTION
self._visited_primary_section = False
self.header_types = dict()
def visited_primary_section(self):
return self._visited_primary_section
def check_next_include_order(self, header_type, file_is_header, primary_header_exists):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
file_is_header: Whether the file that owns this _IncludeState is itself a header.
primary_header_exists: Whether the primary header for this file exists.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
if header_type == _CONFIG_HEADER and file_is_header:
return 'Header file should not contain WebCore config.h.'
if header_type == _PRIMARY_HEADER and file_is_header:
return 'Header file should not contain itself.'
if header_type == _MOC_HEADER:
return ''
error_message = ''
if self._section != self._OTHER_SECTION:
before_error_message = ('Found %s before %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section + 1]))
after_error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
if header_type == _CONFIG_HEADER:
if self._section >= self._CONFIG_SECTION:
error_message = after_error_message
self._section = self._CONFIG_SECTION
elif header_type == _PRIMARY_HEADER:
if self._section >= self._PRIMARY_SECTION:
error_message = after_error_message
elif self._section < self._CONFIG_SECTION:
error_message = before_error_message
self._section = self._PRIMARY_SECTION
self._visited_primary_section = True
else:
assert header_type == _OTHER_HEADER
if not file_is_header and self._section < self._PRIMARY_SECTION:
if primary_header_exists:
error_message = before_error_message
self._section = self._OTHER_SECTION
return error_message
class Position(object):
"""Holds the position of something."""
def __init__(self, row, column):
self.row = row
self.column = column
def __str__(self):
return '(%s, %s)' % (self.row, self.column)
def __cmp__(self, other):
return self.row.__cmp__(other.row) or self.column.__cmp__(other.column)
class Parameter(object):
"""Information about one function parameter."""
def __init__(self, parameter, parameter_name_index, row):
self.type = parameter[:parameter_name_index].strip()
# Remove any initializers from the parameter name (e.g. int i = 5).
self.name = sub(r'=.*', '', parameter[parameter_name_index:]).strip()
self.row = row
@memoized
def lower_with_underscores_name(self):
"""Returns the parameter name in the lower with underscores format."""
return _convert_to_lower_with_underscores(self.name)
class SingleLineView(object):
"""Converts multiple lines into a single line (with line breaks replaced by a
space) to allow for easier searching."""
def __init__(self, lines, start_position, end_position):
"""Create a SingleLineView instance.
Args:
lines: a list of multiple lines to combine into a single line.
start_position: offset within lines of where to start the single line.
end_position: just after where to end (like a slice operation).
"""
# Get the rows of interest.
trimmed_lines = lines[start_position.row:end_position.row + 1]
# Remove the columns on the last line that aren't included.
trimmed_lines[-1] = trimmed_lines[-1][:end_position.column]
# Remove the columns on the first line that aren't included.
trimmed_lines[0] = trimmed_lines[0][start_position.column:]
# Create a single line with all of the parameters.
self.single_line = ' '.join(trimmed_lines)
# Keep the row lengths, so we can calculate the original row number
# given a column in the single line (adding 1 due to the space added
# during the join).
self._row_lengths = [len(line) + 1 for line in trimmed_lines]
self._starting_row = start_position.row
def convert_column_to_row(self, single_line_column_number):
"""Convert the column number from the single line into the original
line number.
Special cases:
* Columns in the added spaces are considered part of the previous line.
* Columns beyond the end of the line are consider part the last line
in the view."""
total_columns = 0
row_offset = 0
while row_offset < len(self._row_lengths) - 1 and single_line_column_number >= total_columns + self._row_lengths[row_offset]:
total_columns += self._row_lengths[row_offset]
row_offset += 1
return self._starting_row + row_offset
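# A small sketch of the view with hypothetical values:
# view = SingleLineView(['int foo(int a,', ' int b)'],
# Position(0, 8), Position(1, 7))
# view.single_line == 'int a,  int b)'
# view.convert_column_to_row(3) == 0 and view.convert_column_to_row(8) == 1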
def create_skeleton_parameters(all_parameters):
"""Converts a parameter list to a skeleton version.
The skeleton only has one word for the parameter name, one word for the type,
and commas after each parameter and only there. Everything in the skeleton
remains in the same columns as the original."""
all_simplifications = (
# Remove template parameters, function declaration parameters, etc.
r'(<[^<>]*?>)|(\([^\(\)]*?\))|(\{[^\{\}]*?\})',
# Remove all initializers.
r'=[^,]*',
# Remove :: and everything before it.
r'[^,]*::',
# Remove modifiers like &, *.
r'[&*]',
# Remove const modifiers.
r'\bconst\s+(?=[A-Za-z])',
# Remove numerical modifiers like long.
r'\b(unsigned|long|short)\s+(?=unsigned|long|short|int|char|double|float)')
skeleton_parameters = all_parameters
for simplification in all_simplifications:
skeleton_parameters = iteratively_replace_matches_with_char(simplification, ' ', skeleton_parameters)
# If there are any parameters, then add a , after the last one to
# make a regular pattern of a , following every parameter.
if skeleton_parameters.strip():
skeleton_parameters += ','
return skeleton_parameters
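# Assumed example of the skeleton transform: templates, initializers,
# qualifiers and modifiers are blanked with spaces so columns line up with
# the original text, and a trailing comma is appended:
# create_skeleton_parameters('const Vector<String>& names, int size = 0')
# -> '      Vector          names, int size    ,' (spacing approximate)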
def find_parameter_name_index(skeleton_parameter):
"""Determines where the parametere name starts given the skeleton parameter."""
# The first space from the right in the simplified parameter is where the parameter
# name starts unless the first space is before any content in the simplified parameter.
before_name_index = skeleton_parameter.rstrip().rfind(' ')
if before_name_index != -1 and skeleton_parameter[:before_name_index].strip():
return before_name_index + 1
return len(skeleton_parameter)
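# Assumed example: in the skeleton parameter '      Vector   names', the
# last space separates the type from the name, so the index of 'n' (15)
# is returned: find_parameter_name_index('      Vector   names') -> 15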
def parameter_list(elided_lines, start_position, end_position):
"""Generator for a function's parameters."""
# Create new positions that omit the outer parenthesis of the parameters.
start_position = Position(row=start_position.row, column=start_position.column + 1)
end_position = Position(row=end_position.row, column=end_position.column - 1)
single_line_view = SingleLineView(elided_lines, start_position, end_position)
skeleton_parameters = create_skeleton_parameters(single_line_view.single_line)
end_index = -1
while True:
# Find the end of the next parameter.
start_index = end_index + 1
end_index = skeleton_parameters.find(',', start_index)
# No comma means that all parameters have been parsed.
if end_index == -1:
return
row = single_line_view.convert_column_to_row(end_index)
# Parse the parameter into a type and parameter name.
skeleton_parameter = skeleton_parameters[start_index:end_index]
name_offset = find_parameter_name_index(skeleton_parameter)
parameter = single_line_view.single_line[start_index:end_index]
yield Parameter(parameter, name_offset, row)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body.
Attributes:
min_confidence: The minimum confidence level to use while checking style.
"""
_NORMAL_TRIGGER = 250 # for min_confidence 0; doubled for each extra level.
_TEST_TRIGGER = 400 # about 60% more than _NORMAL_TRIGGER.
def __init__(self, min_confidence):
self.min_confidence = min_confidence
self.current_function = ''
self.in_a_function = False
self.lines_in_function = 0
# Make sure these will not be mistaken for real positions (even when a
# small amount is added to them).
self.body_start_position = Position(-1000, 0)
self.end_position = Position(-1000, 0)
def begin(self, function_name, function_name_start_position, body_start_position, end_position,
parameter_start_position, parameter_end_position, clean_lines):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
function_name_start_position: Position in elided where the function name starts.
body_start_position: Position in elided of the { or the ; for a prototype.
end_position: Position in elided just after the final } (or the ; for a declaration).
parameter_start_position: Position in elided of the '(' for the parameters.
parameter_end_position: Position in elided just after the ')' for the parameters.
clean_lines: A CleansedLines instance containing the file.
"""
self.in_a_function = True
self.lines_in_function = -1 # Don't count the open brace line.
self.current_function = function_name
self.function_name_start_position = function_name_start_position
self.body_start_position = body_start_position
self.end_position = end_position
self.is_declaration = clean_lines.elided[body_start_position.row][body_start_position.column] == ';'
self.parameter_start_position = parameter_start_position
self.parameter_end_position = parameter_end_position
self.is_pure = False
if self.is_declaration:
characters_after_parameters = SingleLineView(clean_lines.elided, parameter_end_position, body_start_position).single_line
self.is_pure = bool(match(r'\s*=\s*0\s*', characters_after_parameters))
self._clean_lines = clean_lines
self._parameter_list = None
def modifiers_and_return_type(self):
"""Returns the modifiers and the return type."""
# Go backwards from where the function name is until we encounter one of several things:
# ';' or '{' or '}' or 'private:', etc. or '#' or return Position(0, 0)
elided = self._clean_lines.elided
start_modifiers = _rfind_in_lines(r';|\{|\}|((private|public|protected):)|(#.*)',
elided, self.parameter_start_position, Position(0, 0))
return SingleLineView(elided, start_modifiers, self.function_name_start_position).single_line.strip()
def parameter_list(self):
if not self._parameter_list:
# Store the final result as a tuple since that is immutable.
self._parameter_list = tuple(parameter_list(self._clean_lines.elided, self.parameter_start_position, self.parameter_end_position))
return self._parameter_list
def count(self, line_number):
"""Count line in current function body."""
if self.in_a_function and line_number >= self.body_start_position.row:
self.lines_in_function += 1
def check(self, error, line_number):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
line_number: The number of the line to check.
"""
if match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2 ** self.min_confidence
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# base_trigger => 0, 2 * base_trigger => 1, 4 * base_trigger => 2, and so on.
if error_level > 5:
error_level = 5
error(line_number, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def end(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def full_name(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def repository_name(self):
"""Full name after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.full_name()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we
# recursively look up the directory tree for the top
# of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir)
and not os.path.exists(os.path.join(root_dir, ".git"))
and not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cpp', Split() would
return ('chrome/browser', 'browser', '.cpp')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.repository_name()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def base_name(self):
"""File base name - text after the final slash, before the final period."""
return self.split()[1]
def extension(self):
"""File extension - text following the final period."""
return self.split()[2]
def no_extension(self):
"""File has no source file extension."""
return '/'.join(self.split()[0:2])
def is_source(self):
"""File has a source file extension."""
return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care to remove spaces as well, so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def is_cpp_string(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: A partial line of code, from the start of the line up to the current position.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
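# Quick sketch of the quote-parity check above (assumed behavior):
# is_cpp_string('a = "unterminated') -> True # odd number of real quotes
# is_cpp_string('a = "closed"') -> False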
def find_next_multi_line_comment_start(lines, line_index):
"""Find the beginning marker for a multiline comment."""
while line_index < len(lines):
if lines[line_index].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[line_index].strip().find('*/', 2) < 0:
return line_index
line_index += 1
return len(lines)
def find_next_multi_line_comment_end(lines, line_index):
"""We are inside a comment, find the end marker."""
while line_index < len(lines):
if lines[line_index].strip().endswith('*/'):
return line_index
line_index += 1
return len(lines)
def remove_multi_line_comments_from_range(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def remove_multi_line_comments(lines, error):
"""Removes multiline (c-style) comments from lines."""
line_index = 0
while line_index < len(lines):
line_index_begin = find_next_multi_line_comment_start(lines, line_index)
if line_index_begin >= len(lines):
return
line_index_end = find_next_multi_line_comment_end(lines, line_index_begin)
if line_index_end >= len(lines):
error(line_index_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
remove_multi_line_comments_from_range(lines, line_index_begin, line_index_end + 1)
line_index = line_index_end + 1
def cleanse_comments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
comment_position = line.find('//')
if comment_position != -1 and not is_cpp_string(line[:comment_position]):
line = line[:comment_position]
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
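# Illustrative calls (assumed outputs):
# cleanse_comments('int a; // trailing') -> 'int a; '
# cleanse_comments('f(/* inline */ x)') -> 'f(x)'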
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self._num_lines = len(lines)
for line_number in range(len(lines)):
self.lines.append(cleanse_comments(lines[line_number]))
elided = self.collapse_strings(lines[line_number])
self.elided.append(cleanse_comments(elided))
def num_lines(self):
"""Returns the number of lines represented."""
return self._num_lines
@staticmethod
def collapse_strings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
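# Assumed example of the collapsing above (applied to non-#include lines):
# CleansedLines.collapse_strings(r'printf("hello \"world\"");')
# -> 'printf("");'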
def close_expression(elided, position):
"""If input points to ( or { or [, finds the position that closes it.
If elided[position.row][position.column] points to a '(' or '{' or '[',
finds the line_number/pos that correspond to the closing of the expression.
Args:
elided: A CleansedLines.elided instance containing the file.
position: The position of the opening item.
Returns:
The Position *past* the closing brace, or Position(len(elided), -1)
if we never find a close. Note we ignore strings and comments when matching.
"""
line = elided[position.row]
start_character = line[position.column]
if start_character == '(':
enclosing_character_regex = r'[\(\)]'
elif start_character == '[':
enclosing_character_regex = r'[\[\]]'
elif start_character == '{':
enclosing_character_regex = r'[\{\}]'
else:
return Position(len(elided), -1)
current_column = position.column + 1
line_number = position.row
net_open = 1
for line in elided[position.row:]:
line = line[current_column:]
# Search the current line for opening and closing characters.
while True:
next_enclosing_character = search(enclosing_character_regex, line)
# No more on this line.
if not next_enclosing_character:
break
current_column += next_enclosing_character.end(0)
line = line[next_enclosing_character.end(0):]
if next_enclosing_character.group(0) == start_character:
net_open += 1
else:
net_open -= 1
if not net_open:
return Position(line_number, current_column)
# Proceed to the next line.
line_number += 1
current_column = 0
# The given item was not closed.
return Position(len(elided), -1)
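# A minimal sketch with hypothetical lines:
# elided = ['int foo(int a,', ' int b) {']
# close_expression(elided, Position(0, 7)) -> Position(1, 7),
# one column past the ')' that closes the '(' at row 0, column 7.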
def check_for_copyright(lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I):
break
else: # means no copyright line was found
error(0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def get_header_guard_cpp_variable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores the original filename in case the style checker is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
standard_name = sub(r'[-.\s]', '_', os.path.basename(filename))
# Files under WTF typically have header guards that start with WTF_.
if '/wtf/' in filename:
special_name = "WTF_" + standard_name
else:
special_name = standard_name
return (special_name, standard_name)
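# Assumed examples of the guard names produced above:
# get_header_guard_cpp_variable('Source/WebCore/dom/Document.h')
# -> ('Document_h', 'Document_h')
# get_header_guard_cpp_variable('Source/WTF/wtf/Vector.h')
# -> ('WTF_Vector_h', 'Vector_h')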
def check_for_header_guard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present, or if the guard
name does not match the style expected for the file.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = get_header_guard_cpp_variable(filename)
ifndef = None
ifndef_line_number = 0
define = None
for line_number, line in enumerate(lines):
line_split = line.split()
if len(line_split) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and line_split[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = line_split[1]
ifndef_line_number = line_number
if not define and line_split[0] == '#define':
define = line_split[1]
if define and ifndef:
break
if not ifndef or not define or ifndef != define:
error(0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar[0])
return
# The guard should be File_h.
if ifndef not in cppvar:
error(ifndef_line_number, 'build/header_guard', 5,
'#ifndef header guard has wrong style, please use: %s' % cppvar[0])
def check_for_unicode_replacement_characters(lines, error):
"""Logs an error for each line containing Unicode replacement characters.
These indicate that either the file contained invalid UTF-8 (likely)
or Unicode replacement characters (which it shouldn't). Note that
it's possible for this to throw off line numbering if the invalid
UTF-8 occurred adjacent to a newline.
Args:
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for line_number, line in enumerate(lines):
if u'\ufffd' in line:
error(line_number, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
def check_for_new_line_at_eof(lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def check_for_multiline_comments_and_strings(clean_lines, line_number, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(line_number, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(line_number, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. They\'re '
'ugly and unnecessary, and you should use concatenation instead".')
_THREADING_LIST = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('rand(', 'rand_r('),
('readdir(', 'readdir_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def check_posix_threading(clean_lines, line_number, error):
"""Checks for calls to thread-unsafe functions.
Much code was originally written without consideration for
multi-threading. Also, engineers rely on their old experience; many
learned POSIX before its threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number]
for single_thread_function, multithread_safe_function in _THREADING_LIST:
index = line.find(single_thread_function)
# Comparisons made explicit for clarity
if index >= 0 and (index == 0 or (not line[index - 1].isalnum()
and line[index - 1] not in ('_', '.', '>'))):
error(line_number, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
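# For instance, a line containing 'tm = localtime(&now);' would be flagged
# with a suggestion to use localtime_r(...) instead (an assumed example).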
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def check_invalid_increment(clean_lines, line_number, error):
"""Checks for invalid increment *count++.
For example, the following function:
void increment_counter(int* count) {
*count++;
}
is invalid because it effectively does count++ (moving the pointer) and
should be replaced with ++*count, (*count)++ or *count += 1.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(line_number, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _ClassInfo(object):
"""Stores information about a class."""
def __init__(self, name, line_number):
self.name = name
self.line_number = line_number
self.seen_open_brace = False
self.is_derived = False
self.virtual_method_line_number = None
self.has_virtual_destructor = False
self.brace_depth = 0
class _ClassState(object):
"""Holds the current state of the parse relating to class declarations.
It maintains a stack of _ClassInfos representing the parser's guess
as to the current nesting of class declarations. The innermost class
is at the top (back) of the stack. Typically, the stack will either
be empty or have exactly one entry.
"""
def __init__(self):
self.classinfo_stack = []
def check_finished(self, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
Args:
error: The function to call with any errors found.
"""
if self.classinfo_stack:
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpp_style_unittest.py for an example of this.
error(self.classinfo_stack[0].line_number, 'build/class', 5,
'Failed to find complete declaration of class %s' %
self.classinfo_stack[0].name)
class _FileState(object):
def __init__(self, clean_lines, file_extension):
self._did_inside_namespace_indent_warning = False
self._clean_lines = clean_lines
if file_extension in ['m', 'mm']:
self._is_objective_c = True
self._is_c = False
elif file_extension == 'h':
# In the case of header files, it is unknown if the file
# is c / objective c or not, so set this value to None and then
# if it is requested, use heuristics to guess the value.
self._is_objective_c = None
self._is_c = None
elif file_extension == 'c':
self._is_c = True
self._is_objective_c = False
else:
self._is_objective_c = False
self._is_c = False
def set_did_inside_namespace_indent_warning(self):
self._did_inside_namespace_indent_warning = True
def did_inside_namespace_indent_warning(self):
return self._did_inside_namespace_indent_warning
def is_objective_c(self):
if self._is_objective_c is None:
for line in self._clean_lines.elided:
# Starting with @ or #import seem like the best indications
# that we have an Objective C file.
if line.startswith("@") or line.startswith("#import"):
self._is_objective_c = True
break
else:
self._is_objective_c = False
return self._is_objective_c
def is_c(self):
if self._is_c is None:
for line in self._clean_lines.lines:
# if extern "C" is found, then it is a good indication
# that we have a C header file.
if line.startswith('extern "C"'):
self._is_c = True
break
else:
self._is_c = False
return self._is_c
def is_c_or_objective_c(self):
"""Return whether the file extension corresponds to C or Objective-C."""
return self.is_c() or self.is_objective_c()
class _EnumState(object):
"""Maintains whether currently in an enum declaration, and checks whether
enum declarations follow the style guide.
"""
def __init__(self):
self.in_enum_decl = False
self.is_webidl_enum = False
def process_clean_line(self, line):
# FIXME: The regular expressions for expr_all_uppercase and expr_enum_end only accept integers
# and identifiers for the value of the enumerator, but do not accept any other constant
# expressions. However, this is sufficient for now (11/27/2012).
expr_all_uppercase = r'\s*[A-Z0-9_]+\s*(?:=\s*[a-zA-Z0-9]+\s*)?,?\s*$'
expr_starts_lowercase = r'\s*[a-z]'
expr_enum_end = r'}\s*(?:[a-zA-Z0-9]+\s*(?:=\s*[a-zA-Z0-9]+)?)?\s*;\s*'
expr_enum_start = r'\s*enum(?:\s+[a-zA-Z0-9]+)?\s*\{?\s*'
if self.in_enum_decl:
if match(r'\s*' + expr_enum_end + r'$', line):
self.in_enum_decl = False
self.is_webidl_enum = False
elif match(expr_all_uppercase, line):
return self.is_webidl_enum
elif match(expr_starts_lowercase, line):
return False
else:
matched = match(expr_enum_start + r'$', line)
if matched:
self.in_enum_decl = True
else:
matched = match(expr_enum_start + r'(?P<members>.*)' + expr_enum_end + r'$', line)
if matched:
members = matched.group('members').split(',')
found_invalid_member = False
for member in members:
if match(expr_all_uppercase, member):
found_invalid_member = not self.is_webidl_enum
if match(expr_starts_lowercase, member):
found_invalid_member = True
if found_invalid_member:
self.is_webidl_enum = False
return False
return True
return True
def check_for_non_standard_constructs(clean_lines, line_number,
class_state, error):
"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
- classes with virtual methods need virtual destructors (compiler warning
available, but not turned on yet.)
Additionally, check for constructor/destructor style violations as it
is very convenient to do so while checking for gcc-2 compliance.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes parameters:
line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[line_number]
if search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(line_number, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if search(r'printf\s*\(.*".*%\d+\$', line):
error(line_number, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if search(r'("|\').*\\(%|\[|\(|{)', line):
error(line_number, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[line_number]
if search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(auto|register|static|extern|typedef)\b',
line):
error(line_number, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if match(r'\s*#\s*endif\s*[^/\s]+', line):
error(line_number, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(line_number, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line):
error(line_number, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
# Track class entry and exit, and attempt to find cases within the
# class declaration that don't meet the C++ style
# guidelines. Tracking is very dependent on the code matching Google
# style guidelines, but it seems to perform well enough in testing
# to be a worthwhile addition to the checks.
classinfo_stack = class_state.classinfo_stack
# Look for a class declaration
class_decl_match = match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
if class_decl_match:
classinfo_stack.append(_ClassInfo(class_decl_match.group(3), line_number))
# Everything else in this function uses the top of the stack if it's
# not empty.
if not classinfo_stack:
return
classinfo = classinfo_stack[-1]
# If the opening brace hasn't been seen look for it and also
# parent class declarations.
if not classinfo.seen_open_brace:
# If the line has a ';' in it, assume it's a forward declaration or
# a single-line class declaration, which we won't process.
if line.find(';') != -1:
classinfo_stack.pop()
return
classinfo.seen_open_brace = (line.find('{') != -1)
# Look for a bare ':'
if search('(^|[^:]):($|[^:])', line):
classinfo.is_derived = True
if not classinfo.seen_open_brace:
return # Everything else in this function is for after open brace
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args
and args.group(1) != 'void'
and not match(r'(const\s+)?%s\s*&' % re.escape(base_classname),
args.group(1).strip())):
error(line_number, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
# Look for methods declared virtual.
if search(r'\bvirtual\b', line):
classinfo.virtual_method_line_number = line_number
# Only look for a destructor declaration on the same line. It would
# be extremely unlikely for the destructor declaration to occupy
# more than one line.
if search(r'~%s\s*\(' % base_classname, line):
classinfo.has_virtual_destructor = True
# Look for class end.
brace_depth = classinfo.brace_depth
brace_depth = brace_depth + line.count('{') - line.count('}')
if brace_depth <= 0:
classinfo = classinfo_stack.pop()
# Try to detect missing virtual destructor declarations.
# For now, only warn if a non-derived class with virtual methods lacks
# a virtual destructor. This is to make it less likely that people will
# declare derived virtual destructors without declaring the base
# destructor virtual.
if ((classinfo.virtual_method_line_number is not None)
and (not classinfo.has_virtual_destructor)
and (not classinfo.is_derived)): # Only warn for base classes
error(classinfo.line_number, 'runtime/virtual', 4,
'The class %s probably needs a virtual destructor due to '
'having virtual method(s), one declared at line %d.'
% (classinfo.name, classinfo.virtual_method_line_number))
else:
classinfo.brace_depth = brace_depth
def check_spacing_for_function_call(line, line_number, error):
"""Checks for the correctness of various spacing around function calls.
Args:
line: The text of the line to check.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/foreach/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
function_call = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bforeach\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
matched = search(pattern, line)
if matched:
function_call = matched.group(1) # look inside the parens for function calls
break
# Except in if/for/foreach/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not search(r'\b(if|for|foreach|while|switch|return|new|delete)\b', function_call)
# Ignore pointers/references to functions.
and not search(r' \([^)]+\)\([^)]*(\)|,$)', function_call)
# Ignore pointers/references to arrays.
and not search(r' \([^)]+\)\[[^\]]+\]', function_call)):
if search(r'\w\s*\([ \t](?!\s*\\$)', function_call): # a ( used for a fn call
error(line_number, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif search(r'\([ \t]+(?!(\s*\\)|\()', function_call):
error(line_number, 'whitespace/parens', 2,
'Extra space after (')
if (search(r'\w\s+\(', function_call)
and not match(r'\s*(#|typedef)', function_call)):
error(line_number, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if search(r'[^)\s]\s+\)(?!\s*$|{\s*$)', function_call):
error(line_number, 'whitespace/parens', 2,
'Extra space before )')
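# Assumed examples of lines the checks above would flag:
# 'foo( bar)' -> 'Extra space after ( in function call'
# 'foo (bar)' -> 'Extra space before ( in function call'
# 'foo(bar ) + 1' -> 'Extra space before )' (a ')' at end of line is
# assumed to close a control statement and is deliberately not flagged)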
def is_blank_line(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def detect_functions(clean_lines, line_number, function_state, error):
"""Finds where functions start and end.
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
# Are we now past the end of a function?
if function_state.end_position.row + 1 == line_number:
function_state.end()
# If we're in a function, don't try to detect a new one.
if function_state.in_a_function:
return
lines = clean_lines.lines
line = lines[line_number]
raw = clean_lines.raw_lines
raw_line = raw[line_number]
# Lines ending with a \ indicate a macro. Don't try to check them.
if raw_line.endswith('\\'):
return
regexp = r'\s*(\w(\w|::|\*|\&|\s|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\(' # decls * & space::name( ...
match_result = match(regexp, line)
if not match_result:
return
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name != 'TEST' and function_name != 'TEST_F' and match(r'[A-Z_]+$', function_name):
return
joined_line = ''
for start_line_number in xrange(line_number, clean_lines.num_lines()):
start_line = clean_lines.elided[start_line_number]
joined_line += ' ' + start_line.lstrip()
body_match = search(r'{|;', start_line)
if body_match:
body_start_position = Position(start_line_number, body_match.start(0))
# Replace template constructs with _ so that no spaces remain in the function name,
# while keeping the column numbers of other characters the same as "line".
line_with_no_templates = iteratively_replace_matches_with_char(r'<[^<>]*>', '_', line)
match_function = search(r'((\w|:|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\(', line_with_no_templates)
if not match_function:
return # The '(' must have been inside of a template.
# Use the column numbers from the modified line to find the
# function name in the original line.
function = line[match_function.start(1):match_function.end(1)]
function_name_start_position = Position(line_number, match_function.start(1))
if match(r'TEST', function): # Handle TEST... macros
parameter_regexp = search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
parameter_start_position = Position(line_number, match_function.end(1))
parameter_end_position = close_expression(clean_lines.elided, parameter_start_position)
if parameter_end_position.row == len(clean_lines.elided):
# No end was found.
return
if start_line[body_start_position.column] == ';':
end_position = Position(body_start_position.row, body_start_position.column + 1)
else:
end_position = close_expression(clean_lines.elided, body_start_position)
# Check for nonsensical positions. (This happens in test cases which check code snippets.)
if parameter_end_position > body_start_position:
return
function_state.begin(function, function_name_start_position, body_start_position, end_position,
parameter_start_position, parameter_end_position, clean_lines)
return
# No body for the function (or evidence of a non-function) was found.
error(line_number, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
def check_for_function_lengths(clean_lines, line_number, function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[line_number]
raw = clean_lines.raw_lines
raw_line = raw[line_number]
if function_state.end_position.row == line_number: # last line
if not search(r'\bNOLINT\b', raw_line):
function_state.check(error, line_number)
elif not match(r'^\s*$', line):
function_state.count(line_number) # Count non-blank/non-comment lines.
def _check_parameter_name_against_text(parameter, text, error):
"""Checks to see if the parameter name is contained within the text.
Return false if the check failed (i.e. an error was produced).
"""
# Treat 'lower with underscores' as a canonical form because it is
# case insensitive while still retaining word breaks. (This ensures that
# 'elate' doesn't look like a duplicate of 'NateLate'.)
canonical_parameter_name = parameter.lower_with_underscores_name()
# Appends "object" to all text to catch variables that did the same (but only
# do this when the parameter name is more than a single character to avoid
# flagging 'b' which may be an ok variable when used in an rgba function).
if len(canonical_parameter_name) > 1:
text = sub(r'(\w)\b', r'\1Object', text)
canonical_text = _convert_to_lower_with_underscores(text)
# Used to detect cases like ec for ExceptionCode.
acronym = _create_acronym(text).lower()
if canonical_text.find(canonical_parameter_name) != -1 or acronym.find(canonical_parameter_name) != -1:
error(parameter.row, 'readability/parameter_name', 5,
'The parameter name "%s" adds no information, so it should be removed.' % parameter.name)
return False
return True
def check_function_definition_and_pass_ptr(type_text, row, location_description, error):
"""Check that function definitions for use Pass*Ptr instead of *Ptr.
Args:
type_text: A string containing the type. (For return values, it may contain more than the type.)
row: The row number of the type.
location_description: Used to indicate where the type is. This is either 'parameter' or 'return'.
error: The function to call with any errors found.
"""
match_ref_or_own_ptr = r'(?=\W|^)(Ref|Own)Ptr(?=\W)'
bad_type_usage = search(match_ref_or_own_ptr, type_text)
if not bad_type_usage or type_text.endswith('&') or type_text.endswith('*'):
return
type_name = bad_type_usage.group(0)
error(row, 'readability/pass_ptr', 5,
'The %s type should use Pass%s instead of %s.' % (location_description, type_name, type_name))
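# Illustrative sketch (assumed declarations, not checker code): a factory
# whose return type is written as
#     PassRefPtr<Node> createNode();
# passes the check above, while
#     RefPtr<Node> createNode();
# is reported as "The return type should use PassRefPtr instead of RefPtr."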
def check_function_definition(filename, file_extension, clean_lines, line_number, function_state, error):
"""Check that function definitions for style issues.
Specifically, check that parameter names in declarations add information.
Args:
filename: Filename of the file that is being processed.
file_extension: The current file extension, without the leading dot.
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
if line_number != function_state.body_start_position.row:
return
modifiers_and_return_type = function_state.modifiers_and_return_type()
check_function_definition_and_pass_ptr(modifiers_and_return_type, function_state.function_name_start_position.row, 'return', error)
parameter_list = function_state.parameter_list()
for parameter in parameter_list:
check_function_definition_and_pass_ptr(parameter.type, parameter.row, 'parameter', error)
# Do checks specific to function declarations and parameter names.
if not function_state.is_declaration or not parameter.name:
continue
# Check the parameter name against the function name for single parameter set functions.
if len(parameter_list) == 1 and match('set[A-Z]', function_state.current_function):
trimmed_function_name = function_state.current_function[len('set'):]
if not _check_parameter_name_against_text(parameter, trimmed_function_name, error):
continue # Since an error was noted for this name, move to the next parameter.
# Check the parameter name against the type.
if not _check_parameter_name_against_text(parameter, parameter.type, error):
continue # Since an error was noted for this name, move to the next parameter.
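# Illustrative sketch (assumed declaration, not checker code): for a
# single-parameter setter such as
#     void setCount(size_t count);
# the name "count" repeats the information already in "setCount", so the
# parameter-name check above reports it as removable.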
def check_pass_ptr_usage(clean_lines, line_number, function_state, error):
"""Check for proper usage of Pass*Ptr.
Currently this is limited to detecting declarations of Pass*Ptr
variables inside of functions.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
if not function_state.in_a_function:
return
lines = clean_lines.lines
line = lines[line_number]
if line_number > function_state.body_start_position.row:
matched_pass_ptr = match(r'^\s*Pass([A-Z][A-Za-z]*)Ptr<', line)
if matched_pass_ptr:
type_name = 'Pass%sPtr' % matched_pass_ptr.group(1)
error(line_number, 'readability/pass_ptr', 5,
'Local variables should never be %s (see '
'http://webkit.org/coding/RefPtr.html).' % type_name)
def check_for_leaky_patterns(clean_lines, line_number, function_state, error):
"""Check for constructs known to be leak prone.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[line_number]
matched_get_dc = search(r'\b(?P<function_name>GetDC(Ex)?)\s*\(', line)
if matched_get_dc:
error(line_number, 'runtime/leaky_pattern', 5,
'Use the class HWndDC instead of calling %s to avoid potential '
'memory leaks.' % matched_get_dc.group('function_name'))
matched_create_dc = search(r'\b(?P<function_name>Create(Compatible)?DC)\s*\(', line)
matched_own_dc = search(r'\badoptPtr\b', line)
if matched_create_dc and not matched_own_dc:
error(line_number, 'runtime/leaky_pattern', 5,
'Use adoptPtr and OwnPtr<HDC> when calling %s to avoid potential '
'memory leaks.' % matched_create_dc.group('function_name'))
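# Minimal usage sketch (hypothetical helper, not called by the checker):
# collects the messages check_for_leaky_patterns would emit for each line,
# using the (line_number, category, confidence, message) callback signature
# that error functions in this file expect.
def _example_collect_leaky_pattern_messages(clean_lines):
    messages = []
    def collect(line_number, category, confidence, message):
        messages.append((line_number, category, message))
    for line_number in range(clean_lines.num_lines()):
        # function_state is unused by this particular check, so None is safe.
        check_for_leaky_patterns(clean_lines, line_number, None, collect)
    return messages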
def check_spacing(file_extension, clean_lines, line_number, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, one
space between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't have too many
blank lines in a row.
Args:
file_extension: The current file extension, without the leading dot.
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.raw_lines
line = raw[line_number]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (i.e., right before a line like '}').
if is_blank_line(line):
elided = clean_lines.elided
previous_line = elided[line_number - 1]
previous_brace = previous_line.rfind('{')
# FIXME: Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if (previous_brace != -1 and previous_line[previous_brace:].find('}') == -1
and previous_line[:previous_brace].find('namespace') == -1):
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into an 80 column line.
exception = False
if match(r' {6}\w', previous_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = line_number - 2
while (search_position >= 0
and match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
previous_line)
or match(r' {4}:', previous_line))
if not exception:
error(line_number, 'whitespace/blank_line', 2,
'Blank line at the start of a code block. Is this needed?')
# This doesn't ignore whitespace at the end of a namespace block
# because that is too hard without pairing open/close braces;
# however, a special exception is made for namespace closing
# brackets which have a comment containing "namespace".
#
# Also, ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if line_number + 1 < clean_lines.num_lines():
next_line = raw[line_number + 1]
if (next_line
and match(r'\s*}', next_line)
and next_line.find('namespace') == -1
and next_line.find('} else ') == -1):
error(line_number, 'whitespace/blank_line', 3,
'Blank line at the end of a code block. Is this needed?')
# Next, we check for proper spacing with respect to comments.
comment_position = line.find('//')
if comment_position != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity
if (line.count('"', 0, comment_position) - line.count('\\"', 0, comment_position)) % 2 == 0: # not in quotes
# Allow one space before end of line comment.
if (not match(r'^\s*$', line[:comment_position])
and (comment_position >= 1
and ((line[comment_position - 1] not in string.whitespace)
or (comment_position >= 2
and line[comment_position - 2] in string.whitespace)))):
error(line_number, 'whitespace/comments', 5,
'One space before end of line comments')
# There should always be a space between the // and the comment
commentend = comment_position + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or they begin with multiple slashes followed by a space:
# //////// Header comment
matched = (search(r'[=/-]{4,}\s*$', line[commentend:])
or search(r'^/+ ', line[commentend:]))
if not matched:
error(line_number, 'whitespace/comments', 4,
'Should have a space between // and comment')
# There should only be one space after punctuation in a comment.
if search(r'[.!?,;:]\s\s+\w', line[comment_position:]):
error(line_number, 'whitespace/comments', 5,
'Should have only a single space after punctuation in a comment.')
line = clean_lines.elided[line_number] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
line = sub(r'operator(==|!=|<|<<|<=|>=|>>|>|\+=|-=|\*=|/=|%=|&=|\|=|^=|<<=|>>=|/)\(', 'operator\(', line)
# Don't try to do spacing checks for #include or #import statements at
# minimum because it messes up checks for spacing around /
if match(r'\s*#\s*(?:include|import)', line):
return
if search(r'[\w.]=[\w.]', line):
error(line_number, 'whitespace/operators', 4,
'Missing spaces around =')
# FIXME: It's not ok to have spaces around binary operators like .
# You should always have whitespace around binary operators.
# Alas, we can't test < or > because they're legitimately used sans spaces
# (a->b, vector<int> a). The only time we can tell is a < with no >, and
# only if it's not template params list spilling into the next line.
matched = search(r'[^<>=!\s](==|!=|\+=|-=|\*=|/=|/|\|=|&=|<<=|>>=|<=|>=|\|\||\||&&|>>|<<)[^<>=!\s]', line)
if not matched:
# Note that while it seems that the '<[^<]*' term in the following
# regexp could be simplified to '<.*', which would indeed match
# the same class of strings, the [^<] means that searching for the
# regexp takes linear rather than quadratic time.
if not search(r'<[^<]*,\s*$', line): # template params spill
matched = search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
if matched:
error(line_number, 'whitespace/operators', 3,
'Missing spaces around %s' % matched.group(1))
# There shouldn't be space around unary operators
matched = search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if matched:
error(line_number, 'whitespace/operators', 4,
'Extra space for operator %s' % matched.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
matched = search(r' (if\(|for\(|foreach\(|while\(|switch\()', line)
if matched:
error(line_number, 'whitespace/parens', 5,
'Missing space before ( in %s' % matched.group(1))
# For if/for/foreach/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
matched = search(r'\b(?P<statement>if|for|foreach|while|switch)\s*\((?P<remainder>.*)$', line)
if matched:
statement = matched.group('statement')
condition, rest = up_to_unmatched_closing_paren(matched.group('remainder'))
if condition is not None:
condition_match = search(r'(?P<leading>[ ]*)(?P<separator>.).*[^ ]+(?P<trailing>[ ]*)', condition)
if condition_match:
n_leading = len(condition_match.group('leading'))
n_trailing = len(condition_match.group('trailing'))
if n_leading != 0:
for_exception = statement == 'for' and condition.startswith(' ;')
if not for_exception:
error(line_number, 'whitespace/parens', 5,
'Extra space after ( in %s' % statement)
if n_trailing != 0:
for_exception = statement == 'for' and condition.endswith('; ')
if not for_exception:
error(line_number, 'whitespace/parens', 5,
'Extra space before ) in %s' % statement)
# Do not check for more than one command in macros
in_preprocessor_directive = match(r'\s*#', line)
if not in_preprocessor_directive and not match(r'((\s*{\s*}?)|(\s*;?))\s*\\?$', rest):
error(line_number, 'whitespace/parens', 4,
'More than one command on the same line in %s' % statement)
# You should always have a space after a comma (either as fn arg or operator)
if search(r',[^\s]', line):
error(line_number, 'whitespace/comma', 3,
'Missing space after ,')
matched = search(r'^\s*(?P<token1>[a-zA-Z0-9_\*&]+)\s\s+(?P<token2>[a-zA-Z0-9_\*&]+)', line)
if matched:
error(line_number, 'whitespace/declaration', 3,
'Extra space between %s and %s' % (matched.group('token1'), matched.group('token2')))
if file_extension == 'cpp':
# C++ should have the & or * beside the type not the variable name.
matched = match(r'\s*\w+(?<!\breturn|\bdelete)\s+(?P<pointer_operator>\*|\&)\w+', line)
if matched:
error(line_number, 'whitespace/declaration', 3,
'Declaration has space between type name and %s in %s' % (matched.group('pointer_operator'), matched.group(0).strip()))
elif file_extension == 'c':
# C Pointer declaration should have the * beside the variable not the type name.
matched = search(r'^\s*\w+\*\s+\w+', line)
if matched:
error(line_number, 'whitespace/declaration', 3,
'Declaration has space between * and variable name in %s' % matched.group(0).strip())
# Next we will look for issues with function calls.
check_spacing_for_function_call(line, line_number, error)
# Except after an opening paren, you should have spaces before your braces.
# And since you should never have braces at the beginning of a line, this is
# an easy test.
if search(r'[^ ({]{', line):
error(line_number, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if search(r'}else', line):
error(line_number, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if search(r'\w\s+\[', line) and not search(r'delete\s+\[', line):
error(line_number, 'whitespace/braces', 5,
'Extra space before [')
# There should always be a single space in between braces on the same line.
if search(r'\{\}', line):
error(line_number, 'whitespace/braces', 5, 'Missing space inside { }.')
if search(r'\{\s\s+\}', line):
error(line_number, 'whitespace/braces', 5, 'Too many spaces inside { }.')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if search(r':\s*;\s*$', line):
error(line_number, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use { } instead.')
elif search(r'^\s*;\s*$', line):
error(line_number, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use { } instead.')
elif (search(r'\s+;\s*$', line) and not search(r'\bfor\b', line)):
error(line_number, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use { } instead.')
elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line)
and line.count('(') == line.count(')')
# Allow do {} while();
and not search(r'}\s*while', line)):
error(line_number, 'whitespace/semicolon', 5,
'Semicolon defining empty statement for this loop. Use { } instead.')
def get_previous_non_blank_line(clean_lines, line_number):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
line_number: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
previous_line_number = line_number - 1
while previous_line_number >= 0:
previous_line = clean_lines.elided[previous_line_number]
if not is_blank_line(previous_line): # if not a blank line...
return (previous_line, previous_line_number)
previous_line_number -= 1
return ('', -1)
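# Minimal usage sketch (hypothetical helper): callers that only need the
# text of the previous non-blank line can discard the returned index.
def _example_previous_non_blank_text(clean_lines, line_number):
    previous_line, _ = get_previous_non_blank_line(clean_lines, line_number)
    return previous_line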
def check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error):
"""Looks for indentation errors inside of namespaces.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_extension: The extension (dot not included) of the file.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
namespace_match = match(r'(?P<namespace_indentation>\s*)namespace\s+\S+\s*{\s*$', line)
if not namespace_match:
return
current_indentation_level = len(namespace_match.group('namespace_indentation'))
if current_indentation_level > 0:
# Don't warn about an indented namespace if we already warned about indented code.
if not file_state.did_inside_namespace_indent_warning():
error(line_number, 'whitespace/indent', 4,
'namespace should never be indented.')
return
looking_for_semicolon = False
line_offset = 0
in_preprocessor_directive = False
for current_line in clean_lines.elided[line_number + 1:]:
line_offset += 1
if not current_line.strip():
continue
if not current_indentation_level:
if not (in_preprocessor_directive or looking_for_semicolon):
if not match(r'\S', current_line) and not file_state.did_inside_namespace_indent_warning():
file_state.set_did_inside_namespace_indent_warning()
error(line_number + line_offset, 'whitespace/indent', 4,
'Code inside a namespace should not be indented.')
if in_preprocessor_directive or (current_line.strip()[0] == '#'): # This takes care of preprocessor directive syntax.
in_preprocessor_directive = current_line[-1] == '\\'
else:
looking_for_semicolon = ((current_line.find(';') == -1) and (current_line.strip()[-1] != '}')) or (current_line[-1] == '\\')
else:
looking_for_semicolon = False # If we have a brace we may not need a semicolon.
current_indentation_level += current_line.count('{') - current_line.count('}')
if current_indentation_level < 0:
break
def check_enum_casing(clean_lines, line_number, enum_state, error):
"""Looks for incorrectly named enum values.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
enum_state: A _EnumState instance which maintains enum declaration state.
error: The function to call with any errors found.
"""
enum_state.is_webidl_enum |= bool(match(r'\s*// Web(?:Kit)?IDL enum\s*$', clean_lines.raw_lines[line_number]))
line = clean_lines.elided[line_number] # Get rid of comments and strings.
if not enum_state.process_clean_line(line):
error(line_number, 'readability/enum_casing', 4,
'enum members should use InterCaps with an initial capital letter.')
def check_directive_indentation(clean_lines, line_number, file_state, error):
"""Looks for indentation of preprocessor directives.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
indented_preprocessor_directives = match(r'\s+#', line)
if not indented_preprocessor_directives:
return
error(line_number, 'whitespace/indent', 4, 'preprocessor directives (e.g., #ifdef, #define, #import) should never be indented.')
def get_initial_spaces_for_line(clean_line):
initial_spaces = 0
while initial_spaces < len(clean_line) and clean_line[initial_spaces] == ' ':
initial_spaces += 1
return initial_spaces
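# Minimal sketch of the behavior above (hypothetical helper): only literal
# space characters are counted, so a tab-indented line reports zero spaces.
def _example_initial_spaces():
    assert get_initial_spaces_for_line('    return;') == 4
    assert get_initial_spaces_for_line('\treturn;') == 0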
def check_indentation_amount(clean_lines, line_number, error):
line = clean_lines.elided[line_number]
initial_spaces = get_initial_spaces_for_line(line)
if initial_spaces % 4:
error(line_number, 'whitespace/indent', 3,
'Weird number of spaces at line-start. Are you using a 4-space indent?')
return
previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
if not previous_line.strip() or match(r'\s*\w+\s*:\s*$', previous_line) or previous_line[0] == '#':
return
previous_line_initial_spaces = get_initial_spaces_for_line(previous_line)
if initial_spaces > previous_line_initial_spaces + 4:
error(line_number, 'whitespace/indent', 3, 'When wrapping a line, only indent 4 spaces.')
def check_using_std(clean_lines, line_number, file_state, error):
"""Looks for 'using std::foo;' statements which should be replaced with 'using namespace std;'.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
# This check doesn't apply to C or Objective-C implementation files.
if file_state.is_c_or_objective_c():
return
line = clean_lines.elided[line_number] # Get rid of comments and strings.
using_std_match = match(r'\s*using\s+std::(?P<method_name>\S+)\s*;\s*$', line)
if not using_std_match:
return
method_name = using_std_match.group('method_name')
error(line_number, 'build/using_std', 4,
"Use 'using namespace std;' instead of 'using std::%s;'." % method_name)
def check_using_namespace(clean_lines, line_number, file_extension, error):
"""Looks for 'using namespace foo;' which should be removed.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_extension: The extension (dot not included) of the file.
error: The function to call with any errors found.
"""
# This check applies only to headers.
if file_extension != 'h':
return
line = clean_lines.elided[line_number] # Get rid of comments and strings.
using_namespace_match = match(r'\s*using\s+namespace\s+(?P<method_name>\S+)\s*;\s*$', line)
if not using_namespace_match:
return
method_name = using_namespace_match.group('method_name')
error(line_number, 'build/using_namespace', 4,
"Do not use 'using namespace %s;'." % method_name)
def check_max_min_macros(clean_lines, line_number, file_state, error):
"""Looks use of MAX() and MIN() macros that should be replaced with std::max() and std::min().
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
# This check doesn't apply to C or Objective-C implementation files.
if file_state.is_c_or_objective_c():
return
line = clean_lines.elided[line_number] # Get rid of comments and strings.
max_min_macros_search = search(r'\b(?P<max_min_macro>(MAX|MIN))\s*\(', line)
if not max_min_macros_search:
return
max_min_macro = max_min_macros_search.group('max_min_macro')
max_min_macro_lower = max_min_macro.lower()
error(line_number, 'runtime/max_min_macros', 4,
'Use std::%s() or std::%s<type>() instead of the %s() macro.'
% (max_min_macro_lower, max_min_macro_lower, max_min_macro))
def check_ctype_functions(clean_lines, line_number, file_state, error):
"""Looks for use of the standard functions in ctype.h and suggest they be replaced
by use of equivilent ones in <wtf/ASCIICType.h>?.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
ctype_function_search = search(r'\b(?P<ctype_function>(isalnum|isalpha|isascii|isblank|iscntrl|isdigit|isgraph|islower|isprint|ispunct|isspace|isupper|isxdigit|toascii|tolower|toupper))\s*\(', line)
if not ctype_function_search:
return
ctype_function = ctype_function_search.group('ctype_function')
error(line_number, 'runtime/ctype_function', 4,
'Use the equivalent function in <wtf/ASCIICType.h> instead of the %s() function.'
% (ctype_function))
def check_switch_indentation(clean_lines, line_number, error):
"""Looks for indentation errors inside of switch statements.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
switch_match = match(r'(?P<switch_indentation>\s*)switch\s*\(.+\)\s*{\s*$', line)
if not switch_match:
return
switch_indentation = switch_match.group('switch_indentation')
inner_indentation = switch_indentation + ' ' * 4
line_offset = 0
encountered_nested_switch = False
for current_line in clean_lines.elided[line_number + 1:]:
line_offset += 1
# Skip not only empty lines but also those with preprocessor directives.
if current_line.strip() == '' or current_line.startswith('#'):
continue
if match(r'\s*switch\s*\(.+\)\s*{\s*$', current_line):
# Complexity alarm - another switch statement nested inside the one
# that we're currently testing. We'll need to track the extent of
# that inner switch if the upcoming label tests are still supposed
# to work correctly. Let's not do that; instead, we'll finish
# checking this line, and then leave it like that. Assuming the
# indentation is done consistently (even if incorrectly), this will
# still catch all indentation issues in practice.
encountered_nested_switch = True
current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line)
current_indentation = current_indentation_match.group('indentation')
remaining_line = current_indentation_match.group('remaining_line')
# End the check at the end of the switch statement.
if remaining_line.startswith('}') and current_indentation == switch_indentation:
break
# Case and default branches should not be indented. The regexp also
# catches single-line cases like "default: break;" but does not trigger
# on stuff like "Document::Foo();".
elif match(r'(default|case\s+.*)\s*:([^:].*)?$', remaining_line):
if current_indentation != switch_indentation:
error(line_number + line_offset, 'whitespace/indent', 4,
'A case label should not be indented, but line up with its switch statement.')
# Don't throw an error for multiple badly indented labels,
# one should be enough to figure out the problem.
break
# We ignore goto labels at the very beginning of a line.
elif match(r'\w+\s*:\s*$', remaining_line):
continue
# It's not a goto label, so check if it's indented at least as far as
# the switch statement plus one more level of indentation.
elif not current_indentation.startswith(inner_indentation):
error(line_number + line_offset, 'whitespace/indent', 4,
'Non-label code inside switch statements should be indented.')
# Don't throw an error for multiple badly indented statements,
# one should be enough to figure out the problem.
break
if encountered_nested_switch:
break
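# Illustrative sketch (assumed snippet, not checker code): the check above
# accepts
#     switch (condition) {
#     case fooCondition:
#         ++i;
#         break;
#     default:
#         --i;
#     }
# because the case labels line up with the switch statement and the
# non-label code is indented one further level.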
def check_braces(clean_lines, line_number, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
if match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone
# is using braces for function definition or in a block to
# explicitly create a new scope, which is commonly used to control
# the lifetime of stack-allocated variables. We don't detect this
# perfectly: we just don't complain if the last non-whitespace
# character on the previous non-blank line is ';', ':', '{', '}',
# ')', or ') const' and doesn't begin with 'if|for|while|switch|else'.
# We also allow '#' for #endif and '=' for array initialization.
previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
if ((not search(r'[;:}{)=]\s*$|\)\s*((const|OVERRIDE)\s*)*\s*$', previous_line)
or search(r'\b(if|for|foreach|while|switch|else)\b', previous_line))
and previous_line.find('#') < 0):
error(line_number, 'whitespace/braces', 4,
'This { should be at the end of the previous line')
elif (search(r'\)\s*(((const|OVERRIDE)\s*)*\s*)?{\s*$', line)
and line.count('(') == line.count(')')
and not search(r'\b(if|for|foreach|while|switch)\b', line)
and not match(r'\s+[A-Z_][A-Z_0-9]+\b', line)):
error(line_number, 'whitespace/braces', 4,
'Place brace on its own line for function definitions.')
if (match(r'\s*}\s*(else\s*({\s*)?)?$', line) and line_number > 1):
# We check if a closed brace has started a line to see if a
# one line control statement was previous.
previous_line = clean_lines.elided[line_number - 2]
last_open_brace = previous_line.rfind('{')
if (last_open_brace != -1 and previous_line.find('}', last_open_brace) == -1
and search(r'\b(if|for|foreach|while|else)\b', previous_line)):
error(line_number, 'whitespace/braces', 4,
'One line control clauses should not use braces.')
# An else clause should be on the same line as the preceding closing brace.
if match(r'\s*else\s*', line):
previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
if match(r'\s*}\s*$', previous_line):
error(line_number, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# Likewise, an else should never have the else clause on the same line
if search(r'\belse [^\s{]', line) and not search(r'\belse if\b', line):
error(line_number, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if match(r'\s*do [^\s{]', line):
error(line_number, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Braces shouldn't be followed by a ; unless they're defining a struct
# or initializing an array.
# We can't tell in general, but we can for some common cases.
previous_line_number = line_number
while True:
(previous_line, previous_line_number) = get_previous_non_blank_line(clean_lines, previous_line_number)
if match(r'\s+{.*}\s*;', line) and not previous_line.count(';'):
line = previous_line + line
else:
break
if (search(r'{.*}\s*;', line)
and line.count('{') == line.count('}')
and not search(r'struct|class|enum|\s*=\s*{', line)):
error(line_number, 'readability/braces', 4,
"You don't need a ; after a }")
def check_exit_statement_simplifications(clean_lines, line_number, error):
"""Looks for else or else-if statements that should be written as an
if statement when the prior if concludes with a return, break, continue or
goto statement.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
else_match = match(r'(?P<else_indentation>\s*)(\}\s*)?else(\s+if\s*\(|(?P<else>\s*(\{\s*)?\Z))', line)
if not else_match:
return
else_indentation = else_match.group('else_indentation')
inner_indentation = else_indentation + ' ' * 4
previous_lines = clean_lines.elided[:line_number]
previous_lines.reverse()
line_offset = 0
encountered_exit_statement = False
for current_line in previous_lines:
line_offset -= 1
# Skip not only empty lines but also those with preprocessor directives
# and goto labels.
if current_line.strip() == '' or current_line.startswith('#') or match(r'\w+\s*:\s*$', current_line):
continue
# Skip lines with closing braces on the original indentation level.
# Even though the styleguide says they should be on the same line as
# the "else if" statement, we also want to check for instances where
# the current code does not comply with the coding style. Thus, ignore
# these lines and proceed to the line before that.
if current_line == else_indentation + '}':
continue
current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line)
current_indentation = current_indentation_match.group('indentation')
remaining_line = current_indentation_match.group('remaining_line')
# As we're going up the lines, the first real statement to encounter
# has to be an exit statement (return, break, continue or goto) -
# otherwise, this check doesn't apply.
if not encountered_exit_statement:
# We only want to find exit statements if they are on exactly
# the same level of indentation as expected from the code inside
# the block. If the indentation doesn't strictly match then we
# might have a nested if or something, which must be ignored.
if current_indentation != inner_indentation:
break
if match(r'(return(\W+.*)|(break|continue)\s*;|goto\s*\w+;)$', remaining_line):
encountered_exit_statement = True
continue
break
# When code execution reaches this point, we've found an exit statement
# as last statement of the previous block. Now we only need to make
# sure that the block belongs to an "if", then we can throw an error.
# Skip lines with opening braces on the original indentation level,
# similar to the closing braces check above. ("if (condition)\n{")
if current_line == else_indentation + '{':
continue
# Skip everything that's further indented than our "else" or "else if".
if current_indentation.startswith(else_indentation) and current_indentation != else_indentation:
continue
# So we've got a line with same (or less) indentation. Is it an "if"?
# If yes: throw an error. If no: don't throw an error.
# Whatever the outcome, this is the end of our loop.
if match(r'if\s*\(', remaining_line):
if else_match.start('else') != -1:
error(line_number + line_offset, 'readability/control_flow', 4,
'An else statement can be removed when the prior "if" '
'concludes with a return, break, continue or goto statement.')
else:
error(line_number + line_offset, 'readability/control_flow', 4,
'An else if statement should be written as an if statement '
'when the prior "if" concludes with a return, break, '
'continue or goto statement.')
break
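# Illustrative sketch (assumed snippet, not checker code): the check above
# flags
#     if (condition) {
#         return;
#     } else {
#         doSomething();
#     }
# because the else clause can be removed when the prior if concludes with
# a return statement.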
def replaceable_check(operator, macro, line):
"""Determine whether a basic CHECK can be replaced with a more specific one.
For example suggest using CHECK_EQ instead of CHECK(a == b) and
similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
Args:
operator: The C++ operator used in the CHECK.
macro: The CHECK or EXPECT macro being called.
line: The current source line.
Returns:
True if the CHECK can be replaced with a more specific one.
"""
# This matches decimal and hex integers, strings, and chars (in that order).
match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
# Expression to match two sides of the operator with something that
# looks like a literal, since CHECK(x == iterator) won't compile.
# This means we can't catch all the cases where a more specific
# CHECK is possible, but it's less annoying than dealing with
# extraneous warnings.
match_this = (r'\s*' + macro + r'\((\s*' +
match_constant + r'\s*' + operator + r'[^<>].*|'
r'.*[^<>]' + operator + r'\s*' + match_constant +
r'\s*\))')
# Don't complain about CHECK(x == NULL) or similar because
# CHECK_EQ(x, NULL) won't compile (requires a cast).
# Also, don't complain about more complex boolean expressions
# involving && or || such as CHECK(a == b || c == d).
return match(match_this, line) and not search(r'NULL|&&|\|\|', line)
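# Minimal sketch of the behavior above (hypothetical helper; the sample
# lines are assumptions): a CHECK against a literal is replaceable, while a
# comparison of two variables is not, since the regexp requires a literal
# on one side.
def _example_replaceable_check():
    assert replaceable_check('==', 'CHECK', 'CHECK(x == 42);')
    assert not replaceable_check('==', 'CHECK', 'CHECK(x == y);')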
def check_check(clean_lines, line_number, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
raw_lines = clean_lines.raw_lines
current_macro = ''
for macro in _CHECK_MACROS:
if raw_lines[line_number].find(macro) >= 0:
current_macro = macro
break
if not current_macro:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
line = clean_lines.elided[line_number] # get rid of comments and strings
# Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
for operator in ['==', '!=', '>=', '>', '<=', '<']:
if replaceable_check(operator, current_macro, line):
error(line_number, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[current_macro][operator],
current_macro, operator))
break
def check_for_comparisons_to_zero(clean_lines, line_number, error):
# Get the line without comments and strings.
line = clean_lines.elided[line_number]
# Include NULL here so that users don't have to convert NULL to 0 first and then get this error.
if search(r'[=!]=\s*(NULL|0|true|false)[^\w.]', line) or search(r'[^\w.](NULL|0|true|false)\s*[=!]=', line):
if not search('LIKELY', line) and not search('UNLIKELY', line):
error(line_number, 'readability/comparison_to_zero', 5,
'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.')
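# Minimal usage sketch (hypothetical helper): gathers the messages the
# comparison check would emit for one line, e.g. for 'if (ptr == NULL)'.
def _example_comparison_messages(clean_lines, line_number):
    messages = []
    def collect(line_num, category, confidence, message):
        messages.append(message)
    check_for_comparisons_to_zero(clean_lines, line_number, collect)
    return messages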
def check_for_null(clean_lines, line_number, file_state, error):
# This check doesn't apply to C or Objective-C implementation files.
if file_state.is_c_or_objective_c():
return
line = clean_lines.elided[line_number]
# Don't warn about NULL usage in g_*(). See Bug 32858 and 39372.
if search(r'\bg(_[a-z]+)+\b', line):
return
# Don't warn about NULL usage in gst_*(). See Bug 70498.
if search(r'\bgst(_[a-z]+)+\b', line):
return
# Don't warn about NULL usage in gdk_pixbuf_save_to_*{join,concat}(). See Bug 43090.
if search(r'\bgdk_pixbuf_save_to\w+\b', line):
return
# Don't warn about NULL usage in gtk_widget_style_get(), gtk_style_context_get_style(), or gtk_style_context_get(). See Bug 51758
if search(r'\bgtk_widget_style_get\(\w+\b', line) or search(r'\bgtk_style_context_get_style\(\w+\b', line) or search(r'\bgtk_style_context_get\(\w+\b', line):
return
# Don't warn about NULL usage in soup_server_new(). See Bug 77890.
if search(r'\bsoup_server_new\(\w+\b', line):
return
if search(r'\bNULL\b', line):
error(line_number, 'readability/null', 5, 'Use 0 instead of NULL.')
return
line = clean_lines.raw_lines[line_number]
# See if NULL occurs in any comments in the line. If the search for NULL using the raw line
# matches, then do the check with strings collapsed to avoid giving errors for
# NULLs occurring in strings.
if search(r'\bNULL\b', line) and search(r'\bNULL\b', CleansedLines.collapse_strings(line)):
error(line_number, 'readability/null', 4, 'Use 0 or null instead of NULL (even in *comments*).')
def get_line_width(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for c in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(c) in ('W', 'F'):
width += 2
elif not unicodedata.combining(c):
width += 1
return width
return len(line)
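# Minimal sketch of the behavior above (hypothetical helper): ASCII text is
# one column per character, while a fullwidth CJK character counts as two.
# (The unicode branch applies under Python 2, which this file targets.)
def _example_line_width():
    assert get_line_width('foo') == 3
    assert get_line_width(u'\u4e00') == 2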
def check_style(clean_lines, line_number, file_extension, class_state, file_state, enum_state, error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 4-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
file_state: A _FileState instance which maintains information about
the state of things in the file.
enum_state: A _EnumState instance which maintains the current enum state.
error: The function to call with any errors found.
"""
raw_lines = clean_lines.raw_lines
line = raw_lines[line_number]
if line.find('\t') != -1:
error(line_number, 'whitespace/tab', 1,
'Tab found; better to use spaces')
cleansed_line = clean_lines.elided[line_number]
if line and line[-1].isspace():
error(line_number, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
if (cleansed_line.count(';') > 1
# for loops are allowed two ;'s (and may run over two lines).
and cleansed_line.find('for') == -1
and (get_previous_non_blank_line(clean_lines, line_number)[0].find('for') == -1
or get_previous_non_blank_line(clean_lines, line_number)[0].find(';') != -1)
# It's ok to have many commands in a switch case that fits in 1 line
and not ((cleansed_line.find('case ') != -1
or cleansed_line.find('default:') != -1)
and cleansed_line.find('break;') != -1)
# Also it's ok to have many commands in trivial single-line accessors in class definitions.
and not (match(r'.*\(.*\).*{.*.}', line)
and class_state.classinfo_stack
and line.count('{') == line.count('}'))
and not cleansed_line.startswith('#define ')
# It's ok to use WTF_MAKE_NONCOPYABLE and WTF_MAKE_FAST_ALLOCATED macros in 1 line
and not (cleansed_line.find("WTF_MAKE_NONCOPYABLE") != -1
and cleansed_line.find("WTF_MAKE_FAST_ALLOCATED") != -1)):
error(line_number, 'whitespace/newline', 4,
'More than one command on the same line')
if cleansed_line.strip().endswith('||') or cleansed_line.strip().endswith('&&'):
error(line_number, 'whitespace/operators', 4,
'Boolean expressions that span multiple lines should have their '
'operators on the left side of the line instead of the right side.')
# Some more style checks
check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error)
check_directive_indentation(clean_lines, line_number, file_state, error)
check_using_std(clean_lines, line_number, file_state, error)
check_using_namespace(clean_lines, line_number, file_extension, error)
check_max_min_macros(clean_lines, line_number, file_state, error)
check_ctype_functions(clean_lines, line_number, file_state, error)
check_switch_indentation(clean_lines, line_number, error)
check_braces(clean_lines, line_number, error)
check_exit_statement_simplifications(clean_lines, line_number, error)
check_spacing(file_extension, clean_lines, line_number, error)
check_check(clean_lines, line_number, error)
check_for_comparisons_to_zero(clean_lines, line_number, error)
check_for_null(clean_lines, line_number, file_state, error)
check_indentation_amount(clean_lines, line_number, error)
check_enum_casing(clean_lines, line_number, enum_state, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cpp').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cpp').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cpp').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _drop_common_suffixes(filename):
"""Drops common suffixes like _test.cpp or -inl.h from filename.
For example:
>>> _drop_common_suffixes('foo/foo-inl.h')
'foo/foo'
>>> _drop_common_suffixes('foo/bar/foo.cpp')
'foo/bar/foo'
>>> _drop_common_suffixes('foo/foo_internal.h')
'foo/foo'
>>> _drop_common_suffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cpp', 'regtest.cpp', 'unittest.cpp',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix)
and filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _classify_include(filename, include, is_system, include_state):
"""Figures out what kind of header 'include' is.
Args:
filename: The current file cpp_style is running over.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
include_state: An _IncludeState instance in which the headers are inserted.
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _classify_include('foo.cpp', 'config.h', False, _IncludeState())
_CONFIG_HEADER
>>> _classify_include('foo.cpp', 'foo.h', False, _IncludeState())
_PRIMARY_HEADER
>>> _classify_include('foo.cpp', 'bar.h', False, _IncludeState())
_OTHER_HEADER
"""
# If it is a system header we know it is classified as _OTHER_HEADER.
if is_system and not include.startswith('public/'):
return _OTHER_HEADER
# If the include is named config.h then this is WebCore/config.h.
if include == "config.h":
return _CONFIG_HEADER
# There cannot be primary includes in header files themselves. Only an
# include exactly matching the header filename will be flagged as
# primary, so that it triggers the "don't include yourself" check.
if filename.endswith('.h') and filename != include:
return _OTHER_HEADER
# Qt's moc files do not follow the naming and ordering rules, so they should be skipped
if include.startswith('moc_') and include.endswith('.cpp'):
return _MOC_HEADER
if include.endswith('.moc'):
return _MOC_HEADER
# If the target file basename starts with the include we're checking
# then we consider it the primary header.
target_base = FileInfo(filename).base_name()
include_base = FileInfo(include).base_name()
# If we haven't encountered a primary header, then be lenient in checking.
if not include_state.visited_primary_section():
if target_base.find(include_base) != -1:
return _PRIMARY_HEADER
# Qt private APIs use _p.h suffix.
if include_base.find(target_base) != -1 and include_base.endswith('_p'):
return _PRIMARY_HEADER
# If we already encountered a primary header, perform a strict comparison.
# In case the two filename bases are the same then the above lenient check
# probably was a false positive.
elif include_state.visited_primary_section() and target_base == include_base:
if include == "ResourceHandleWin.h":
# FIXME: Thus far, we've only seen one example of these, but if we
# start to see more, please consider generalizing this check
# somehow.
return _OTHER_HEADER
return _PRIMARY_HEADER
return _OTHER_HEADER
def _does_primary_header_exist(filename):
"""Return a primary header file name for a file, or empty string
if the file is not source file or primary header does not exist.
"""
fileinfo = FileInfo(filename)
if not fileinfo.is_source():
return False
primary_header = fileinfo.no_extension() + ".h"
return os.path.isfile(primary_header)
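# Minimal usage sketch (hypothetical path): this returns True only when
# 'Source/Foo.h' exists on disk next to the source file; header inputs
# fail the is_source() guard and return False immediately.
def _example_primary_header_exists():
    return _does_primary_header_exist('Source/Foo.cpp')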
def check_include_line(filename, file_extension, clean_lines, line_number, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
file_extension: The current file extension, without the leading dot.
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
# FIXME: For readability or as a possible optimization, consider
# exiting early here by checking whether the "build/include"
# category should be checked for the given filename. This
# may involve having the error handler classes expose a
# should_check() method, in addition to the usual __call__
# method.
line = clean_lines.lines[line_number]
matched = _RE_PATTERN_INCLUDE.search(line)
if not matched:
return
include = matched.group(2)
is_system = (matched.group(1) == '<')
# Look for any of the stream classes that are part of standard C++.
if match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
error(line_number, 'readability/streams', 3,
'Streams are highly discouraged.')
# Look for specific includes to fix.
if include.startswith('wtf/') and not is_system:
error(line_number, 'build/include', 4,
'wtf includes should be <wtf/file.h> instead of "wtf/file.h".')
duplicate_header = include in include_state
if duplicate_header:
error(line_number, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = line_number
header_type = _classify_include(filename, include, is_system, include_state)
primary_header_exists = _does_primary_header_exist(filename)
include_state.header_types[line_number] = header_type
# Only proceed if this isn't a duplicate header.
if duplicate_header:
return
# We want to ensure that headers appear in the right order:
# 1) for implementation files: config.h, primary header, blank line, alphabetically sorted
# 2) for header files: alphabetically sorted
# The include_state object keeps track of the last type seen
# and complains if the header types are out of order or missing.
error_message = include_state.check_next_include_order(header_type,
file_extension == "h",
primary_header_exists)
# Check to make sure we have a blank line after primary header.
if not error_message and header_type == _PRIMARY_HEADER:
next_line = clean_lines.raw_lines[line_number + 1]
if not is_blank_line(next_line):
error(line_number, 'build/include_order', 4,
'You should add a blank line after implementation file\'s own header.')
# Check to make sure all headers besides config.h and the primary header are
# alphabetically sorted. Skip Qt's moc files.
if not error_message and header_type == _OTHER_HEADER:
previous_line_number = line_number - 1
previous_line = clean_lines.lines[previous_line_number]
previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
while (not previous_match and previous_line_number > 0
and not search(r'\A(#if|#ifdef|#ifndef|#else|#elif|#endif)', previous_line)):
previous_line_number -= 1
previous_line = clean_lines.lines[previous_line_number]
previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
if previous_match:
previous_header_type = include_state.header_types[previous_line_number]
if previous_header_type == _OTHER_HEADER and previous_line.strip() > line.strip():
# This type of error is potentially a problem with this line or the previous one,
# so if the error is filtered for one line, report it for the next. This is so that
# we properly handle patches, for which only modified lines produce errors.
if not error(line_number - 1, 'build/include_order', 4, 'Alphabetical sorting problem.'):
error(line_number, 'build/include_order', 4, 'Alphabetical sorting problem.')
if error_message:
if file_extension == 'h':
error(line_number, 'build/include_order', 4,
'%s Should be: alphabetically sorted.' %
error_message)
else:
error(line_number, 'build/include_order', 4,
'%s Should be: config.h, primary header, blank line, and then alphabetically sorted.' %
error_message)
def check_language(filename, clean_lines, line_number, file_extension, include_state,
file_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[line_number]
if not line:
return
matched = _RE_PATTERN_INCLUDE.search(line)
if matched:
check_include_line(filename, file_extension, clean_lines, line_number, include_state, error)
return
# FIXME: figure out if they're using default arguments in fn proto.
# Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
matched = search(
r'\b(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
if matched:
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts.
if not match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
error(line_number, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched.group(1))
check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)',
error)
# This doesn't catch all cases. Consider (const char * const)"hello".
check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
if search(
r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
error(line_number, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
matched = match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
if matched and not match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
matched.group(3)):
error(line_number, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(matched.group(1), matched.group(2)))
# Check that we're not using RTTI outside of testing code.
if search(r'\bdynamic_cast<', line):
error(line_number, 'runtime/rtti', 5,
'Do not use dynamic_cast<>. If you need to cast within a class '
"hierarchy, use static_cast<> to upcast. Google doesn't support "
'RTTI.')
if search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(line_number, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# FIXME: check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in check_for_non_standard_constructs for now)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if search(r'\bshort port\b', line):
if not search(r'\bunsigned short port\b', line):
error(line_number, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
# When snprintf is used, the second argument shouldn't be a literal.
matched = search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if matched:
error(line_number, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (matched.group(1), matched.group(2)))
# Check if some verboten C functions are being used.
if search(r'\bsprintf\b', line):
error(line_number, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
matched = search(r'\b(strcpy|strcat)\b', line)
if matched:
error(line_number, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % matched.group(1))
if search(r'\bsscanf\b', line):
error(line_number, 'runtime/printf', 1,
'sscanf can be ok, but is slow and can overflow buffers.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if search(r'\}\s*if\s*\(', line):
error(line_number, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
matched = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I)
if matched:
error(line_number, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (matched.group(1), matched.group(2)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
matched = search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if matched and not match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", matched.group(2)):
error(line_number, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (matched.group(1), matched.group(2)))
# Detect variable-length arrays.
matched = match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (matched and matched.group(2) != 'return' and matched.group(2) != 'delete' and
matched.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|/|<<|>>', matched.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if search(r'sizeof\(.+\)', tok):
continue
if search(r'arraysize\(\w+\)', tok):
continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok:
continue
if match(r'\d+', tok):
continue
if match(r'0[xX][0-9a-fA-F]+', tok):
continue
if match(r'k[A-Z0-9]\w*', tok):
continue
if match(r'(.+::)?k[A-Z0-9]\w*', tok):
continue
if match(r'(.+::)?[A-Z][A-Z0-9_]*', tok):
continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(line_number, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(line_number, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
# Check for plain bitfields declared without either "signed" or "unsigned".
# Most compilers treat such bitfields as signed, but there are still compilers like
# RVCT 4.0 that use unsigned by default.
matched = re.match(r'\s*((const|mutable)\s+)?(char|(short(\s+int)?)|int|long(\s+(long|int))?)\s+[a-zA-Z_][a-zA-Z0-9_]*\s*:\s*\d+\s*;', line)
if matched:
error(line_number, 'runtime/bitfields', 5,
'Please declare integral type bitfields with either signed or unsigned.')
check_identifier_name_in_declaration(filename, line_number, line, file_state, error)
# Check for unsigned int (should be just 'unsigned')
if search(r'\bunsigned int\b', line):
error(line_number, 'runtime/unsigned', 1,
'Omit int when using unsigned')
# Check that we're not using static_cast<Text*>.
if search(r'\bstatic_cast<Text\*>', line):
error(line_number, 'readability/check', 4,
'Consider using toText helper function in WebCore/dom/Text.h '
'instead of static_cast<Text*>')
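# Illustrative only (not part of the original checker): a few hypothetical C++
# lines and the category the checks above would report for them:
#   int x = (int)some_float;     -> readability/casting (use static_cast<int>)
#   short port;                  -> runtime/int (use "unsigned short" for ports)
#   unsigned int count;          -> runtime/unsigned (omit "int")
#   char buffer[dynamicSize];    -> runtime/arrays (variable-length array)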
def check_identifier_name_in_declaration(filename, line_number, line, file_state, error):
"""Checks if identifier names contain any underscores.
As identifiers in libraries we are using have a bunch of
underscores, we only warn about the declarations of identifiers
and don't check use of identifiers.
Args:
filename: The name of the current file.
line_number: The number of the line to check.
line: The line of code to check.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
# We don't check a return statement.
if match(r'\s*(return|delete)\b', line):
return
# Basically, a declaration is a type name followed by whitespaces
# followed by an identifier. The type name can be complicated
# due to type adjectives and templates. We remove them first to
# simplify the process to find declarations of identifiers.
# Convert "long long", "long double", and "long long int" to
# simple types, but don't remove simple "long".
line = sub(r'long (long )?(?=long|double|int)', '', line)
# Convert unsigned/signed types to simple types, too.
line = sub(r'(unsigned|signed) (?=char|short|int|long)', '', line)
line = sub(r'\b(inline|using|static|const|volatile|auto|register|extern|typedef|restrict|struct|class|virtual)(?=\W)', '', line)
# Remove "new" and "new (expr)" to simplify, too.
line = sub(r'new\s*(\([^)]*\))?', '', line)
# Remove all template parameters by removing matching < and >.
# Loop until no templates are removed to remove nested templates.
while True:
line, number_of_replacements = subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', line)
if not number_of_replacements:
break
# Declarations of local variables can be in condition expressions
# of control flow statements (e.g., "if (RenderObject* p = o->parent())").
# We remove the keywords and the first parenthesis.
#
# Declarations in "while", "if", and "switch" are different from
# other declarations in two aspects:
#
# - There can be only one declaration between the parentheses.
# (i.e., you cannot write "if (int i = 0, j = 1) {}")
# - The variable must be initialized.
# (i.e., you cannot write "if (int i) {}")
#
# and we will need different treatments for them.
line = sub(r'^\s*for\s*\(', '', line)
line, control_statement = subn(r'^\s*(while|else if|if|switch)\s*\(', '', line)
# Detect variable and functions.
type_regexp = r'\w([\w]|\s*[*&]\s*|::)+'
identifier_regexp = r'(?P<identifier>[\w:]+)'
maybe_bitfield_regexp = r'(:\s*\d+\s*)?'
character_after_identifier_regexp = r'(?P<character_after_identifier>[[;()=,])(?!=)'
declaration_without_type_regexp = r'\s*' + identifier_regexp + r'\s*' + maybe_bitfield_regexp + character_after_identifier_regexp
declaration_with_type_regexp = r'\s*' + type_regexp + r'\s' + declaration_without_type_regexp
is_function_arguments = False
number_of_identifiers = 0
while True:
# If we are seeing the first identifier or arguments of a
# function, there should be a type name before an identifier.
if not number_of_identifiers or is_function_arguments:
declaration_regexp = declaration_with_type_regexp
else:
declaration_regexp = declaration_without_type_regexp
matched = match(declaration_regexp, line)
if not matched:
return
identifier = matched.group('identifier')
character_after_identifier = matched.group('character_after_identifier')
# If we removed a non-for-control statement, the character after
# the identifier should be '='. With this rule, we can avoid
# warning for cases like "if (val & INT_MAX) {".
if control_statement and character_after_identifier != '=':
return
is_function_arguments = is_function_arguments or character_after_identifier == '('
# Remove "m_" and "s_" to allow them.
modified_identifier = sub(r'(^|(?<=::))[ms]_', '', identifier)
if not file_state.is_objective_c() and modified_identifier.find('_') >= 0:
# Various exceptions to the rule: JavaScript op codes functions, const_iterator.
if (not (filename.find('JavaScriptCore') >= 0 and modified_identifier.find('op_') >= 0)
and not (filename.find('gtk') >= 0 and modified_identifier.startswith('webkit_'))
and not modified_identifier.startswith('tst_')
and not modified_identifier.startswith('webkit_dom_object_')
and not modified_identifier.startswith('webkit_soup')
and not modified_identifier.startswith('NPN_')
and not modified_identifier.startswith('NPP_')
and not modified_identifier.startswith('NP_')
and not modified_identifier.startswith('qt_')
and not modified_identifier.startswith('_q_')
and not modified_identifier.startswith('cairo_')
and not modified_identifier.startswith('Ecore_')
and not modified_identifier.startswith('Eina_')
and not modified_identifier.startswith('Evas_')
and not modified_identifier.startswith('Ewk_')
and not modified_identifier.startswith('cti_')
and not modified_identifier.find('::qt_') >= 0
and not modified_identifier.find('::_q_') >= 0
and not modified_identifier == "const_iterator"
and not modified_identifier == "vm_throw"
and not modified_identifier == "DFG_OPERATION"):
error(line_number, 'readability/naming/underscores', 4, identifier + " is incorrectly named. Don't use underscores in your identifier names.")
# Check for variables named 'l', these are too easy to confuse with '1' in some fonts
if modified_identifier == 'l':
error(line_number, 'readability/naming', 4, identifier + " is incorrectly named. Don't use the single letter 'l' as an identifier name.")
# There can be only one declaration in non-for-control statements.
if control_statement:
return
# We should continue checking if this is a function
# declaration because we need to check its arguments.
# Also, we need to check multiple declarations.
if character_after_identifier != '(' and character_after_identifier != ',':
return
number_of_identifiers += 1
line = line[matched.end():]
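# A sketch (not from the original file) of how the declaration check above
# treats a few identifiers; the variable names are hypothetical:
#   int under_scored_name;  -> flagged: readability/naming/underscores
#   int l;                  -> flagged: readability/naming ('l' reads like '1')
#   int m_member;           -> allowed: the m_/s_ prefix is stripped first
#   op_add                  -> allowed, but only inside JavaScriptCore files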
def check_c_style_cast(line_number, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
This also handles sizeof(type) warnings, due to similarity of content.
Args:
line_number: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast or static_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
"""
matched = search(pattern, line)
if not matched:
return
# e.g., sizeof(int)
sizeof_match = match(r'.*sizeof\s*$', line[0:matched.start(1) - 1])
if sizeof_match:
error(line_number, 'runtime/sizeof', 1,
'Using sizeof(type). Use sizeof(varname) instead if possible')
return
remainder = line[matched.end(0):]
# The close paren is for function pointers as arguments to a function.
# eg, void foo(void (*bar)(int));
# The semicolon check is a more basic function check; also possibly a
# function pointer typedef.
# eg, void foo(int); or void foo(int) const;
# The equals check is for function pointer assignment.
# eg, void *(*foo)(int) = ...
#
# Right now, this will only catch cases where there's a single argument, and
# it's unnamed. It should probably be expanded to check for multiple
# arguments with some unnamed.
function_match = match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', remainder)
if function_match:
if (not function_match.group(3)
or function_match.group(3) == ';'
or raw_line.find('/*') < 0):
error(line_number, 'readability/function', 3,
'All parameters should be named in a function')
return
# At this point, all that should be left is actual casts.
error(line_number, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, matched.group(1)))
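# Illustrative behaviour of check_c_style_cast (a sketch; the C++ snippets are
# hypothetical, assuming the int/float/... pattern passed in by the caller):
#   "int x = (int)f;"         -> readability/casting (suggests static_cast<int>)
#   "size_t n = sizeof(int)"  -> runtime/sizeof (prefer sizeof(varname))
#   "void foo(int);"          -> readability/function (unnamed parameter)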
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
# We can trust with reasonable confidence that map gives us pair<>, too.
'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
}
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def files_belong_to_same_module(filename_cpp, filename_h):
"""Check if these two filenames belong to the same module.
The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cpp, foo_test.cpp and foo_unittest.cpp belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cpp contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cpp', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cpp and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cpp: is the path for the .cpp file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cpp and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cpp.endswith('.cpp'):
return (False, '')
filename_cpp = filename_cpp[:-len('.cpp')]
if filename_cpp.endswith('_unittest'):
filename_cpp = filename_cpp[:-len('_unittest')]
elif filename_cpp.endswith('_test'):
filename_cpp = filename_cpp[:-len('_test')]
filename_cpp = filename_cpp.replace('/public/', '/')
filename_cpp = filename_cpp.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cpp.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cpp[:-len(filename_h)]
return files_belong_to_same_module, common_path
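# Illustrative behaviour (a sketch, not part of the original source):
#   files_belong_to_same_module('/absolute/path/to/base/sysinfo.cpp', 'base/sysinfo.h')
#   -> (True, '/absolute/path/to/')
#   files_belong_to_same_module('tools/base/bar.cpp', 'base/bar.h')
#   -> (True, 'tools/')   # the known false positive mentioned above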
def update_include_state(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
io = _unit_test_config.get(INCLUDE_IO_INJECTION_KEY, codecs)
header_file = None
try:
header_file = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
line_number = 0
for line in header_file:
line_number += 1
clean_line = cleanse_comments(line)
matched = _RE_PATTERN_INCLUDE.search(clean_line)
if matched:
include = matched.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, line_number))
return True
def check_for_include_what_you_use(filename, clean_lines, include_state, error):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
"""
required = {} # A map of header name to line_number and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for line_number in xrange(clean_lines.num_lines()):
line = clean_lines.elided[line_number]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
if _RE_PATTERN_STRING.search(line):
required['<string>'] = (line_number, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (line_number, template)
# The following function is just a speed up, no semantics are changed.
if '<' not in line: # Reduces the CPU time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (line_number, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cpp. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = os.path.abspath(filename)
# For Emacs's flymake.
# If cpp_style is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cpp'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cpp', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cpp$', '.cpp', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
for header in include_state.keys(): #NOLINT
(same_module, common_path) = files_belong_to_same_module(abs_filename, header)
fullpath = common_path + header
if same_module and update_include_state(fullpath, include_state):
header_found = True
# If we can't find the header file for a .cpp, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# FIXME: Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cpp') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
if any(header in include_state for header in headers):
continue
if required_header_unstripped.strip('<>"') not in include_state:
error(required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
def process_line(filename, file_extension,
clean_lines, line, include_state, function_state,
class_state, file_state, enum_state, error):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
file_state: A _FileState instance which maintains information about
the state of things in the file.
enum_state: A _EnumState instance which maintains an enum declaration
state.
error: A callable to which errors are reported, which takes arguments:
line number, error level, and message
"""
raw_lines = clean_lines.raw_lines
detect_functions(clean_lines, line, function_state, error)
check_for_function_lengths(clean_lines, line, function_state, error)
if search(r'\bNOLINT\b', raw_lines[line]): # ignore nolint lines
return
if match(r'\s*\b__asm\b', raw_lines[line]): # Ignore asm lines as they format differently.
return
check_function_definition(filename, file_extension, clean_lines, line, function_state, error)
check_pass_ptr_usage(clean_lines, line, function_state, error)
check_for_leaky_patterns(clean_lines, line, function_state, error)
check_for_multiline_comments_and_strings(clean_lines, line, error)
check_style(clean_lines, line, file_extension, class_state, file_state, enum_state, error)
check_language(filename, clean_lines, line, file_extension, include_state,
file_state, error)
check_for_non_standard_constructs(clean_lines, line, class_state, error)
check_posix_threading(clean_lines, line, error)
check_invalid_increment(clean_lines, line, error)
def _process_lines(filename, file_extension, lines, error, min_confidence):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
line number, error category, confidence, and message.
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState(min_confidence)
class_state = _ClassState()
check_for_copyright(lines, error)
if file_extension == 'h':
check_for_header_guard(filename, lines, error)
remove_multi_line_comments(lines, error)
clean_lines = CleansedLines(lines)
file_state = _FileState(clean_lines, file_extension)
enum_state = _EnumState()
for line in xrange(clean_lines.num_lines()):
process_line(filename, file_extension, clean_lines, line,
include_state, function_state, class_state, file_state,
enum_state, error)
class_state.check_finished(error)
check_for_include_what_you_use(filename, clean_lines, include_state, error)
# We check here rather than inside process_line so that we see raw
# lines rather than "cleaned" lines.
check_for_unicode_replacement_characters(lines, error)
check_for_new_line_at_eof(lines, error)
class CppChecker(object):
"""Processes C++ lines for checking style."""
# This list is used to--
#
# (1) generate an explicit list of all possible categories,
# (2) unit test that all checked categories have valid names, and
# (3) unit test that all categories are getting unit tested.
#
categories = set([
'build/class',
'build/deprecated',
'build/endif_comment',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'build/using_std',
'build/using_namespace',
'legal/copyright',
'readability/braces',
'readability/casting',
'readability/check',
'readability/comparison_to_zero',
'readability/constructors',
'readability/control_flow',
'readability/enum_casing',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/parameter_name',
'readability/naming',
'readability/naming/underscores',
'readability/null',
'readability/pass_ptr',
'readability/streams',
'readability/todo',
'readability/utf8',
'readability/webkit_export',
'runtime/arrays',
'runtime/bitfields',
'runtime/casting',
'runtime/ctype_function',
'runtime/explicit',
'runtime/init',
'runtime/int',
'runtime/invalid_increment',
'runtime/leaky_pattern',
'runtime/max_min_macros',
'runtime/memset',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/rtti',
'runtime/sizeof',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/unsigned',
'runtime/virtual',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/declaration',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
])
def __init__(self, file_path, file_extension, handle_style_error,
min_confidence):
"""Create a CppChecker instance.
Args:
file_path: A string that is the path of the file being checked.
file_extension: A string that is the file extension, without
the leading dot.
handle_style_error: The function to call with any errors found.
min_confidence: The minimum confidence level at which to report errors.
"""
self.file_extension = file_extension
self.file_path = file_path
self.handle_style_error = handle_style_error
self.min_confidence = min_confidence
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this CppChecker instance is equal to another."""
if self.file_extension != other.file_extension:
return False
if self.file_path != other.file_path:
return False
if self.handle_style_error != other.handle_style_error:
return False
if self.min_confidence != other.min_confidence:
return False
return True
# Useful for unit testing.
def __ne__(self, other):
# Python does not automatically deduce __ne__() from __eq__().
return not self.__eq__(other)
def check(self, lines):
_process_lines(self.file_path, self.file_extension, lines,
self.handle_style_error, self.min_confidence)
# FIXME: Remove this function (requires refactoring unit tests).
def process_file_data(filename, file_extension, lines, error, min_confidence, unit_test_config):
global _unit_test_config
_unit_test_config = unit_test_config
checker = CppChecker(filename, file_extension, error, min_confidence)
checker.check(lines)
_unit_test_config = {}
|
HuimingCheng/AutoGrading | refs/heads/master | learning/web_Haotian/venv/Lib/site-packages/pip/_vendor/requests/sessions.py | 355 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""Determines appropriate setting for a given request, taking into account
the explicit setting on that request, and the setting in the session. If both
settings are dictionaries, they will be merged together using `dict_class`.
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None. Extract keys first to avoid altering
# the dictionary during iteration.
none_keys = [k for (k, v) in merged_setting.items() if v is None]
for key in none_keys:
del merged_setting[key]
return merged_setting
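# Illustrative behaviour (a sketch; exact key order may vary):
#   merge_setting({'a': 1, 'b': None}, {'b': 2, 'c': 3})
#   -> {'a': 1, 'c': 3}   # request-level values win, and None removes a key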
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
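# Illustrative (a sketch; log_hook is a hypothetical callable): a request
# carrying the default empty hook list must not wipe out session hooks:
#   merge_hooks({'response': []}, {'response': [log_hook]})
#   -> {'response': [log_hook]}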
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
i = 0
hist = [] # keep track of history
while resp.is_redirect:
prepared_request = req.copy()
if i > 0:
# Update history and keep track of redirects.
hist.append(resp)
new_hist = list(hist)
resp.history = new_hist
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
url = resp.headers['location']
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
# Cache the url, unless it redirects to itself.
if resp.is_permanent_redirect and req.url != prepared_request.url:
self.redirect_cache[req.url] = prepared_request.url
self.rebuild_method(prepared_request, resp)
# https://github.com/kennethreitz/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/kennethreitz/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# Override the original request.
req = prepared_request
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
"""
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy() if proxies is not None else {}
if self.trust_env and not should_bypass_proxies(url):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get('all', environ_proxies.get(scheme))
if proxy:
new_proxies.setdefault(scheme, proxy)
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
>>> s.get('http://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL certificate default.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
# Only store 1000 redirects to prevent using infinite memory
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(self, method, url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None,
json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send
in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) whether the SSL cert will be verified.
A CA_BUNDLE path can also be provided. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method = method.upper(),
url = url,
headers = headers,
files = files,
data = data or {},
json = json,
params = params or {},
auth = auth,
cookies = cookies,
hooks = hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""
Send a given PreparedRequest.
:rtype: requests.Response
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Resolve URL in redirect cache, if available.
if allow_redirects:
checked_urls = set()
while request.url in self.redirect_cache:
checked_urls.add(request.url)
new_url = self.redirect_cache.get(request.url)
if new_url in checked_urls:
break
request.url = new_url
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = datetime.utcnow()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
r.elapsed = datetime.utcnow() - start
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
"""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
env_proxies = get_environ_proxies(url) or {}
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by key length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
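# Illustrative usage (a sketch; MyAdapter is a hypothetical adapter class):
# mounting a more specific prefix wins, because mount() keeps the adapters
# ordered longest-prefix-first and get_adapter() returns the first match:
#   s = Session()
#   s.mount('https://api.example.com', MyAdapter())
#   s.get_adapter('https://api.example.com/v1')  # -> the MyAdapter instance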
def __getstate__(self):
state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
state['redirect_cache'] = dict(self.redirect_cache)
return state
def __setstate__(self, state):
redirect_cache = state.pop('redirect_cache', {})
for attr, value in state.items():
setattr(self, attr, value)
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
for redirect, to in redirect_cache.items():
self.redirect_cache[redirect] = to
def session():
"""
Returns a :class:`Session` for context-management.
:rtype: Session
"""
return Session()
|
papouso/odoo | refs/heads/8.0 | addons/account_analytic_plans/__init__.py | 445 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_plans
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tsdmgz/ansible | refs/heads/devel | lib/ansible/plugins/lookup/dig.py | 87 | # (c) 2015, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: dig
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "1.9"
short_description: query DNS using the dnspython library
requirements:
- dnspython (python library, http://www.dnspython.org/)
description:
- The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name).
It is possible to lookup any DNS record in this manner.
- There are a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
It is also possible to explicitly specify the DNS server(s) to use for lookups.
- In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with an FQDN
- In addition to the (default) A record, it is also possible to specify a different record type that should be queried.
This can be done by either passing in an additional parameter of the form qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried.
- If multiple values are associated with the requested record, the results will be returned as a comma-separated list.
In such cases you may want to pass option wantlist=True to the plugin, which will result in the record values being returned as a list
over which you can iterate later on.
- By default, the lookup will rely on system-wide configured DNS servers for performing the query.
It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation.
This needs to be passed in as an additional parameter to the lookup
options:
_terms:
description: domain(s) to query
qtype:
description: record type to query
default: 'A'
choices: [A, ALL, AAAA, CNAME, DNAME, DLV, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
flat:
description: If 0 each record is returned as a dictionary, otherwise a string
default: 1
notes:
- ALL is not a record per se; merely the listed fields are available for any record results you retrieve in the form of a dictionary.
- While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
- If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly.
Syntax for specifying the record type is shown in the examples below.
- The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake.
"""
EXAMPLES = """
- name: Simple A record (IPV4 address) lookup for example.com
debug: msg="{{ lookup('dig', 'example.com.')}}"
- name: "The TXT record for example.org."
debug: msg="{{ lookup('dig', 'example.org.', 'qtype=TXT') }}"
- name: "The TXT record for example.org, alternative syntax."
debug: msg="{{ lookup('dig', 'example.org./TXT') }}"
- name: use in a loop
debug: msg="MX record for gmail.com {{ item }}"
with_items: "{{ lookup('dig', 'gmail.com./MX', wantlist=True) }}"
- debug: msg="Reverse DNS for 192.0.2.5 is {{ lookup('dig', '192.0.2.5/PTR') }}"
- debug: msg="Reverse DNS for 192.0.2.5 is {{ lookup('dig', '5.2.0.192.in-addr.arpa./PTR') }}"
- debug: msg="Reverse DNS for 192.0.2.5 is {{ lookup('dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}"
- debug: msg="Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}"
- debug: msg="XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
with_items: "{{ lookup('dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"
"""
RETURN = """
_list:
description:
- list of composed strings or dictionaries with key and value.
If a dictionary, the fields below show the keys returned, depending on query type
fields:
ALL: owner, ttl, type
A: address
AAAA: address
CNAME: target
DNAME: target
DLV: algorithm, digest_type, key_tag, digest
DNSKEY: flags, algorithm, protocol, key
DS: algorithm, digest_type, key_tag, digest
HINFO: cpu, os
LOC: latitude, longitude, altitude, size, horizontal_precision, vertical_precision
MX: preference, exchange
NAPTR: order, preference, flags, service, regexp, replacement
NS: target
NSEC3PARAM: algorithm, flags, iterations, salt
PTR: target
RP: mbox, txt
SOA: mname, rname, serial, refresh, retry, expire, minimum
SPF: strings
SRV: priority, weight, port, target
SSHFP: algorithm, fp_type, fingerprint
TLSA: usage, selector, mtype, cert
TXT: strings
"""
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_native
import socket
try:
import dns.exception
import dns.name
import dns.resolver
import dns.reversename
import dns.rdataclass
from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC,
MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT)
HAVE_DNS = True
except ImportError:
HAVE_DNS = False
def make_rdata_dict(rdata):
''' While the 'dig' lookup plugin supports anything which dnspython supports
out of the box, the following supported_types list describes which
DNS query types we can convert to a dict.
Note: adding support for RRSIG is hard work. :)
'''
supported_types = {
A: ['address'],
AAAA: ['address'],
CNAME: ['target'],
DNAME: ['target'],
DLV: ['algorithm', 'digest_type', 'key_tag', 'digest'],
DNSKEY: ['flags', 'algorithm', 'protocol', 'key'],
DS: ['algorithm', 'digest_type', 'key_tag', 'digest'],
HINFO: ['cpu', 'os'],
LOC: ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
MX: ['preference', 'exchange'],
NAPTR: ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
NS: ['target'],
NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'],
PTR: ['target'],
RP: ['mbox', 'txt'],
# RRSIG: ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
SPF: ['strings'],
SRV: ['priority', 'weight', 'port', 'target'],
SSHFP: ['algorithm', 'fp_type', 'fingerprint'],
TLSA: ['usage', 'selector', 'mtype', 'cert'],
TXT: ['strings'],
}
rd = {}
if rdata.rdtype in supported_types:
fields = supported_types[rdata.rdtype]
for f in fields:
val = rdata.__getattribute__(f)
if isinstance(val, dns.name.Name):
val = dns.name.Name.to_text(val)
if rdata.rdtype == DLV and f == 'digest':
val = dns.rdata._hexify(rdata.digest).replace(' ', '')
if rdata.rdtype == DS and f == 'digest':
val = dns.rdata._hexify(rdata.digest).replace(' ', '')
if rdata.rdtype == DNSKEY and f == 'key':
val = dns.rdata._base64ify(rdata.key).replace(' ', '')
if rdata.rdtype == NSEC3PARAM and f == 'salt':
val = dns.rdata._hexify(rdata.salt).replace(' ', '')
if rdata.rdtype == SSHFP and f == 'fingerprint':
val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '')
if rdata.rdtype == TLSA and f == 'cert':
val = dns.rdata._hexify(rdata.cert).replace(' ', '')
rd[f] = val
return rd
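# Illustrative (hypothetical values): for an MX rdata, make_rdata_dict would
# return something like {'preference': 10, 'exchange': 'mail.example.com.'};
# the run() method below then adds owner, ttl, type and class on top.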
# ==============================================================
# dig: Lookup DNS records
#
# --------------------------------------------------------------
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
'''
terms contains a string with things to `dig' for. We support the
following formats:
example.com # A record
example.com qtype=A # same
example.com/TXT # specific qtype
example.com qtype=txt # same
192.0.2.23/PTR # reverse PTR
^^ shortcut for 23.2.0.192.in-addr.arpa/PTR
example.net/AAAA @nameserver # query specified server
^^^ can be comma-sep list of names/addresses
... flat=0 # returns a dict; default is 1 == string
'''
if HAVE_DNS is False:
raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed")
# Create Resolver object so that we can set NS if necessary
myres = dns.resolver.Resolver(configure=True)
edns_size = 4096
myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
domain = None
qtype = 'A'
flat = True
rdclass = dns.rdataclass.from_text('IN')
for t in terms:
if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok.
nsset = t[1:].split(',')
for ns in nsset:
nameservers = []
# Check if we have a valid IP address. If so, use that, otherwise
# try to resolve name to address using system's resolver. If that
# fails we bail out.
try:
socket.inet_aton(ns)
nameservers.append(ns)
                except socket.error:
try:
nsaddr = dns.resolver.query(ns)[0].address
nameservers.append(nsaddr)
except Exception as e:
raise AnsibleError("dns lookup NS: %s" % to_native(e))
myres.nameservers = nameservers
continue
            if '=' in t:
                # '=' is guaranteed present; split only on the first one so
                # option values may themselves contain '='
                opt, arg = t.split('=', 1)
if opt == 'qtype':
qtype = arg.upper()
elif opt == 'flat':
flat = int(arg)
elif opt == 'class':
try:
rdclass = dns.rdataclass.from_text(arg)
except Exception as e:
raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
continue
if '/' in t:
try:
domain, qtype = t.split('/')
                except ValueError:
domain = t
else:
domain = t
# print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)
ret = []
if qtype.upper() == 'PTR':
try:
n = dns.reversename.from_address(domain)
domain = n.to_text()
except dns.exception.SyntaxError:
pass
except Exception as e:
raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
try:
answers = myres.query(domain, qtype, rdclass=rdclass)
for rdata in answers:
s = rdata.to_text()
if qtype.upper() == 'TXT':
s = s[1:-1] # Strip outside quotes on TXT rdata
if flat:
ret.append(s)
else:
try:
rd = make_rdata_dict(rdata)
rd['owner'] = answers.canonical_name.to_text()
rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
rd['ttl'] = answers.rrset.ttl
rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
ret.append(rd)
except Exception as e:
ret.append(str(e))
except dns.resolver.NXDOMAIN:
ret.append('NXDOMAIN')
except dns.resolver.NoAnswer:
ret.append("")
except dns.resolver.Timeout:
ret.append('')
except dns.exception.DNSException as e:
raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
return ret
|
hj3938/zulip | refs/heads/master | zerver/management/commands/set_default_streams.py | 114 | from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.models import Realm
from zerver.lib.actions import set_default_streams
from optparse import make_option
import sys
class Command(BaseCommand):
help = """Set default streams for a realm
Users created under this realm will start out with these streams. This
command is not additive: if you re-run it on a domain with a different
set of default streams, those will be the new complete set of default
streams.
For example:
python manage.py set_default_streams --domain=foo.com --streams=foo,bar,baz
python manage.py set_default_streams --domain=foo.com --streams="foo,bar,baz with space"
python manage.py set_default_streams --domain=foo.com --streams=
"""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='The name of the existing realm to which to attach default streams.'),
make_option('-s', '--streams',
dest='streams',
type='str',
help='A comma-separated list of stream names.'),
)
def handle(self, **options):
if options["domain"] is None or options["streams"] is None:
print >>sys.stderr, "Please provide both a domain name and a default \
set of streams (which can be empty, with `--streams=`)."
exit(1)
stream_names = [stream.strip() for stream in options["streams"].split(",")]
realm = Realm.objects.get(domain=options["domain"])
set_default_streams(realm, stream_names)
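# Editor's sketch: the command can also be driven from code (domain and
# stream names hypothetical):
#   from django.core.management import call_command
#   call_command('set_default_streams', domain='foo.com', streams='social,errors')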
|
sjsrey/pysal_core | refs/heads/master | pysal_core/io/util/wkt.py | 2 | from ... import cg
import re
__author__ = "Charles R Schmidt <schmidtc@gmail.com>"
__all__ = ['WKTParser']
class WKTParser:
""" Class to represent OGC WKT, supports reading and writing
Modified from...
# URL: http://dev.openlayers.org/releases/OpenLayers-2.7/lib/OpenLayers/Format/WKT.js
#Reg Ex Strings copied from OpenLayers.Format.WKT
Example
-------
>>> from pysal.core.IOHandlers import wkt
>>> import pysal
Create some Well-Known Text objects
>>> p = 'POLYGON((1 1,5 1,5 5,1 5,1 1),(2 2, 3 2, 3 3, 2 3,2 2))'
>>> pt = 'POINT(6 10)'
>>> l = 'LINESTRING(3 4,10 50,20 25)'
Instantiate the parser
>>> wkt = WKTParser()
Inspect our WKT polygon
>>> wkt(p).parts
[[(1.0, 1.0), (1.0, 5.0), (5.0, 5.0), (5.0, 1.0), (1.0, 1.0)], [(2.0, 2.0), (2.0, 3.0), (3.0, 3.0), (3.0, 2.0), (2.0, 2.0)]]
>>> wkt(p).centroid
(2.9705882352941178, 2.9705882352941178)
>>> wkt(p).area
17.0
Inspect pt, our WKT point object
>>> wkt(pt)
(6.0, 10.0)
Inspect our WKT linestring
>>> wkt(l).len
73.45538453219989
>>> wkt(l).parts
[[(3.0, 4.0), (10.0, 50.0), (20.0, 25.0)]]
Read in WKT from a file
>>> f = pysal.open(pysal.examples.get_path('stl_hom.wkt'))
>>> f.mode
'r'
>>> f.header
[]
See local doctest output for the items not tested...
"""
regExes = {'typeStr': re.compile('^\s*([\w\s]+)\s*\(\s*(.*)\s*\)\s*$'),
'spaces': re.compile('\s+'),
'parenComma': re.compile('\)\s*,\s*\('),
'doubleParenComma': re.compile('\)\s*\)\s*,\s*\(\s*\('), # can't use {2} here
'trimParens': re.compile('^\s*\(?(.*?)\)?\s*$')}
def __init__(self):
self.parsers = p = {}
p['point'] = self.Point
p['linestring'] = self.LineString
p['polygon'] = self.Polygon
def Point(self, geoStr):
coords = self.regExes['spaces'].split(geoStr.strip())
return cg.Point((coords[0], coords[1]))
def LineString(self, geoStr):
points = geoStr.strip().split(',')
points = map(self.Point, points)
return cg.Chain(points)
def Polygon(self, geoStr):
rings = self.regExes['parenComma'].split(geoStr.strip())
for i, ring in enumerate(rings):
ring = self.regExes['trimParens'].match(ring).groups()[0]
ring = self.LineString(ring).vertices
rings[i] = ring
return cg.Polygon(rings)
def fromWKT(self, wkt):
matches = self.regExes['typeStr'].match(wkt)
if matches:
geoType, geoStr = matches.groups()
geoType = geoType.lower().strip()
try:
return self.parsers[geoType](geoStr)
except KeyError:
raise NotImplementedError("Unsupported WKT Type: %s" % geoType)
else:
return None
__call__ = fromWKT
if __name__ == '__main__':
p = 'POLYGON((1 1,5 1,5 5,1 5,1 1),(2 2, 3 2, 3 3, 2 3,2 2))'
pt = 'POINT(6 10)'
l = 'LINESTRING(3 4,10 50,20 25)'
wktExamples = ['POINT(6 10)',
'LINESTRING(3 4,10 50,20 25)',
'POLYGON((1 1,5 1,5 5,1 5,1 1),(2 2, 3 2, 3 3, 2 3,2 2))',
'MULTIPOINT(3.5 5.6,4.8 10.5)',
'MULTILINESTRING((3 4,10 50,20 25),(-5 -8,-10 -8,-15 -4))',
'MULTIPOLYGON(((1 1,5 1,5 5,1 5,1 1),(2 2, 3 2, 3 3, 2 3,2 2)),((3 3,6 2,6 4,3 3)))',
'GEOMETRYCOLLECTION(POINT(4 6),LINESTRING(4 6,7 10))',
'POINT ZM (1 1 5 60)',
'POINT M (1 1 80)',
'POINT EMPTY',
'MULTIPOLYGON EMPTY']
wkt = WKTParser()
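    # Editor's sketch: exercise the parser on every sample, skipping the WKT
    # types this class does not implement (MULTI*, GEOMETRYCOLLECTION, EMPTY).
    for example in wktExamples:
        try:
            print(wkt(example))
        except NotImplementedError as e:
            print(e)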
|
benschmaus/catapult | refs/heads/master | third_party/google-endpoints/apitools/base/protorpclite/test_util.py | 21 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test utilities for message testing.
Includes module interface test to ensure that public parts of module are
correctly declared in __all__.
Includes message types that correspond to those defined in
services_test.proto.
Includes additional test utilities to make sure encoding/decoding libraries
conform.
"""
import cgi
import datetime
import inspect
import os
import re
import socket
import types
import six
from six.moves import range # pylint: disable=redefined-builtin
import unittest2 as unittest
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import util
# Unicode of the word "Russian" in cyrillic.
RUSSIAN = u'\u0440\u0443\u0441\u0441\u043a\u0438\u0439'
# All byte values interspersed with null bytes.
BINARY = b''.join(six.int2byte(value) + b'\0' for value in range(256))
class TestCase(unittest.TestCase):
def assertRaisesWithRegexpMatch(self,
exception,
regexp,
function,
*params,
**kwargs):
"""Check that exception is raised and text matches regular expression.
Args:
exception: Exception type that is expected.
regexp: String regular expression that is expected in error message.
function: Callable to test.
params: Parameters to forward to function.
kwargs: Keyword arguments to forward to function.
"""
try:
function(*params, **kwargs)
self.fail('Expected exception %s was not raised' %
exception.__name__)
except exception as err:
match = bool(re.match(regexp, str(err)))
self.assertTrue(match, 'Expected match "%s", found "%s"' % (regexp,
err))
def assertHeaderSame(self, header1, header2):
"""Check that two HTTP headers are the same.
Args:
header1: Header value string 1.
header2: header value string 2.
"""
value1, params1 = cgi.parse_header(header1)
value2, params2 = cgi.parse_header(header2)
self.assertEqual(value1, value2)
self.assertEqual(params1, params2)
def assertIterEqual(self, iter1, iter2):
"""Check that two iterators or iterables are equal independent of order.
Similar to Python 2.7 assertItemsEqual. Named differently in order to
avoid potential conflict.
Args:
iter1: An iterator or iterable.
iter2: An iterator or iterable.
"""
list1 = list(iter1)
list2 = list(iter2)
unmatched1 = list()
while list1:
item1 = list1[0]
del list1[0]
for index in range(len(list2)):
if item1 == list2[index]:
del list2[index]
break
else:
unmatched1.append(item1)
error_message = []
for item in unmatched1:
error_message.append(
' Item from iter1 not found in iter2: %r' % item)
for item in list2:
error_message.append(
' Item from iter2 not found in iter1: %r' % item)
if error_message:
self.fail('Collections not equivalent:\n' +
'\n'.join(error_message))
class ModuleInterfaceTest(object):
"""Test to ensure module interface is carefully constructed.
A module interface is the set of public objects listed in the
module __all__ attribute. Modules that that are considered public
should have this interface carefully declared. At all times, the
__all__ attribute should have objects intended to be publically
used and all other objects in the module should be considered
unused.
Protected attributes (those beginning with '_') and other imported
modules should not be part of this set of variables. An exception
is for variables that begin and end with '__' which are implicitly
part of the interface (eg. __name__, __file__, __all__ itself,
etc.).
Modules that are imported in to the tested modules are an
exception and may be left out of the __all__ definition. The test
is done by checking the value of what would otherwise be a public
name and not allowing it to be exported if it is an instance of a
module. Modules that are explicitly exported are for the time
being not permitted.
To use this test class a module should define a new class that
inherits first from ModuleInterfaceTest and then from
test_util.TestCase. No other tests should be added to this test
case, making the order of inheritance less important, but if setUp
for some reason is overidden, it is important that
ModuleInterfaceTest is first in the list so that its setUp method
is invoked.
Multiple inheritance is required so that ModuleInterfaceTest is
not itself a test, and is not itself executed as one.
The test class is expected to have the following class attributes
defined:
MODULE: A reference to the module that is being validated for interface
correctness.
Example:
Module definition (hello.py):
import sys
__all__ = ['hello']
def _get_outputter():
return sys.stdout
def hello():
_get_outputter().write('Hello\n')
Test definition:
import unittest
from protorpc import test_util
import hello
class ModuleInterfaceTest(test_util.ModuleInterfaceTest,
test_util.TestCase):
MODULE = hello
class HelloTest(test_util.TestCase):
... Test 'hello' module ...
if __name__ == '__main__':
unittest.main()
"""
def setUp(self):
"""Set up makes sure that MODULE and IMPORTED_MODULES is defined.
This is a basic configuration test for the test itself so does not
get it's own test case.
"""
if not hasattr(self, 'MODULE'):
self.fail(
"You must define 'MODULE' on ModuleInterfaceTest sub-class "
"%s." % type(self).__name__)
def testAllExist(self):
"""Test that all attributes defined in __all__ exist."""
missing_attributes = []
for attribute in self.MODULE.__all__:
if not hasattr(self.MODULE, attribute):
missing_attributes.append(attribute)
if missing_attributes:
self.fail('%s of __all__ are not defined in module.' %
missing_attributes)
def testAllExported(self):
"""Test that all public attributes not imported are in __all__."""
missing_attributes = []
for attribute in dir(self.MODULE):
if not attribute.startswith('_'):
if (attribute not in self.MODULE.__all__ and
not isinstance(getattr(self.MODULE, attribute),
types.ModuleType) and
attribute != 'with_statement'):
missing_attributes.append(attribute)
if missing_attributes:
self.fail('%s are not modules and not defined in __all__.' %
missing_attributes)
def testNoExportedProtectedVariables(self):
"""Test that there are no protected variables listed in __all__."""
protected_variables = []
for attribute in self.MODULE.__all__:
if attribute.startswith('_'):
protected_variables.append(attribute)
if protected_variables:
self.fail('%s are protected variables and may not be exported.' %
protected_variables)
def testNoExportedModules(self):
"""Test that no modules exist in __all__."""
exported_modules = []
for attribute in self.MODULE.__all__:
try:
value = getattr(self.MODULE, attribute)
except AttributeError:
# This is a different error case tested for in testAllExist.
pass
else:
if isinstance(value, types.ModuleType):
exported_modules.append(attribute)
if exported_modules:
self.fail('%s are modules and may not be exported.' %
exported_modules)
class NestedMessage(messages.Message):
"""Simple message that gets nested in another message."""
a_value = messages.StringField(1, required=True)
class HasNestedMessage(messages.Message):
"""Message that has another message nested in it."""
nested = messages.MessageField(NestedMessage, 1)
repeated_nested = messages.MessageField(NestedMessage, 2, repeated=True)
class HasDefault(messages.Message):
"""Has a default value."""
a_value = messages.StringField(1, default=u'a default')
class OptionalMessage(messages.Message):
"""Contains all message types."""
class SimpleEnum(messages.Enum):
"""Simple enumeration type."""
VAL1 = 1
VAL2 = 2
double_value = messages.FloatField(1, variant=messages.Variant.DOUBLE)
float_value = messages.FloatField(2, variant=messages.Variant.FLOAT)
int64_value = messages.IntegerField(3, variant=messages.Variant.INT64)
uint64_value = messages.IntegerField(4, variant=messages.Variant.UINT64)
int32_value = messages.IntegerField(5, variant=messages.Variant.INT32)
bool_value = messages.BooleanField(6, variant=messages.Variant.BOOL)
string_value = messages.StringField(7, variant=messages.Variant.STRING)
bytes_value = messages.BytesField(8, variant=messages.Variant.BYTES)
enum_value = messages.EnumField(SimpleEnum, 10)
class RepeatedMessage(messages.Message):
"""Contains all message types as repeated fields."""
class SimpleEnum(messages.Enum):
"""Simple enumeration type."""
VAL1 = 1
VAL2 = 2
double_value = messages.FloatField(1,
variant=messages.Variant.DOUBLE,
repeated=True)
float_value = messages.FloatField(2,
variant=messages.Variant.FLOAT,
repeated=True)
int64_value = messages.IntegerField(3,
variant=messages.Variant.INT64,
repeated=True)
uint64_value = messages.IntegerField(4,
variant=messages.Variant.UINT64,
repeated=True)
int32_value = messages.IntegerField(5,
variant=messages.Variant.INT32,
repeated=True)
bool_value = messages.BooleanField(6,
variant=messages.Variant.BOOL,
repeated=True)
string_value = messages.StringField(7,
variant=messages.Variant.STRING,
repeated=True)
bytes_value = messages.BytesField(8,
variant=messages.Variant.BYTES,
repeated=True)
enum_value = messages.EnumField(SimpleEnum,
10,
repeated=True)
class HasOptionalNestedMessage(messages.Message):
nested = messages.MessageField(OptionalMessage, 1)
repeated_nested = messages.MessageField(OptionalMessage, 2, repeated=True)
# pylint:disable=anomalous-unicode-escape-in-string
class ProtoConformanceTestBase(object):
"""Protocol conformance test base class.
Each supported protocol should implement two methods that support encoding
and decoding of Message objects in that format:
encode_message(message) - Serialize to encoding.
      decode_message(message_type, encoded_message) - Deserialize from encoding.
Tests for the modules where these functions are implemented should extend
this class in order to support basic behavioral expectations. This ensures
that protocols correctly encode and decode message transparently to the
caller.
In order to support these test, the base class should also extend
the TestCase class and implement the following class attributes
which define the encoded version of certain protocol buffers:
encoded_partial:
<OptionalMessage
double_value: 1.23
int64_value: -100000000000
string_value: u"a string"
enum_value: OptionalMessage.SimpleEnum.VAL2
>
encoded_full:
<OptionalMessage
double_value: 1.23
float_value: -2.5
int64_value: -100000000000
uint64_value: 102020202020
int32_value: 1020
bool_value: true
string_value: u"a string\u044f"
bytes_value: b"a bytes\xff\xfe"
enum_value: OptionalMessage.SimpleEnum.VAL2
>
encoded_repeated:
<RepeatedMessage
double_value: [1.23, 2.3]
float_value: [-2.5, 0.5]
int64_value: [-100000000000, 20]
uint64_value: [102020202020, 10]
int32_value: [1020, 718]
bool_value: [true, false]
string_value: [u"a string\u044f", u"another string"]
bytes_value: [b"a bytes\xff\xfe", b"another bytes"]
enum_value: [OptionalMessage.SimpleEnum.VAL2,
                     OptionalMessage.SimpleEnum.VAL1]
>
encoded_nested:
<HasNestedMessage
nested: <NestedMessage
a_value: "a string"
>
>
encoded_repeated_nested:
<HasNestedMessage
repeated_nested: [
<NestedMessage a_value: "a string">,
<NestedMessage a_value: "another string">
]
>
unexpected_tag_message:
An encoded message that has an undefined tag or number in the stream.
encoded_default_assigned:
<HasDefault
a_value: "a default"
>
encoded_nested_empty:
<HasOptionalNestedMessage
nested: <OptionalMessage>
>
encoded_invalid_enum:
<OptionalMessage
enum_value: (invalid value for serialization type)
>
"""
encoded_empty_message = ''
def testEncodeInvalidMessage(self):
message = NestedMessage()
self.assertRaises(messages.ValidationError,
self.PROTOLIB.encode_message, message)
def CompareEncoded(self, expected_encoded, actual_encoded):
"""Compare two encoded protocol values.
Can be overridden by sub-classes to special case comparison.
For example, to eliminate white space from output that is not
relevant to encoding.
Args:
expected_encoded: Expected string encoded value.
actual_encoded: Actual string encoded value.
"""
self.assertEquals(expected_encoded, actual_encoded)
def EncodeDecode(self, encoded, expected_message):
message = self.PROTOLIB.decode_message(type(expected_message), encoded)
self.assertEquals(expected_message, message)
self.CompareEncoded(encoded, self.PROTOLIB.encode_message(message))
def testEmptyMessage(self):
self.EncodeDecode(self.encoded_empty_message, OptionalMessage())
def testPartial(self):
"""Test message with a few values set."""
message = OptionalMessage()
message.double_value = 1.23
message.int64_value = -100000000000
message.int32_value = 1020
message.string_value = u'a string'
message.enum_value = OptionalMessage.SimpleEnum.VAL2
self.EncodeDecode(self.encoded_partial, message)
def testFull(self):
"""Test all types."""
message = OptionalMessage()
message.double_value = 1.23
message.float_value = -2.5
message.int64_value = -100000000000
message.uint64_value = 102020202020
message.int32_value = 1020
message.bool_value = True
message.string_value = u'a string\u044f'
message.bytes_value = b'a bytes\xff\xfe'
message.enum_value = OptionalMessage.SimpleEnum.VAL2
self.EncodeDecode(self.encoded_full, message)
def testRepeated(self):
"""Test repeated fields."""
message = RepeatedMessage()
message.double_value = [1.23, 2.3]
message.float_value = [-2.5, 0.5]
message.int64_value = [-100000000000, 20]
message.uint64_value = [102020202020, 10]
message.int32_value = [1020, 718]
message.bool_value = [True, False]
message.string_value = [u'a string\u044f', u'another string']
message.bytes_value = [b'a bytes\xff\xfe', b'another bytes']
message.enum_value = [RepeatedMessage.SimpleEnum.VAL2,
RepeatedMessage.SimpleEnum.VAL1]
self.EncodeDecode(self.encoded_repeated, message)
def testNested(self):
"""Test nested messages."""
nested_message = NestedMessage()
nested_message.a_value = u'a string'
message = HasNestedMessage()
message.nested = nested_message
self.EncodeDecode(self.encoded_nested, message)
def testRepeatedNested(self):
"""Test repeated nested messages."""
nested_message1 = NestedMessage()
nested_message1.a_value = u'a string'
nested_message2 = NestedMessage()
nested_message2.a_value = u'another string'
message = HasNestedMessage()
message.repeated_nested = [nested_message1, nested_message2]
self.EncodeDecode(self.encoded_repeated_nested, message)
def testStringTypes(self):
"""Test that encoding str on StringField works."""
message = OptionalMessage()
message.string_value = 'Latin'
self.EncodeDecode(self.encoded_string_types, message)
def testEncodeUninitialized(self):
"""Test that cannot encode uninitialized message."""
required = NestedMessage()
self.assertRaisesWithRegexpMatch(messages.ValidationError,
"Message NestedMessage is missing "
"required field a_value",
self.PROTOLIB.encode_message,
required)
def testUnexpectedField(self):
"""Test decoding and encoding unexpected fields."""
loaded_message = self.PROTOLIB.decode_message(
OptionalMessage, self.unexpected_tag_message)
# Message should be equal to an empty message, since unknown
# values aren't included in equality.
self.assertEquals(OptionalMessage(), loaded_message)
# Verify that the encoded message matches the source, including the
# unknown value.
self.assertEquals(self.unexpected_tag_message,
self.PROTOLIB.encode_message(loaded_message))
def testDoNotSendDefault(self):
"""Test that default is not sent when nothing is assigned."""
self.EncodeDecode(self.encoded_empty_message, HasDefault())
def testSendDefaultExplicitlyAssigned(self):
"""Test that default is sent when explcitly assigned."""
message = HasDefault()
message.a_value = HasDefault.a_value.default
self.EncodeDecode(self.encoded_default_assigned, message)
def testEncodingNestedEmptyMessage(self):
"""Test encoding a nested empty message."""
message = HasOptionalNestedMessage()
message.nested = OptionalMessage()
self.EncodeDecode(self.encoded_nested_empty, message)
def testEncodingRepeatedNestedEmptyMessage(self):
"""Test encoding a nested empty message."""
message = HasOptionalNestedMessage()
message.repeated_nested = [OptionalMessage(), OptionalMessage()]
self.EncodeDecode(self.encoded_repeated_nested_empty, message)
def testContentType(self):
self.assertTrue(isinstance(self.PROTOLIB.CONTENT_TYPE, str))
def testDecodeInvalidEnumType(self):
self.assertRaisesWithRegexpMatch(messages.DecodeError,
'Invalid enum value ',
self.PROTOLIB.decode_message,
OptionalMessage,
self.encoded_invalid_enum)
def testDateTimeNoTimeZone(self):
"""Test that DateTimeFields are encoded/decoded correctly."""
class MyMessage(messages.Message):
value = message_types.DateTimeField(1)
value = datetime.datetime(2013, 1, 3, 11, 36, 30, 123000)
message = MyMessage(value=value)
decoded = self.PROTOLIB.decode_message(
MyMessage, self.PROTOLIB.encode_message(message))
self.assertEquals(decoded.value, value)
def testDateTimeWithTimeZone(self):
"""Test DateTimeFields with time zones."""
class MyMessage(messages.Message):
value = message_types.DateTimeField(1)
value = datetime.datetime(2013, 1, 3, 11, 36, 30, 123000,
util.TimeZoneOffset(8 * 60))
message = MyMessage(value=value)
decoded = self.PROTOLIB.decode_message(
MyMessage, self.PROTOLIB.encode_message(message))
self.assertEquals(decoded.value, value)
def pick_unused_port():
"""Find an unused port to use in tests.
Derived from Damon Kohlers example:
http://code.activestate.com/recipes/531822-pick-unused-port
"""
temp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
temp.bind(('localhost', 0))
port = temp.getsockname()[1]
finally:
temp.close()
return port
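# Editor's sketch (hypothetical usage): a test can reserve a port up front,
#   port = pick_unused_port()
# then bind whatever throwaway server it starts to ('localhost', port).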
def get_module_name(module_attribute):
"""Get the module name.
Args:
module_attribute: An attribute of the module.
Returns:
The fully qualified module name or simple module name where
'module_attribute' is defined if the module name is "__main__".
"""
if module_attribute.__module__ == '__main__':
module_file = inspect.getfile(module_attribute)
default = os.path.basename(module_file).split('.')[0]
return default
else:
return module_attribute.__module__
|
tmpgit/intellij-community | refs/heads/master | python/testData/joinLines/ListLBracket.py | 83 | a = <caret>[
111,
222
]
|
pombredanne/brisk-hadoop-common | refs/heads/brisk | src/contrib/hod/testing/testThreads.py | 182 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re, threading, time
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)
sys.path.append(rootDirectory)
from testing.lib import BaseTestSuite
# module specific imports
import os, tempfile, random
excludes = []
import getpass
from hodlib.Common.threads import simpleCommand
from testing.helper import sampleText
# All test-case classes should have the naming convention test_.*
class test_SimpleCommand(unittest.TestCase):
def setUp(self):
self.rootDir = '/tmp/hod-%s' % getpass.getuser()
if not os.path.exists(self.rootDir):
os.mkdir(self.rootDir)
self.prefix= 'ThreadsTestSuite.test_SimpleCommand'
self.testFile = None
pass
def testRedirectedStdout(self):
self.testFile= tempfile.NamedTemporaryFile(dir=self.rootDir, \
prefix=self.prefix)
cmd=simpleCommand('helper','%s %s 1 1>%s' % \
(sys.executable, \
os.path.join(rootDirectory, "testing", "helper.py"), \
self.testFile.name))
cmd.start()
cmd.join()
self.testFile.seek(0)
stdout = self.testFile.read()
# print stdout, sampleText
assert(stdout == sampleText)
pass
def testRedirectedStderr(self):
self.testFile= tempfile.NamedTemporaryFile(dir=self.rootDir, \
prefix=self.prefix)
cmd=simpleCommand('helper','%s %s 2 2>%s' % \
(sys.executable, \
os.path.join(rootDirectory, "testing", "helper.py"), \
self.testFile.name))
cmd.start()
cmd.join()
self.testFile.seek(0)
stderror = self.testFile.read()
# print stderror, sampleText
assert(stderror == sampleText)
pass
def tearDown(self):
if self.testFile: self.testFile.close()
pass
class ThreadsTestSuite(BaseTestSuite):
def __init__(self):
# suite setup
BaseTestSuite.__init__(self, __name__, excludes)
pass
def cleanUp(self):
# suite tearDown
pass
def RunThreadsTests():
# modulename_suite
suite = ThreadsTestSuite()
testResult = suite.runTests()
suite.cleanUp()
return testResult
if __name__ == "__main__":
RunThreadsTests()
|
cryptobanana/ansible | refs/heads/devel | lib/ansible/modules/net_tools/lldp.py | 47 | #!/usr/bin/python -tt
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lldp
requirements: [ lldpctl ]
version_added: 1.6
short_description: get details reported by lldp
description:
- Reads data out of lldpctl
options: {}
author: "Andy Hill (@andyhky)"
notes:
- Requires lldpd running and lldp enabled on switches
'''
EXAMPLES = '''
# Retrieve switch/port information
- name: Gather information from lldp
lldp:
- name: Print each switch/port
debug:
msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
with_items: "{{ lldp.keys() }}"
# TASK: [Print each switch/port] ***********************************************************
# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
'''
from ansible.module_utils.basic import AnsibleModule
def gather_lldp(module):
cmd = ['lldpctl', '-f', 'keyvalue']
rc, output, err = module.run_command(cmd)
if output:
output_dict = {}
current_dict = {}
lldp_entries = output.split("\n")
for entry in lldp_entries:
if entry.startswith('lldp'):
path, value = entry.strip().split("=", 1)
path = path.split(".")
path_components, final = path[:-1], path[-1]
else:
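                # continuation of a multi-line value: append the raw line
                # to the key parsed from the preceding 'lldp...' entry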
value = current_dict[final] + '\n' + entry
current_dict = output_dict
for path_component in path_components:
current_dict[path_component] = current_dict.get(path_component, {})
current_dict = current_dict[path_component]
current_dict[final] = value
return output_dict
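# Editor's sketch of the transformation above, using the keyvalue format
# lldpctl emits (switch/port values hypothetical):
#   lldp.eth0.chassis.name=switch1.example.com
#   lldp.eth0.port.ifname=Gi0/24
# becomes
#   {'lldp': {'eth0': {'chassis': {'name': 'switch1.example.com'},
#                      'port': {'ifname': 'Gi0/24'}}}}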
def main():
module = AnsibleModule({})
lldp_output = gather_lldp(module)
try:
data = {'lldp': lldp_output['lldp']}
module.exit_json(ansible_facts=data)
except TypeError:
module.fail_json(msg="lldpctl command failed. is lldpd running?")
if __name__ == '__main__':
main()
|
Eigenlabs/EigenD | refs/heads/1.4 | tools/packages/SCons/Variables/BoolVariable.py | 2 | """engine.SCons.Variables.BoolVariable
This file defines the option type for SCons implementing true/false values.
Usage example:
opts = Variables()
opts.Add(BoolVariable('embedded', 'build for an embedded system', 0))
...
if env['embedded'] == 1:
...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/BoolVariable.py 4577 2009/12/27 19:43:56 scons"
__all__ = ['BoolVariable',]
import string
import SCons.Errors
__true_strings = ('y', 'yes', 'true', 't', '1', 'on' , 'all' )
__false_strings = ('n', 'no', 'false', 'f', '0', 'off', 'none')
def _text2bool(val):
"""
Converts strings to True/False depending on the 'truth' expressed by
the string. If the string can't be converted, the original value
will be returned.
See '__true_strings' and '__false_strings' for values considered
'true' or 'false respectivly.
This is usable as 'converter' for SCons' Variables.
"""
lval = string.lower(val)
if lval in __true_strings: return True
if lval in __false_strings: return False
raise ValueError("Invalid value for boolean option: %s" % val)
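# Editor's illustration:
#   _text2bool('Yes') -> True; _text2bool('off') -> False;
#   _text2bool('maybe') raises ValueError.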
def _validator(key, val, env):
"""
Validates the given value to be either '0' or '1'.
This is usable as 'validator' for SCons' Variables.
"""
if not env[key] in (True, False):
raise SCons.Errors.UserError(
'Invalid value for boolean option %s: %s' % (key, env[key]))
def BoolVariable(key, help, default):
"""
    The input parameters describe a boolean option, thus they are
    returned with the correct converter and validator appended. The
    'help' text will be appended with '(yes|no)' to show the valid
    values. The result is usable as input to opts.Add().
"""
return (key, '%s (yes|no)' % help, default,
_validator, _text2bool)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
andy928/xpenology | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
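# Editor's sketch (hypothetical latencies):
#   stats = {}
#   for latency in (120, 80, 200):
#       add_stats(stats, "read", latency)
#   # stats["read"] -> (80, 200, 150, 3), i.e. min, max, smoothed avg, count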
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
sputnick-dev/weboob | refs/heads/master | modules/cic/browser.py | 4 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
try:
from urlparse import urlsplit, parse_qsl, urlparse
except ImportError:
from urllib.parse import urlsplit, parse_qsl, urlparse
from datetime import datetime, timedelta
from random import randint
from weboob.tools.compat import basestring
from weboob.browser.browsers import LoginBrowser, need_login
from weboob.browser.profiles import Wget
from weboob.browser.url import URL
from weboob.exceptions import BrowserIncorrectPassword
from weboob.capabilities.bank import Transfer, TransferError
from .pages import LoginPage, LoginErrorPage, AccountsPage, UserSpacePage, \
OperationsPage, CardPage, ComingPage, NoOperationsPage, \
TransfertPage, ChangePasswordPage, VerifCodePage, EmptyPage
__all__ = ['CICBrowser']
class CICBrowser(LoginBrowser):
PROFILE = Wget()
BASEURL = 'https://www.cic.fr'
login = URL('/sb/fr/banques/particuliers/index.html',
'/(?P<subbank>.*)/fr/$',
'/(?P<subbank>.*)/fr/banques/accueil.html',
'/(?P<subbank>.*)/fr/banques/particuliers/index.html',
LoginPage)
login_error = URL('/(?P<subbank>.*)/fr/identification/default.cgi', LoginErrorPage)
accounts = URL('/(?P<subbank>.*)/fr/banque/situation_financiere.cgi',
'/(?P<subbank>.*)/fr/banque/situation_financiere.html',
AccountsPage)
user_space = URL('/(?P<subbank>.*)/fr/banque/espace_personnel.aspx', UserSpacePage)
operations = URL('/(?P<subbank>.*)/fr/banque/mouvements.cgi.*',
'/(?P<subbank>.*)/fr/banque/mouvements.html.*',
'/(?P<subbank>.*)/fr/banque/nr/nr_devbooster.aspx.*',
OperationsPage)
coming = URL('/(?P<subbank>.*)/fr/banque/mvts_instance.cgi.*', ComingPage)
card = URL('/(?P<subbank>.*)/fr/banque/operations_carte.cgi.*', CardPage)
noop = URL('/(?P<subbank>.*)/fr/banque/CR/arrivee.asp.*', NoOperationsPage)
info = URL('/(?P<subbank>.*)/fr/banque/BAD.*', EmptyPage)
transfert = URL('/(?P<subbank>.*)/fr/banque/virements/vplw_vi.html', EmptyPage)
transfert_2 = URL('/(?P<subbank>.*)/fr/banque/virements/vplw_cmweb.aspx.*', TransfertPage)
change_pass = URL('/(?P<subbank>.*)/fr/validation/change_password.cgi', ChangePasswordPage)
verify_pass = URL('/(?P<subbank>.*)/fr/validation/verif_code.cgi.*', VerifCodePage)
empty = URL('/(?P<subbank>.*)/fr/banques/index.html',
'/(?P<subbank>.*)/fr/banque/paci_beware_of_phishing.*',
'/(?P<subbank>.*)/fr/validation/(?!change_password|verif_code).*',
'/(?P<subbank>.*)/fr/banque/paci_engine/static_content_manager.aspx',
'/(?P<subbank>.*)/fr/banque/DELG_Gestion.*',
EmptyPage)
currentSubBank = None
__states__ = ['currentSubBank']
def do_login(self):
self.login.go()
if not self.page.logged:
self.page.login(self.username, self.password)
if not self.page.logged or self.login_error.is_here():
raise BrowserIncorrectPassword()
self.getCurrentSubBank()
@need_login
def get_accounts_list(self):
return self.accounts.stay_or_go(subbank=self.currentSubBank).iter_accounts()
def get_account(self, id):
assert isinstance(id, basestring)
for a in self.get_accounts_list():
if a.id == id:
return a
def getCurrentSubBank(self):
# the account list and history urls depend on the sub bank of the user
url = urlparse(self.url)
self.currentSubBank = url.path.lstrip('/').split('/')[0]
def list_operations(self, page_url):
if page_url.startswith('/') or page_url.startswith('https'):
self.location(page_url)
else:
self.location('%s/%s/fr/banque/%s' % (self.BASEURL, self.currentSubBank, page_url))
if not self.operations.is_here():
return iter([])
return self.pagination(lambda: self.page.get_history())
def get_history(self, account):
transactions = []
last_debit = None
for tr in self.list_operations(account._link_id):
# to prevent redundancy with card transactions, we do not
# store 'RELEVE CARTE' transaction.
if tr.raw != 'RELEVE CARTE':
transactions.append(tr)
elif last_debit is None:
last_debit = (tr.date - timedelta(days=10)).month
coming_link = self.page.get_coming_link() if self.operations.is_here() else None
if coming_link is not None:
for tr in self.list_operations(coming_link):
transactions.append(tr)
month = 0
for card_link in account._card_links:
v = urlsplit(card_link)
args = dict(parse_qsl(v.query))
            # handle the year wrap-around, where month 12 is followed by month 1
if int(args['mois']) < month:
month = month + 1
else:
month = int(args['mois'])
for tr in self.list_operations(card_link):
if month > last_debit:
tr._is_coming = True
transactions.append(tr)
transactions.sort(key=lambda tr: tr.rdate, reverse=True)
return transactions
def transfer(self, account, to, amount, reason=None):
# access the transfer page
self.transfert.go(subbank=self.currentSubBank)
# fill the form
form = self.page.get_form(xpath="//form[@id='P:F']")
try:
form['data_input_indiceCompteADebiter'] = self.page.get_from_account_index(account)
form['data_input_indiceCompteACrediter'] = self.page.get_to_account_index(to)
except ValueError as e:
raise TransferError(e.message)
form['[t:dbt%3adouble;]data_input_montant_value_0_'] = '%s' % str(amount).replace('.', ',')
if reason is not None:
form['[t:dbt%3astring;x(27)]data_input_libelleCompteDebite'] = reason
form['[t:dbt%3astring;x(31)]data_input_motifCompteCredite'] = reason
del form['_FID_GoCancel']
del form['_FID_DoValidate']
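        # the bank's form expects image-button click coordinates; randomize
        # them so the submission resembles a real click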
form['_FID_DoValidate.x'] = str(randint(3, 125))
form['_FID_DoValidate.y'] = str(randint(3, 22))
form.submit()
# look for known errors
content = self.page.get_unicode_content()
insufficient_amount_message = u'Le montant du virement doit être positif, veuillez le modifier'
maximum_allowed_balance_message = u'Montant maximum autorisé au débit pour ce compte'
if insufficient_amount_message in content:
raise TransferError('The amount you tried to transfer is too low.')
if maximum_allowed_balance_message in content:
raise TransferError('The maximum allowed balance for the target account has been / would be reached.')
# look for the known "all right" message
ready_for_transfer_message = u'Confirmer un virement entre vos comptes'
if ready_for_transfer_message not in content:
raise TransferError('The expected message "%s" was not found.' % ready_for_transfer_message)
# submit the confirmation form
form = self.page.get_form(xpath="//form[@id='P:F']")
del form['_FID_DoConfirm']
form['_FID_DoConfirm.x'] = str(randint(3, 125))
form['_FID_DoConfirm.y'] = str(randint(3, 22))
submit_date = datetime.now()
form.submit()
# look for the known "everything went well" message
content = self.page.get_unicode_content()
transfer_ok_message = u'Votre virement a été exécuté'
if transfer_ok_message not in content:
raise TransferError('The expected message "%s" was not found.' % transfer_ok_message)
# We now have to return a Transfer object
transfer = Transfer(submit_date.strftime('%Y%m%d%H%M%S'))
transfer.amount = amount
transfer.origin = account
transfer.recipient = to
transfer.date = submit_date
return transfer
|
DailyActie/Surrogate-Model | refs/heads/master | 01-codes/scikit-learn-master/sklearn/decomposition/online_lda.py | 1 | """
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from ..externals.six.moves import xrange
from ..base import BaseEstimator, TransformerMixin
from ..exceptions import NotFittedError
from ..externals.joblib import Parallel, delayed
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.extmath import logsumexp
from ..utils.validation import check_non_negative
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-step.
cal_sstats : boolean
Parameter that indicate to calculate sufficient statistics or not.
Set `cal_sstats` to `True` when we need to run M-step.
random_state : RandomState instance or None
Parameter that indicate how to initialize document topic distribution.
Set `random_state` to None will initialize document topic distribution
to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
In the literature, this is `gamma`. we can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
# diff on `component_` (only calculate it when `cal_diff` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
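# Editor's sketch of the E-step contract (shapes hypothetical): for a corpus
# X of shape (3, 5) with 2 topics and exp_lambda of shape (2, 5),
#   gamma, sstats = _update_doc_distribution(
#       X, exp_lambda, 0.5, 100, 1e-3, cal_sstats=True,
#       random_state=np.random.RandomState(0))
# returns gamma with shape (3, 2) and sstats with shape (2, 5).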
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
Method used to update `_component`. Only used in `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
        It is a parameter that controls the learning rate in the online
        learning method. The value should be set between (0.5, 1.0] to
        guarantee asymptotic convergence. When the value is 0.0 and
        batch_size is ``n_samples``, the update method is the same as batch
        learning. In the
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int optional (default=0)
How often to evaluate perplexity. Only used in `fit` method.
        Set it to 0 or a negative number to not evaluate perplexity in
training at all. Evaluating perplexity can help you check convergence
in training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`. In the literature, this is called lambda.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method='online',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
Parameter that indicate whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run M-step.
random_init : boolean
Parameter that indicate whether to initialize document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
            `doc_topic_distr` is the unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=self.verbose)
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
update `_component` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
            Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
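            # rho_t = (tau_0 + t) ** (-kappa); with the defaults
            # (learning_offset=10, learning_decay=0.7) the first
            # mini-batch gets weight ~0.19, decaying toward zero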
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
# update `component_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
# normalize doc_topic_distr
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
            It is used when calculating the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self.transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
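    # Usage sketch (illustrative only; the parameter values below are
    # assumptions, not defaults taken from this module):
    #
    #     lda = LatentDirichletAllocation(n_topics=10, learning_method='online')
    #     doc_topic_distr = lda.fit(X).transform(X)
    #     print(lda.perplexity(X))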
|
luckasfb/android_kernel_iocean_x7 | refs/heads/master | mediatek/build/tools/config/merge-project.py | 38 | import re, os, sys
pattern = [
re.compile("^([^=\s]+)\s*=\s*(.+)$"),
re.compile("^([^=\s]+)\s*=$"),
re.compile("\s*#")
]
config = {}
project_file = sys.argv
project_file.remove(sys.argv[0])
for f in project_file:
ff = open(f)
for line in ff.readlines():
result = (filter(lambda x:x,[x.search(line) for x in pattern]) or [None])[0]
if not result: continue
name,value = None,None
if len(result.groups())==0: continue
name = result.group(1)
try:
value = result.group(2)
except IndexError:
value = ""
config[name] = value.strip()
for item in sorted(config.keys()):
print "%s = %s"%(item,config[item])
|
nvoron23/gensim | refs/heads/develop | gensim/test/test_big.py | 71 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking processing/storing large inputs.
"""
import logging
import unittest
import os
import itertools
import tempfile
import numpy
import gensim
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_big.tst')
class BigCorpus(object):
"""A corpus of a large number of docs & large vocab"""
def __init__(self, words_only=False, num_terms=200000, num_docs=1000000, doc_len=100):
self.dictionary = gensim.utils.FakeDict(num_terms)
self.words_only = words_only
self.num_docs = num_docs
self.doc_len = doc_len
def __iter__(self):
for _ in range(self.num_docs):
doc_len = numpy.random.poisson(self.doc_len)
ids = numpy.random.randint(0, len(self.dictionary), doc_len)
if self.words_only:
yield [str(id) for id in ids]
else:
weights = numpy.random.poisson(3, doc_len)
yield sorted(zip(ids, weights))
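# What BigCorpus yields (illustrative): with words_only=True each document is
# a list of token strings; otherwise it is a sorted sparse bag-of-words such
# as [(12, 3), (57, 1)]. E.g. (not part of the original tests):
#
#   doc = next(iter(BigCorpus(num_docs=1, num_terms=100, doc_len=5)))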
if os.environ.get('GENSIM_BIG', False):
class TestLargeData(unittest.TestCase):
"""Try common operations, using large models. You'll need ~8GB RAM to run these tests"""
def testWord2Vec(self):
corpus = BigCorpus(words_only=True, num_docs=100000, num_terms=3000000, doc_len=200)
model = gensim.models.Word2Vec(corpus, size=300, workers=4)
model.save(testfile(), ignore=['syn1'])
del model
model = gensim.models.Word2Vec.load(testfile())
def testLsiModel(self):
corpus = BigCorpus(num_docs=50000)
model = gensim.models.LsiModel(corpus, num_topics=500, id2word=corpus.dictionary)
model.save(testfile())
del model
model = gensim.models.LsiModel.load(testfile())
def testLdaModel(self):
corpus = BigCorpus(num_docs=5000)
model = gensim.models.LdaModel(corpus, num_topics=500, id2word=corpus.dictionary)
model.save(testfile())
del model
model = gensim.models.LdaModel.load(testfile())
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
unisport/thumblr | refs/heads/master | thumblr/caching.py | 1 | from django.core.cache import get_cache, cache, InvalidCacheBackendError
from django.db.models.signals import pre_save, pre_delete
from django.dispatch import receiver
from thumblr.models import Image
from thumblr.dto import ImageMetadata, ImageUrlSpec
from thumblr.services.query import get_image_by_id
try:
thumblr_cache = get_cache('thumblr')
except InvalidCacheBackendError:
thumblr_cache = cache
# ping the cache once at import time so a misconfigured backend fails fast
thumblr_cache.get("FOO")
def _get_key(f, *args, **kwargs):
return "{func_name}:{arg}:{kwarg}".format(
func_name=f.func_name,
arg="_".join(map(str, args)),
kwarg="_".join(map(str, map(lambda item: "{}:{}".format(*item), kwargs.items()))),
)
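# For example, a hypothetical call _get_key(f, 42, one=True) -- where
# f.func_name == "f" -- produces the cache key "f:42:one:True", so every
# distinct argument combination maps to its own cache entry.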
def cached(f):
"""Simple cache for functions, cached function **must** get only **positional** arguments, which have unique
__str__ return value"""
def cached_f(*args, **kwargs):
key = _get_key(f, *args, **kwargs)
val = thumblr_cache.get(key)
if val:
return val
val = f(*args, **kwargs)
thumblr_cache.set(key, val)
return val
cached_f.func_name = f.func_name
return cached_f
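# Illustrative use (hypothetical function; positional arguments must have
# stable, unique str() representations for the key to be meaningful):
#
# @cached
# def expensive_lookup(image_id):
#     ...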
def drop_cache_for(f, *args, **kwargs):
key = _get_key(f, *args, **kwargs)
thumblr_cache.delete(key)
@receiver(pre_save, sender=Image)
@receiver(pre_delete, sender=Image)
def __drop_url_cache(sender, instance, *args, **kwargs):
assert isinstance(instance, Image)
from thumblr.usecases import get_image_url
if instance.id:
old_inst = get_image_by_id(instance.pk)
dto = ImageMetadata(
image_file_id=old_inst.id,
file_name=old_inst.file_name,
size_slug=old_inst.size.name,
content_type_id=old_inst.content_type_id,
object_id=old_inst.object_id,
)
drop_cache_for(get_image_url, dto.extend(image_file_id=None, file_name=None), ImageUrlSpec.CDN_URL,)
drop_cache_for(get_image_url, dto.extend(image_file_id=None, file_name=None), ImageUrlSpec.CDN_URL, one=True)
drop_cache_for(get_image_url, dto.extend(image_file_id=None, file_name=None), ImageUrlSpec.CDN_URL, one=False)
drop_cache_for(get_image_url, dto.extend(image_file_id=None, file_name=None), ImageUrlSpec.S3_URL,)
drop_cache_for(get_image_url, dto.extend(image_file_id=None, file_name=None), ImageUrlSpec.S3_URL, one=True)
drop_cache_for(get_image_url, dto.extend(image_file_id=None, file_name=None), ImageUrlSpec.S3_URL, one=False)
drop_cache_for(get_image_url, dto, ImageUrlSpec.CDN_URL,)
drop_cache_for(get_image_url, dto, ImageUrlSpec.CDN_URL, one=True,)
drop_cache_for(get_image_url, dto, ImageUrlSpec.CDN_URL, one=False,)
drop_cache_for(get_image_url, dto, ImageUrlSpec.S3_URL,)
drop_cache_for(get_image_url, dto, ImageUrlSpec.S3_URL, one=True,)
drop_cache_for(get_image_url, dto, ImageUrlSpec.S3_URL, one=False,)
|
qiuzhong/xwalk-test-suite-build | refs/heads/master | iot/scripts/check_ostro_image_ver.py | 1 | #!/usr/bin/env python3
import json
import config
import requests
import mail
def get_latest_ostro_build_number(latest_image_url):
'''
Get the latest ostro image build number from latest_image_url.
'''
conn = requests.get(latest_image_url)
# print(conn.status_code)
version = conn.content
# print(str(version))
try:
number = int(version.decode('utf-8').strip().split('-')[-1])
except Exception as e:
print(e)
return 0
return number
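# Example (hypothetical payload): if the URL returned b"ostro-os-build-270\n",
# the code above would extract the build number 270.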
def check_ostro_build_number():
'''
Check the Ostro image build number and write the results to OSTRO_CONFIG_FILE.
'''
configuration = None
with open(config.OSTRO_CONFIG_FILE) as fp:
        configuration = json.load(fp)  # Python 3's json.load() takes no "encoding" argument
last_build_number = configuration.get('last_build_number')
this_build_number = configuration.get('this_build_number')
number = get_latest_ostro_build_number(config.OSTRO_LATEST_IMAGE_URL)
update = False
if number and (number > last_build_number):
print("New release!")
update = True
this_build_number = number
else:
print("No new release.")
data = {}
data['last_build_number'] = last_build_number
data['this_build_number'] = this_build_number
if update:
with open(config.OSTRO_CONFIG_FILE, "w") as fp:
            json.dump(data, fp, indent=4)  # Python 3's json.dump() takes no "encoding" argument
mail.mail_results(config.OSTRO_CONFIG_FILE)
if __name__ == '__main__':
check_ostro_build_number() |
leafclick/intellij-community | refs/heads/master | python/testData/intentions/PyConvertCollectionLiteralIntentionTest/convertOneElementTupleWithCommentToList.py | 37 | (
42 <caret> # foo
,
) |
cmelange/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_securitygroup.py | 78 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_securitygroup
short_description: Manages security groups on Apache CloudStack based clouds.
description:
- Create and remove security groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the security group.
required: true
description:
description:
- Description of the security group.
required: false
default: null
state:
description:
- State of the security group.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the security group is related to.
required: false
default: null
account:
description:
- Account the security group is related to.
required: false
default: null
project:
description:
- Name of the project the security group to be created in.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a security group
- local_action:
module: cs_securitygroup
name: default
description: default security group
# Remove a security group
- local_action:
module: cs_securitygroup
name: default
state: absent
'''
RETURN = '''
---
id:
description: UUID of the security group.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of security group.
returned: success
type: string
sample: app
description:
description: Description of security group.
returned: success
type: string
sample: application security group
tags:
description: List of resource tags associated with the security group.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
project:
description: Name of project the security group is related to.
returned: success
type: string
sample: Production
domain:
description: Domain the security group is related to.
returned: success
type: string
sample: example domain
account:
description: Account the security group is related to.
returned: success
type: string
sample: example account
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackSecurityGroup, self).__init__(module)
self.security_group = None
def get_security_group(self):
if not self.security_group:
args = {}
args['projectid'] = self.get_project(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['securitygroupname'] = self.module.params.get('name')
sgs = self.cs.listSecurityGroups(**args)
if sgs:
self.security_group = sgs['securitygroup'][0]
return self.security_group
def create_security_group(self):
security_group = self.get_security_group()
if not security_group:
self.result['changed'] = True
args = {}
args['name'] = self.module.params.get('name')
args['projectid'] = self.get_project(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['description'] = self.module.params.get('description')
if not self.module.check_mode:
res = self.cs.createSecurityGroup(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
security_group = res['securitygroup']
return security_group
def remove_security_group(self):
security_group = self.get_security_group()
if security_group:
self.result['changed'] = True
args = {}
args['name'] = self.module.params.get('name')
args['projectid'] = self.get_project(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
if not self.module.check_mode:
res = self.cs.deleteSecurityGroup(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
return security_group
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
description = dict(default=None),
state = dict(choices=['present', 'absent'], default='present'),
project = dict(default=None),
account = dict(default=None),
domain = dict(default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_sg = AnsibleCloudStackSecurityGroup(module)
state = module.params.get('state')
if state in ['absent']:
sg = acs_sg.remove_security_group()
else:
sg = acs_sg.create_security_group()
result = acs_sg.get_result(sg)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
Distrotech/intellij-community | refs/heads/master | python/testData/psi/EqYieldEq.py | 83 | bb = yield dd = cc
|
paurosello/frappe | refs/heads/develop | frappe/core/doctype/page/test_page.py | 72 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_records = frappe.get_test_records('Page')
class TestPage(unittest.TestCase):
pass
|
RsrchBoy/dpkg-offlineimap | refs/heads/master | offlineimap/__init__.py | 5 | __all__ = ['OfflineImap']
__productname__ = 'OfflineIMAP'
__version__ = "6.5.7"
__revision__ = ""
__bigversion__ = __version__ + __revision__
__copyright__ = "Copyright 2002-2015 John Goerzen & contributors"
__author__ = "John Goerzen"
__author_email__= "offlineimap-project@lists.alioth.debian.org"
__description__ = "Disconnected Universal IMAP Mail Synchronization/Reader Support"
__license__ = "Licensed under the GNU GPL v2 or any later version (with an OpenSSL exception)"
__bigcopyright__ = """%(__productname__)s %(__bigversion__)s
%(__license__)s""" % locals()
__homepage__ = "http://offlineimap.org"
banner = __bigcopyright__
from offlineimap.error import OfflineImapError
# put this last, so we don't run into circular dependencies using
# e.g. offlineimap.__version__.
from offlineimap.init import OfflineImap
|
socketubs/pyhn | refs/heads/master | pyhn/__init__.py | 1 | # -*- coding: utf-8 -*-
__title__ = 'pyhn'
__version__ = '0.3.9'
__author__ = 'Geoffrey Lehée'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Geoffrey Lehée'
|
tillahoffmann/tensorflow | refs/heads/master | tensorflow/contrib/timeseries/python/timeseries/saved_model_utils.py | 25 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convenience functions for working with time series saved_models.
@@predict_continuation
@@filter_continuation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import feature_keys as _feature_keys
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline as _input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import model_utils as _model_utils
from tensorflow.python.util.all_util import remove_undocumented
def _colate_features_to_feeds_and_fetches(continue_from, signature, features,
graph):
"""Uses a saved model signature to construct feed and fetch dictionaries."""
if _feature_keys.FilteringResults.STATE_TUPLE in continue_from:
# We're continuing from an evaluation, so we need to unpack/flatten state.
state_values = _model_utils.state_to_dictionary(
continue_from[_feature_keys.FilteringResults.STATE_TUPLE])
else:
state_values = continue_from
input_feed_tensors_by_name = {
input_key: graph.as_graph_element(input_value.name)
for input_key, input_value in signature.inputs.items()
}
output_tensors_by_name = {
output_key: graph.as_graph_element(output_value.name)
for output_key, output_value in signature.outputs.items()
}
feed_dict = {}
for state_key, state_value in state_values.items():
feed_dict[input_feed_tensors_by_name[state_key]] = state_value
for feature_key, feature_value in features.items():
feed_dict[input_feed_tensors_by_name[feature_key]] = feature_value
return output_tensors_by_name, feed_dict
def predict_continuation(continue_from,
signatures,
session,
steps=None,
times=None,
exogenous_features=None):
"""Perform prediction using an exported saved model.
Analogous to _input_pipeline.predict_continuation_input_fn, but operates on a
saved model rather than feeding into Estimator's predict method.
Args:
continue_from: A dictionary containing the results of either an Estimator's
evaluate method or filter_continuation. Used to determine the model
state to make predictions starting from.
signatures: The `MetaGraphDef` protocol buffer returned from
`tf.saved_model.loader.load`. Used to determine the names of Tensors to
feed and fetch. Must be from the same model as `continue_from`.
session: The session to use. The session's graph must be the one into which
`tf.saved_model.loader.load` loaded the model.
steps: The number of steps to predict (scalar), starting after the
evaluation or filtering. If `times` is specified, `steps` must not be; one
is required.
times: A [batch_size x window_size] array of integers (not a Tensor)
indicating times to make predictions for. These times must be after the
corresponding evaluation or filtering. If `steps` is specified, `times`
must not be; one is required. If the batch dimension is omitted, it is
assumed to be 1.
exogenous_features: Optional dictionary. If specified, indicates exogenous
features for the model to use while making the predictions. Values must
have shape [batch_size x window_size x ...], where `batch_size` matches
the batch dimension used when creating `continue_from`, and `window_size`
is either the `steps` argument or the `window_size` of the `times`
argument (depending on which was specified).
Returns:
A dictionary with model-specific predictions (typically having keys "mean"
and "covariance") and a feature_keys.PredictionResults.TIMES key indicating
the times for which the predictions were computed.
Raises:
ValueError: If `times` or `steps` are misspecified.
"""
if exogenous_features is None:
exogenous_features = {}
predict_times = _model_utils.canonicalize_times_or_steps_from_output(
times=times, steps=steps, previous_model_output=continue_from)
features = {_feature_keys.PredictionFeatures.TIMES: predict_times}
features.update(exogenous_features)
predict_signature = signatures.signature_def[
_feature_keys.SavedModelLabels.PREDICT]
output_tensors_by_name, feed_dict = _colate_features_to_feeds_and_fetches(
continue_from=continue_from,
signature=predict_signature,
features=features,
graph=session.graph)
output = session.run(output_tensors_by_name, feed_dict=feed_dict)
output[_feature_keys.PredictionResults.TIMES] = features[
_feature_keys.PredictionFeatures.TIMES]
return output
def filter_continuation(continue_from, signatures, session, features):
"""Perform filtering using an exported saved model.
Filtering refers to updating model state based on new observations.
Predictions based on the returned model state will be conditioned on these
observations.
Args:
continue_from: A dictionary containing the results of either an Estimator's
evaluate method or a previous filter_continuation. Used to determine the
model state to start filtering from.
signatures: The `MetaGraphDef` protocol buffer returned from
`tf.saved_model.loader.load`. Used to determine the names of Tensors to
feed and fetch. Must be from the same model as `continue_from`.
session: The session to use. The session's graph must be the one into which
`tf.saved_model.loader.load` loaded the model.
features: A dictionary mapping keys to Numpy arrays, with several possible
shapes (requires keys `FilteringFeatures.TIMES` and
`FilteringFeatures.VALUES`):
Single example; `TIMES` is a scalar and `VALUES` is either a scalar or a
vector of length [number of features].
Sequence; `TIMES` is a vector of shape [series length], `VALUES` either
has shape [series length] (univariate) or [series length x number of
features] (multivariate).
Batch of sequences; `TIMES` is a vector of shape [batch size x series
length], `VALUES` has shape [batch size x series length] or [batch
size x series length x number of features].
In any case, `VALUES` and any exogenous features must have their shapes
prefixed by the shape of the value corresponding to the `TIMES` key.
Returns:
A dictionary containing model state updated to account for the observations
in `features`.
"""
filter_signature = signatures.signature_def[
_feature_keys.SavedModelLabels.FILTER]
features = _input_pipeline._canonicalize_numpy_data( # pylint: disable=protected-access
data=features,
require_single_batch=False)
output_tensors_by_name, feed_dict = _colate_features_to_feeds_and_fetches(
continue_from=continue_from,
signature=filter_signature,
features=features,
graph=session.graph)
output = session.run(output_tensors_by_name, feed_dict=feed_dict)
# Make it easier to chain filter -> predict by keeping track of the current
# time.
output[_feature_keys.FilteringResults.TIMES] = features[
_feature_keys.FilteringFeatures.TIMES]
return output
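# Usage sketch (assumptions: `export_dir` was produced by a timeseries
# estimator's export_savedmodel, `evaluation` is the dict returned by its
# evaluate method, and `times`/`values` are numpy arrays keyed by this
# package's feature names):
#
#   import tensorflow as tf
#   with tf.Graph().as_default():
#       with tf.Session() as session:
#           signatures = tf.saved_model.loader.load(
#               session, [tf.saved_model.tag_constants.SERVING], export_dir)
#           state = filter_continuation(
#               continue_from=evaluation, signatures=signatures,
#               session=session, features={"times": times, "values": values})
#           predictions = predict_continuation(
#               continue_from=state, signatures=signatures,
#               session=session, steps=10)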
remove_undocumented(module_name=__name__)
|
tethysplatform/tethys | refs/heads/master | tests/unit_tests/test_tethys_compute/test_models/test_dask/test_DaskJobResult.py | 2 | """
********************************************************************************
* Name: test_DaskJobResult
* Author: nswain
* Created On: November 14, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
from tethys_sdk.testing import TethysTestCase
from tethys_compute.models.dask.dask_scheduler import DaskScheduler
from tethys_compute.models.dask.dask_job import DaskJob
from django.contrib.auth.models import User
from unittest import mock
class DaskJobMockedResultsPropertyTests(TethysTestCase):
def set_up(self):
self.user = User.objects.create_user('tethys_super', 'user@example.com', 'pass')
self.scheduler = DaskScheduler(
name='test_dask_scheduler',
host='127.0.0.1:8000',
timeout=10,
heartbeat_interval=5,
dashboard='test_dashboard',
)
self.scheduler.save()
def tear_down(self):
pass
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.log')
def test_process_result_serialize_exception(self, mock_log, mock_re_lock, mock_apl, mock_client, mock_future,
mock_tfe):
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock(return_value='foo')
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler,
_process_results_function='test_function'
)
# NOTE: To mock the "result" property, we must mock it on the type object, not the instance.
# Unfortunately, this will persist for any test in the same test case that runs after this test.
# That's why this test is pulled out in a separate test case (the other tests on "result" won't work
# after this one runs).
type(djob).result = mock.PropertyMock(side_effect=[Exception, 'foo', 'foo'])
djob._process_results()
# check the result
mock_client.gather.assert_called_with(mock_future)
mock_function.assert_called_with(mock_client.gather())
mock_log.exception.assert_called_with('Results Serialization Error')
mock_re_lock.assert_called()
self.assertEqual('ERR', djob._status)
|
dhermes/google-cloud-python | refs/heads/master | iot/docs/conf.py | 2 | # -*- coding: utf-8 -*-
#
# google-cloud-iot documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-iot"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-iot-doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-iot.tex",
u"google-cloud-iot Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "google-cloud-iot", u"google-cloud-iot Documentation", [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-iot",
u"google-cloud-iot Documentation",
author,
"google-cloud-iot",
"GAPIC library for the {metadata.shortName} v1 service",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
vntarasov/openpilot | refs/heads/eon-0710 | selfdrive/controls/lib/vehicle_model.py | 1 | #!/usr/bin/env python3
"""
Dynamic bicycle model from "The Science of Vehicle Dynamics (2014), M. Guiggiani"
The state is x = [v, r]^T
with v lateral speed [m/s], and r rotational speed [rad/s]
The input u is the steering angle [rad]
The system is defined by
x_dot = A*x + B*u
A depends on longitudinal speed, u [m/s], and vehicle parameters CP
"""
import numpy as np
from numpy.linalg import solve
from typing import Tuple
from cereal import car
class VehicleModel:
def __init__(self, CP: car.CarParams):
"""
Args:
CP: Car Parameters
"""
# for math readability, convert long names car params into short names
self.m = CP.mass
self.j = CP.rotationalInertia
self.l = CP.wheelbase
self.aF = CP.centerToFront
self.aR = CP.wheelbase - CP.centerToFront
self.chi = CP.steerRatioRear
self.cF_orig = CP.tireStiffnessFront
self.cR_orig = CP.tireStiffnessRear
self.update_params(1.0, CP.steerRatio)
def update_params(self, stiffness_factor: float, steer_ratio: float) -> None:
"""Update the vehicle model with a new stiffness factor and steer ratio"""
self.cF = stiffness_factor * self.cF_orig
self.cR = stiffness_factor * self.cR_orig
self.sR = steer_ratio
def steady_state_sol(self, sa: float, u: float) -> np.ndarray:
"""Returns the steady state solution.
If the speed is too small we can't use the dynamic model (tire slip is undefined),
we then have to use the kinematic model
Args:
sa: Steering wheel angle [rad]
u: Speed [m/s]
Returns:
2x1 matrix with steady state solution (lateral speed, rotational speed)
"""
if u > 0.1:
return dyn_ss_sol(sa, u, self)
else:
return kin_ss_sol(sa, u, self)
def calc_curvature(self, sa: float, u: float) -> float:
"""Returns the curvature. Multiplied by the speed this will give the yaw rate.
Args:
sa: Steering wheel angle [rad]
u: Speed [m/s]
Returns:
Curvature factor [1/m]
"""
return self.curvature_factor(u) * sa / self.sR
def curvature_factor(self, u: float) -> float:
"""Returns the curvature factor.
Multiplied by wheel angle (not steering wheel angle) this will give the curvature.
Args:
u: Speed [m/s]
Returns:
Curvature factor [1/m]
"""
sf = calc_slip_factor(self)
return (1. - self.chi) / (1. - sf * u**2) / self.l
def get_steer_from_curvature(self, curv: float, u: float) -> float:
"""Calculates the required steering wheel angle for a given curvature
Args:
curv: Desired curvature [1/m]
u: Speed [m/s]
Returns:
Steering wheel angle [rad]
"""
return curv * self.sR * 1.0 / self.curvature_factor(u)
def get_steer_from_yaw_rate(self, yaw_rate: float, u: float) -> float:
"""Calculates the required steering wheel angle for a given yaw_rate
Args:
yaw_rate: Desired yaw rate [rad/s]
u: Speed [m/s]
Returns:
Steering wheel angle [rad]
"""
curv = yaw_rate / u
return self.get_steer_from_curvature(curv, u)
def yaw_rate(self, sa: float, u: float) -> float:
"""Calculate yaw rate
Args:
sa: Steering wheel angle [rad]
u: Speed [m/s]
Returns:
Yaw rate [rad/s]
"""
return self.calc_curvature(sa, u) * u
def kin_ss_sol(sa: float, u: float, VM: VehicleModel) -> np.ndarray:
"""Calculate the steady state solution at low speeds
At low speeds the tire slip is undefined, so a kinematic
model is used.
Args:
sa: Steering angle [rad]
u: Speed [m/s]
VM: Vehicle model
Returns:
2x1 matrix with steady state solution
"""
K = np.zeros((2, 1))
K[0, 0] = VM.aR / VM.sR / VM.l * u
K[1, 0] = 1. / VM.sR / VM.l * u
return K * sa
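# Reading off K above: the kinematic model reduces to a yaw rate
# r = u * (sa / sR) / l and a lateral speed v = aR * r (no tire slip assumed
# at low speed).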
def create_dyn_state_matrices(u: float, VM: VehicleModel) -> Tuple[np.ndarray, np.ndarray]:
"""Returns the A and B matrix for the dynamics system
Args:
u: Vehicle speed [m/s]
VM: Vehicle model
Returns:
A tuple with the 2x2 A matrix, and 2x1 B matrix
Parameters in the vehicle model:
    cF: Tire stiffness front [N/rad]
    cR: Tire stiffness rear [N/rad]
aF: Distance from CG to front wheels [m]
aR: Distance from CG to rear wheels [m]
m: Mass [kg]
j: Rotational inertia [kg m^2]
sR: Steering ratio [-]
chi: Steer ratio rear [-]
"""
A = np.zeros((2, 2))
B = np.zeros((2, 1))
A[0, 0] = - (VM.cF + VM.cR) / (VM.m * u)
A[0, 1] = - (VM.cF * VM.aF - VM.cR * VM.aR) / (VM.m * u) - u
A[1, 0] = - (VM.cF * VM.aF - VM.cR * VM.aR) / (VM.j * u)
A[1, 1] = - (VM.cF * VM.aF**2 + VM.cR * VM.aR**2) / (VM.j * u)
B[0, 0] = (VM.cF + VM.chi * VM.cR) / VM.m / VM.sR
B[1, 0] = (VM.cF * VM.aF - VM.chi * VM.cR * VM.aR) / VM.j / VM.sR
return A, B
def dyn_ss_sol(sa: float, u: float, VM: VehicleModel) -> np.ndarray:
"""Calculate the steady state solution when x_dot = 0,
Ax + Bu = 0 => x = A^{-1} B u
Args:
sa: Steering angle [rad]
u: Speed [m/s]
VM: Vehicle model
Returns:
2x1 matrix with steady state solution
"""
A, B = create_dyn_state_matrices(u, VM)
return -solve(A, B) * sa
def calc_slip_factor(VM):
"""The slip factor is a measure of how the curvature changes with speed
it's positive for Oversteering vehicle, negative (usual case) otherwise.
"""
return VM.m * (VM.cF * VM.aF - VM.cR * VM.aR) / (VM.l**2 * VM.cF * VM.cR)
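# Rough usage sketch (CP comes from a real car's CarParams; the numbers here
# are placeholders, not calibrated values):
#
#   VM = VehicleModel(CP)
#   VM.update_params(stiffness_factor=1.0, steer_ratio=15.0)
#   print(VM.yaw_rate(sa=0.1, u=20.0))  # yaw rate [rad/s]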
|
aliyun-beta/aliyun-oss-cpp-sdk | refs/heads/master | third_party/googletest/scripts/upload_gtest.py | 1963 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
|
crdroid-devices/android_kernel_htc_msm8974 | refs/heads/6.0.0 | tools/perf/python/twatch.py | 7370 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads)
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
vitan/hue | refs/heads/master | desktop/core/ext-py/python-ldap-2.3.13/Demo/simple.py | 40 | import sys,getpass
import ldap
#l = ldap.open("localhost", 31001)
l = ldap.open("marta.it.uq.edu.au")
login_dn = "cn=root,ou=CSEE,o=UQ,c=AU"
login_pw = getpass.getpass("Password for %s: " % login_dn)
l.simple_bind_s(login_dn, login_pw)
#
# create a new sub organisation
#
try:
dn = "ou=CSEE,o=UQ,c=AU"
print "Adding", repr(dn)
l.add_s(dn,
[
("objectclass",["organizationalUnit"]),
("ou", ["CSEE"]),
("description", [
"Department of Computer Science and Electrical Engineering"]),
]
)
except ldap.LDAPError:
pass
#
# create an entry for me
#
dn = "cn=David Leonard,ou=CSEE,o=UQ,c=AU"
print "Updating", repr(dn)
try:
l.delete_s(dn)
except:
pass
l.add_s(dn,
[
("objectclass", ["organizationalPerson"]),
("sn", ["Leonard"]),
("cn", ["David Leonard"]),
("description", ["Ph.D. student"]),
("display-name", ["David Leonard"]),
#("commonname", ["David Leonard"]),
("mail", ["david.leonard@csee.uq.edu.au"]),
("othermailbox", ["d@openbsd.org"]),
("givenname", ["David"]),
("surname", ["Leonard"]),
("seeAlso", ["http://www.csee.uq.edu.au/~leonard/"]),
("url", ["http://www.csee.uq.edu.au/~leonard/"]),
#("homephone", []),
#("fax", []),
#("otherfacsimiletelephonenumber",[]),
#("officefax", []),
#("mobile", []),
#("otherpager", []),
#("officepager", []),
#("pager", []),
("info", ["info"]),
("title", ["Mr"]),
#("telephonenumber", []),
("l", ["Brisbane"]),
("st", ["Queensland"]),
("c", ["AU"]),
("co", ["co"]),
("o", ["UQ"]),
("ou", ["CSEE"]),
#("homepostaladdress", []),
#("postaladdress", []),
#("streetaddress", []),
#("street", []),
("department", ["CSEE"]),
("comment", ["comment"]),
#("postalcode", []),
("physicaldeliveryofficename", ["Bldg 78, UQ, St Lucia"]),
("preferredDeliveryMethod", ["email"]),
("initials", ["DRL"]),
("conferenceinformation", ["MS-conferenceinformation"]),
#("usercertificate", []),
("labeleduri", ["labeleduri"]),
("manager", ["cn=Jaga Indulska"]),
("reports", ["reports"]),
("jpegPhoto", [open("/www/leonard/leonard.jpg","r").read()]),
("uid", ["leonard"]),
("userPassword", [""])
])
#
# search beneath the CSEE/UQ/AU tree
#
res = l.search_s(
"ou=CSEE, o=UQ, c=AU",
	ldap.SCOPE_SUBTREE,
"objectclass=*",
)
print res
l.unbind()
|
Jgarcia-IAS/SITE | refs/heads/master | addons/purchase_double_validation/__openerp__.py | 52 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Double Validation on Purchases',
'version' : '1.1',
'category': 'Purchase Management',
'images' : ['images/purchase_validation.jpeg'],
'depends' : ['base','purchase'],
'author' : 'OpenERP SA',
'description': """
Double-validation for purchases exceeding minimum amount.
=========================================================
This module modifies the purchase workflow in order to validate purchases that
exceed the minimum amount set by the configuration wizard.
""",
'website': 'https://www.odoo.com/page/purchase',
'data': [
'purchase_double_validation_workflow.xml',
'purchase_double_validation_installer.xml',
'purchase_double_validation_view.xml',
],
'test': [
'test/purchase_double_validation_demo.yml',
'test/purchase_double_validation_test.yml'
],
'demo': [],
'installable': True,
'auto_install': False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
matmutant/sl4a | refs/heads/master | python-build/python-libs/ase/scripts/notify_weather.py | 87 | """Display the weather report in a notification."""
__author__ = 'Damon Kohler <damonkohler@gmail.com>'
__copyright__ = 'Copyright (c) 2009, Google Inc.'
__license__ = 'Apache License, Version 2.0'
import android
import weather
def notify_weather(droid):
"""Display the weather at the current location in a notification."""
print 'Finding ZIP code.'
location = droid.getLastKnownLocation().result
if location['gps'] is not None:
location = location['gps']
else:
location = location['network']
addresses = droid.geocode(location['latitude'], location['longitude'])
zip = addresses.result[0]['postal_code']
if zip is None:
msg = 'Failed to find location.'
else:
print 'Fetching weather report.'
result = weather.fetch_weather(zip)
msg = '%(temperature)s degrees and %(conditions)s, in %(city)s.' % result
droid.notify('Weather Report', msg)
if __name__ == '__main__':
droid = android.Android()
notify_weather(droid)
|
Vogtinator/micropython | refs/heads/nspire | tests/basics/types2.py | 117 | # Types are hashable
print(hash(type) != 0)
print(hash(int) != 0)
print(hash(list) != 0)
class Foo: pass
print(hash(Foo) != 0)
print(int == int)
print(int != list)
d = {}
d[int] = list
d[list] = int
print(len(d))
|
tonyseek/python-stdnum | refs/heads/master | getnumlist.py | 3 | #!/usr/bin/env python
# getnumlist.py - script to get a list of number formats in stdnum
#
# Copyright (C) 2012, 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""This script uses introspection to present a list of number formats
suitable to be included in the README and stdnum package description."""
import pydoc
from stdnum import util
# these are excluded
algorithms = ('stdnum.verhoeff', 'stdnum.luhn', 'stdnum.iso7064')
def get_number_modules():
"""Provides the number modules that are not algorithms."""
for module in util.get_number_modules():
        if module.__name__ not in algorithms and \
not module.__name__.startswith('stdnum.iso7064'):
yield module
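# For illustration: get_number_modules() yields number-format modules such as
# stdnum.iban or stdnum.isbn, while skipping the checksum-algorithm modules
# excluded above (verhoeff, luhn and the iso7064 family).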
if __name__ == '__main__':
print 'For README:'
print ''
for module in get_number_modules():
print ' * %s' % util.get_module_name(module)
print ''
print 'For stdnum/__init__.py:'
print ''
for module in get_number_modules():
print '* %s: %s' % (
module.__name__.replace('stdnum.', ''),
util.get_module_name(module),
)
print ''
print 'For docs/index.rst:'
print ''
for module in get_number_modules():
print ' %s' % module.__name__.replace('stdnum.', '')
print ''
print 'For index.xml:'
print ''
for module in get_number_modules():
print ' <li>%s</li>' % util.get_module_name(module)
|
gangadharkadam/saloon_erp | refs/heads/master | erpnext/stock/report/stock_balance/stock_balance.py | 17 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, getdate
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
item_map = get_item_details(filters)
iwb_map = get_item_warehouse_map(filters)
data = []
for company in sorted(iwb_map):
for item in sorted(iwb_map[company]):
for wh in sorted(iwb_map[company][item]):
qty_dict = iwb_map[company][item][wh]
data.append([item, item_map[item]["item_name"],
item_map[item]["item_group"],
item_map[item]["brand"],
item_map[item]["description"], wh,
item_map[item]["stock_uom"], qty_dict.opening_qty,
qty_dict.opening_val, qty_dict.in_qty,
qty_dict.in_val, qty_dict.out_qty,
qty_dict.out_val, qty_dict.bal_qty,
qty_dict.bal_val, qty_dict.val_rate,
company
])
return columns, data
def get_columns(filters):
"""return columns based on filters"""
columns = ["Item:Link/Item:100", "Item Name::150", "Item Group::100", "Brand::90", \
"Description::140", "Warehouse:Link/Warehouse:100", "Stock UOM:Link/UOM:90", "Opening Qty:Float:100", \
"Opening Value:Float:110", "In Qty:Float:80", "In Value:Float:80", "Out Qty:Float:80", \
"Out Value:Float:80", "Balance Qty:Float:100", "Balance Value:Float:100", \
"Valuation Rate:Float:90", "Company:Link/Company:100"]
return columns
def get_conditions(filters):
conditions = ""
if not filters.get("from_date"):
frappe.throw(_("'From Date' is required"))
if filters.get("to_date"):
conditions += " and posting_date <= '%s'" % filters["to_date"]
else:
frappe.throw(_("'To Date' is required"))
if filters.get("item_code"):
conditions += " and item_code = '%s'" % frappe.db.escape(filters.get("item_code"))
return conditions
#get all details
def get_stock_ledger_entries(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select item_code, warehouse, posting_date, actual_qty, valuation_rate,
company, voucher_type, qty_after_transaction, stock_value_difference
from `tabStock Ledger Entry`
where docstatus < 2 %s order by posting_date, posting_time, name""" %
conditions, as_dict=1)
def get_item_warehouse_map(filters):
sle = get_stock_ledger_entries(filters)
iwb_map = {}
for d in sle:
iwb_map.setdefault(d.company, {}).setdefault(d.item_code, {}).\
setdefault(d.warehouse, frappe._dict({\
"opening_qty": 0.0, "opening_val": 0.0,
"in_qty": 0.0, "in_val": 0.0,
"out_qty": 0.0, "out_val": 0.0,
"bal_qty": 0.0, "bal_val": 0.0,
"val_rate": 0.0, "uom": None
}))
qty_dict = iwb_map[d.company][d.item_code][d.warehouse]
if d.voucher_type == "Stock Reconciliation":
qty_diff = flt(d.qty_after_transaction) - qty_dict.bal_qty
else:
qty_diff = flt(d.actual_qty)
value_diff = flt(d.stock_value_difference)
if d.posting_date < getdate(filters["from_date"]):
qty_dict.opening_qty += qty_diff
qty_dict.opening_val += value_diff
elif d.posting_date >= getdate(filters["from_date"]) and d.posting_date <= getdate(filters["to_date"]):
if qty_diff > 0:
qty_dict.in_qty += qty_diff
qty_dict.in_val += value_diff
else:
qty_dict.out_qty += abs(qty_diff)
qty_dict.out_val += abs(value_diff)
qty_dict.val_rate = d.valuation_rate
qty_dict.bal_qty += qty_diff
qty_dict.bal_val += value_diff
return iwb_map
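# The chained setdefault calls above build a three-level nested map:
#   {company: {item_code: {warehouse: qty_dict}}}
# Each intermediate dict is created only on first access, so the plain
# iwb_map[d.company][d.item_code][d.warehouse] lookup afterwards is safe.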
def get_item_details(filters):
item_map = {}
for d in frappe.db.sql("select name, item_name, stock_uom, item_group, brand, \
description from tabItem", as_dict=1):
item_map.setdefault(d.name, d)
return item_map
|
jlmadurga/django-oscar | refs/heads/master | tests/_site/apps/customer/__init__.py | 55 | default_app_config = 'tests._site.apps.customer.config.CustomerConfig'
|
AmandaMoen/AmandaMoen | refs/heads/master | notes/resources/UW_IntroClass/class06/code/simple_classes.py | 1 | #!/usr/bin/env python
"""
simple_classes.py
demonstrating the basics of a class
"""
import math
## create a point class
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
## create an instance of that class
p = Point(3,4)
## access the attributes
print "p.x is:", p.x
print "p.y is:", p.y
class Point2(object):
size = 4
color= "red"
def __init__(self, x, y):
self.x = x
self.y = y
p2 = Point2(4,5)
print p2.size
print p2.color
class Point3(object):
size = 4
color= "red"
def __init__(self, x, y):
self.x = x
self.y = y
def get_color(self):
return self.color
p3 = Point3(4,5)
print p3.size
print p3.get_color()
class Circle(object):
color = "red"
def __init__(self, diameter):
self.diameter = diameter
def grow(self, factor=2):
"""
grows the circle's diameter
:param factor=2: factor by which to grow the circle
"""
self.diameter = self.diameter * factor
    def get_area(self):
        # area of a circle: pi * r**2, with r = diameter / 2
        return math.pi * (self.diameter / 2.0) ** 2
class NewCircle(Circle):
color = "blue"
def grow(self, factor=2):
"""grows the area by factor..."""
        self.diameter = self.diameter * math.sqrt(factor)
nc = NewCircle
print nc.color
class CircleR(Circle):
def __init__(self, radius):
diameter = radius*2
Circle.__init__(self, diameter)
class CircleR2(Circle):
    def __init__(self, radius):
        self.radius = radius
        self.diameter = radius * 2
    def get_area(self):
        # Circle.get_area() takes no extra arguments; it reads self.diameter
        return Circle.get_area(self)
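## quick sanity check of the radius-based subclasses (a small demo added for
## illustration; values follow from area = pi * r**2)
c_r = CircleR(2)
print "CircleR(2) area:", c_r.get_area()    # pi * (4/2.0)**2, about 12.57
c_r2 = CircleR2(2)
print "CircleR2(2) area:", c_r2.get_area()  # same result via Circle.get_area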
|
serialx/spark-ec2 | refs/heads/branch-1.5 | deploy_templates.py | 35 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
# Deploy the configuration file templates in the spark-ec2/templates directory
# to the root filesystem, substituting variables such as the master hostname,
# ZooKeeper URL, etc as read from the environment.
# Find system memory in KB and compute Spark's default limit from that
mem_command = "cat /proc/meminfo | grep MemTotal | awk '{print $2}'"
cpu_command = "nproc"
master_ram_kb = int(
os.popen(mem_command).read().strip())
# This is the master's memory. Try to find slave's memory as well
first_slave = os.popen("cat /root/spark-ec2/slaves | head -1").read().strip()
slave_mem_command = "ssh -t -o StrictHostKeyChecking=no %s %s" %\
(first_slave, mem_command)
slave_cpu_command = "ssh -t -o StrictHostKeyChecking=no %s %s" %\
(first_slave, cpu_command)
slave_ram_kb = int(os.popen(slave_mem_command).read().strip())
slave_cpus = int(os.popen(slave_cpu_command).read().strip())
system_ram_kb = min(slave_ram_kb, master_ram_kb)
system_ram_mb = system_ram_kb / 1024
slave_ram_mb = slave_ram_kb / 1024
# Leave some RAM for the OS, Hadoop daemons, and system caches
if slave_ram_mb > 100*1024:
slave_ram_mb = slave_ram_mb - 15 * 1024 # Leave 15 GB RAM
elif slave_ram_mb > 60*1024:
slave_ram_mb = slave_ram_mb - 10 * 1024 # Leave 10 GB RAM
elif slave_ram_mb > 40*1024:
slave_ram_mb = slave_ram_mb - 6 * 1024 # Leave 6 GB RAM
elif slave_ram_mb > 20*1024:
slave_ram_mb = slave_ram_mb - 3 * 1024 # Leave 3 GB RAM
elif slave_ram_mb > 10*1024:
slave_ram_mb = slave_ram_mb - 2 * 1024 # Leave 2 GB RAM
else:
slave_ram_mb = max(512, slave_ram_mb - 1300) # Leave 1.3 GB RAM
# Make tachyon_mb as slave_ram_mb for now.
tachyon_mb = slave_ram_mb
worker_instances_str = ""
worker_cores = slave_cpus
if os.getenv("SPARK_WORKER_INSTANCES") != "":
worker_instances = int(os.getenv("SPARK_WORKER_INSTANCES", 1))
worker_instances_str = "%d" % worker_instances
    # Distribute cpu cores equally among the worker instances
worker_cores = max(slave_cpus / worker_instances, 1)
template_vars = {
"master_list": os.getenv("MASTERS"),
"active_master": os.getenv("MASTERS").split("\n")[0],
"slave_list": os.getenv("SLAVES"),
"hdfs_data_dirs": os.getenv("HDFS_DATA_DIRS"),
"mapred_local_dirs": os.getenv("MAPRED_LOCAL_DIRS"),
"spark_local_dirs": os.getenv("SPARK_LOCAL_DIRS"),
"spark_worker_mem": "%dm" % slave_ram_mb,
"spark_worker_instances": worker_instances_str,
"spark_worker_cores": "%d" % worker_cores,
"spark_master_opts": os.getenv("SPARK_MASTER_OPTS", ""),
"spark_version": os.getenv("SPARK_VERSION"),
"tachyon_version": os.getenv("TACHYON_VERSION"),
"hadoop_major_version": os.getenv("HADOOP_MAJOR_VERSION"),
"java_home": os.getenv("JAVA_HOME"),
"default_tachyon_mem": "%dMB" % tachyon_mb,
"system_ram_mb": "%d" % system_ram_mb,
"aws_access_key_id": os.getenv("AWS_ACCESS_KEY_ID"),
"aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"),
}
template_dir="/root/spark-ec2/templates"
for path, dirs, files in os.walk(template_dir):
if path.find(".svn") == -1:
dest_dir = os.path.join('/', path[len(template_dir):])
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for filename in files:
if filename[0] not in '#.~' and filename[-1] != '~':
dest_file = os.path.join(dest_dir, filename)
with open(os.path.join(path, filename)) as src:
with open(dest_file, "w") as dest:
print("Configuring " + dest_file)
text = src.read()
for key in template_vars:
text = text.replace("{{" + key + "}}", template_vars[key] or '')
dest.write(text)
dest.close()
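# For example, a template line such as
#   export SPARK_WORKER_MEMORY={{spark_worker_mem}}
# is written out with the computed value (e.g. "...=2048m"); the exact
# number depends on the slave RAM detected above.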
|
RouxRC/weboob | refs/heads/master | modules/allrecipes/module.py | 6 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.recipe import CapRecipe, Recipe
from weboob.tools.backend import Module
from .browser import AllrecipesBrowser
from urllib import quote_plus
__all__ = ['AllrecipesModule']
class AllrecipesModule(Module, CapRecipe):
NAME = 'allrecipes'
MAINTAINER = u'Julien Veyssier'
EMAIL = 'julien.veyssier@aiur.fr'
VERSION = '1.1'
DESCRIPTION = u'Allrecipes English recipe website'
LICENSE = 'AGPLv3+'
BROWSER = AllrecipesBrowser
def get_recipe(self, id):
return self.browser.get_recipe(id)
def iter_recipes(self, pattern):
return self.browser.iter_recipes(quote_plus(pattern.encode('utf-8')))
def fill_recipe(self, recipe, fields):
if 'nb_person' in fields or 'instructions' in fields:
rec = self.get_recipe(recipe.id)
recipe.picture_url = rec.picture_url
recipe.instructions = rec.instructions
recipe.ingredients = rec.ingredients
recipe.comments = rec.comments
recipe.author = rec.author
recipe.nb_person = rec.nb_person
recipe.cooking_time = rec.cooking_time
recipe.preparation_time = rec.preparation_time
return recipe
OBJECTS = {
Recipe: fill_recipe,
}
|
towerjoo/DjangoNotes | refs/heads/master | Django-1.5.1/django/core/files/locks.py | 128 | """
Portable file locking utilities.
Based partially on example by Jonathan Feignberg <jdf@pobox.com> in the Python
Cookbook, licensed under the Python Software License.
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
... f.write('Django')
"""
__all__ = ('LOCK_EX','LOCK_SH','LOCK_NB','lock','unlock')
system_type = None
try:
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
__overlapped = pywintypes.OVERLAPPED()
system_type = 'nt'
except (ImportError, AttributeError):
pass
try:
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
system_type = 'posix'
except (ImportError, AttributeError):
pass
def fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return hasattr(f, 'fileno') and f.fileno() or f
if system_type == 'nt':
def lock(file, flags):
hfile = win32file._get_osfhandle(fd(file))
win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)
def unlock(file):
hfile = win32file._get_osfhandle(fd(file))
win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
elif system_type == 'posix':
def lock(file, flags):
fcntl.lockf(fd(file), flags)
def unlock(file):
fcntl.lockf(fd(file), fcntl.LOCK_UN)
else:
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = None
# Dummy functions that don't do anything.
def lock(file, flags):
pass
def unlock(file):
pass
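# A minimal non-blocking usage sketch, assuming a POSIX system (fcntl.lockf
# raises IOError when LOCK_NB is set and the lock is already held; where
# locking is unsupported, LOCK_NB is None and lock() silently does nothing):
#
#   with open('./file', 'wb') as f:
#       try:
#           lock(f, LOCK_EX | LOCK_NB)
#       except IOError:
#           pass  # another process holds the lock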
|
pgmillon/ansible | refs/heads/devel | lib/ansible/plugins/callback/log_plays.py | 27 | # (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: log_plays
type: notification
short_description: write playbook output to log file
version_added: historical
description:
- This callback writes playbook output to a file per host in the `/var/log/ansible/hosts` directory
requirements:
- Whitelist in configuration
     - A /var/log/ansible/hosts directory writable by the user executing Ansible on the controller
options:
log_folder:
version_added: '2.9'
default: /var/log/ansible/hosts
description: The folder where log files will be created.
env:
- name: ANSIBLE_LOG_FOLDER
ini:
- section: callback_log_plays
key: log_folder
'''
import os
import time
import json
from ansible.utils.path import makedirs_safe
from ansible.module_utils._text import to_bytes
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
# NOTE: in Ansible 1.2 or later general logging is available without
# this plugin, just set ANSIBLE_LOG_PATH as an environment variable
# or log_path in the DEFAULTS section of your ansible configuration
# file. This callback is an example of per hosts logging for those
# that want it.
class CallbackModule(CallbackBase):
"""
logs playbook results, per host, in /var/log/ansible/hosts
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'log_plays'
CALLBACK_NEEDS_WHITELIST = True
TIME_FORMAT = "%b %d %Y %H:%M:%S"
MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n"
def __init__(self):
super(CallbackModule, self).__init__()
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.log_folder = self.get_option("log_folder")
if not os.path.exists(self.log_folder):
makedirs_safe(self.log_folder)
def log(self, host, category, data):
if isinstance(data, MutableMapping):
if '_ansible_verbose_override' in data:
# avoid logging extraneous data
data = 'omitted'
else:
data = data.copy()
invocation = data.pop('invocation', None)
data = json.dumps(data, cls=AnsibleJSONEncoder)
if invocation is not None:
data = json.dumps(invocation) + " => %s " % data
path = os.path.join(self.log_folder, host)
now = time.strftime(self.TIME_FORMAT, time.localtime())
msg = to_bytes(self.MSG_FORMAT % dict(now=now, category=category, data=data))
with open(path, "ab") as fd:
fd.write(msg)
def runner_on_failed(self, host, res, ignore_errors=False):
self.log(host, 'FAILED', res)
def runner_on_ok(self, host, res):
self.log(host, 'OK', res)
def runner_on_skipped(self, host, item=None):
self.log(host, 'SKIPPED', '...')
def runner_on_unreachable(self, host, res):
self.log(host, 'UNREACHABLE', res)
def runner_on_async_failed(self, host, res, jid):
self.log(host, 'ASYNC_FAILED', res)
def playbook_on_import_for_host(self, host, imported_file):
self.log(host, 'IMPORTED', imported_file)
def playbook_on_not_import_for_host(self, host, missing_file):
self.log(host, 'NOTIMPORTED', missing_file)
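# To activate this callback, whitelist it (a configuration sketch; the option
# names follow the DOCUMENTATION block above):
#
#   # ansible.cfg
#   [defaults]
#   callback_whitelist = log_plays
#
#   # optional: override the log directory
#   export ANSIBLE_LOG_FOLDER=/tmp/ansible-logs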
|
cafecivet/django_girls_tutorial | refs/heads/master | Lib/site-packages/django/contrib/auth/models.py | 48 | from __future__ import unicode_literals
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string, salted_hmac
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
user_logged_in.connect(update_last_login)
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label,
model),
)
@python_2_unicode_compatible
class Permission(models.Model):
"""
The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form
and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have a
certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically
created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model',
'codename')
def __str__(self):
return "%s | %s | %s" % (
six.text_type(self.content_type.app_label),
six.text_type(self.content_type),
six.text_type(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
"""
The manager for the auth's Group model.
"""
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group Site editors has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission,
verbose_name=_('permissions'), blank=True)
objects = GroupManager()
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
class BaseUserManager(models.Manager):
@classmethod
def normalize_email(cls, email):
"""
Normalize the address by lowercasing the domain part of the email
address.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
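        # e.g. normalize_email('Alice@EXAMPLE.com') -> 'Alice@example.com'
        # (only the domain part is lowercased; the local part is kept as-is)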
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generates a random password with the given length and given
allowed_chars. Note that the default value of allowed_chars does not
have "I" or "O" or letters and digits that look similar -- just to
avoid confusion.
"""
return get_random_string(length, allowed_chars)
def get_by_natural_key(self, username):
return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):
def _create_user(self, username, email, password,
is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=is_staff, is_active=True,
is_superuser=is_superuser, last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
return self._create_user(username, email, password, False, False,
**extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
return self._create_user(username, email, password, True, True,
**extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
password = models.CharField(_('password'), max_length=128)
last_login = models.DateTimeField(_('last login'), default=timezone.now)
is_active = True
REQUIRED_FIELDS = []
class Meta:
abstract = True
def get_username(self):
"Return the identifying username for this User"
return getattr(self, self.USERNAME_FIELD)
def __str__(self):
return self.get_username()
def natural_key(self):
return (self.get_username(),)
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def set_password(self, raw_password):
self.password = make_password(raw_password)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
self.save(update_fields=["password"])
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
def get_full_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_full_name() method')
def get_short_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_short_name() method.')
def get_session_auth_hash(self):
"""
Returns an HMAC of the password field.
"""
key_salt = "django.contrib.auth.models.AbstractBaseUser.get_session_auth_hash"
return salted_hmac(key_salt, self.password).hexdigest()
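        # The session framework stores this hash, so changing the password
        # changes the HMAC and thereby invalidates existing sessions.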
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
permissions.update(backend.get_all_permissions(user, obj))
return permissions
def _user_has_perm(user, perm, obj):
for backend in auth.get_backends():
if hasattr(backend, "has_perm"):
if backend.has_perm(user, perm, obj):
return True
return False
def _user_has_module_perms(user, app_label):
for backend in auth.get_backends():
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class PermissionsMixin(models.Model):
"""
A mixin class that adds the fields and methods necessary to support
Django's Group and Permission model using the ModelBackend.
"""
is_superuser = models.BooleanField(_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
groups = models.ManyToManyField(Group, verbose_name=_('groups'),
blank=True, help_text=_('The groups this user belongs to. A user will '
'get all permissions granted to each of '
'his/her group.'),
related_name="user_set", related_query_name="user")
user_permissions = models.ManyToManyField(Permission,
verbose_name=_('user permissions'), blank=True,
help_text=_('Specific permissions for this user.'),
related_name="user_set", related_query_name="user")
class Meta:
abstract = True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through their
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions. If
object is passed, it checks if the user has all required perms for this
object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, digits and '
'@/./+/-/_ only.'),
validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
abstract = True
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
"""
Users within the Django authentication system are represented by this
model.
Username, password and email are required. Other fields are optional.
"""
class Meta(AbstractUser.Meta):
swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
id = None
pk = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager(Group)
_user_permissions = EmptyManager(Permission)
def __init__(self):
pass
def __str__(self):
return 'AnonymousUser'
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def delete(self):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def set_password(self, raw_password):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def check_password(self, raw_password):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def is_anonymous(self):
return True
def is_authenticated(self):
return False
|
harrieshc/autokey | refs/heads/master | src/lib/interface.py | 46 | # -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ["XRecordInterface", "AtSpiInterface"]
import os, threading, re, time, socket, select, logging, Queue, subprocess
try:
import pyatspi
HAS_ATSPI = True
except ImportError:
HAS_ATSPI = False
from Xlib import X, XK, display, error
try:
from Xlib.ext import record, xtest
HAS_RECORD = True
except ImportError:
HAS_RECORD = False
from Xlib.protocol import rq, event
import common
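# NOTE: Key, MODIFIERS, XK_TO_AK_MAP, XK_TO_AK_NUMLOCKED, AK_TO_XK_MAP,
# ConfigManager and ENABLE_QT4_WORKAROUND used below are provided by sibling
# modules (iomediator, configmanager) in the full autokey source; the
# corresponding imports are assumed here.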
if common.USING_QT:
from PyQt4.QtGui import QClipboard, QApplication
else:
from gi.repository import Gtk, Gdk
logger = logging.getLogger("interface")
MASK_INDEXES = [
(X.ShiftMapIndex, X.ShiftMask),
(X.ControlMapIndex, X.ControlMask),
(X.LockMapIndex, X.LockMask),
(X.Mod1MapIndex, X.Mod1Mask),
(X.Mod2MapIndex, X.Mod2Mask),
(X.Mod3MapIndex, X.Mod3Mask),
(X.Mod4MapIndex, X.Mod4Mask),
(X.Mod5MapIndex, X.Mod5Mask),
]
CAPSLOCK_LEDMASK = 1<<0
NUMLOCK_LEDMASK = 1<<1
class XInterfaceBase(threading.Thread):
"""
Encapsulates the common functionality for the two X interface classes.
"""
def __init__(self, mediator, app):
threading.Thread.__init__(self)
self.setDaemon(True)
self.setName("XInterface-thread")
self.mediator = mediator
self.app = app
self.lastChars = [] # QT4 Workaround
self.__enableQT4Workaround = False # QT4 Workaround
self.shutdown = False
# Event loop
self.eventThread = threading.Thread(target=self.__eventLoop)
self.queue = Queue.Queue()
# Event listener
self.listenerThread = threading.Thread(target=self.__flushEvents)
if common.USING_QT:
self.clipBoard = QApplication.clipboard()
else:
self.clipBoard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.selection = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
self.__initMappings()
# Set initial lock state
ledMask = self.localDisplay.get_keyboard_control().led_mask
mediator.set_modifier_state(Key.CAPSLOCK, (ledMask & CAPSLOCK_LEDMASK) != 0)
mediator.set_modifier_state(Key.NUMLOCK, (ledMask & NUMLOCK_LEDMASK) != 0)
# Window name atoms
self.__NameAtom = self.localDisplay.intern_atom("_NET_WM_NAME", True)
self.__VisibleNameAtom = self.localDisplay.intern_atom("_NET_WM_VISIBLE_NAME", True)
if not common.USING_QT:
self.keyMap = Gdk.Keymap.get_default()
self.keyMap.connect("keys-changed", self.on_keys_changed)
self.__ignoreRemap = False
self.eventThread.start()
self.listenerThread.start()
def __eventLoop(self):
while True:
method, args = self.queue.get()
if method is None and args is None:
break
try:
method(*args)
except Exception, e:
logger.exception("Error in X event loop thread")
self.queue.task_done()
def __enqueue(self, method, *args):
self.queue.put_nowait((method, args))
def on_keys_changed(self, data=None):
if not self.__ignoreRemap:
logger.debug("Recorded keymap change event")
self.__ignoreRemap = True
time.sleep(0.2)
self.__enqueue(self.__ungrabAllHotkeys)
self.__enqueue(self.__delayedInitMappings)
else:
logger.debug("Ignored keymap change event")
def __delayedInitMappings(self):
self.__initMappings()
self.__ignoreRemap = False
def __initMappings(self):
self.localDisplay = display.Display()
self.rootWindow = self.localDisplay.screen().root
self.rootWindow.change_attributes(event_mask=X.SubstructureNotifyMask|X.StructureNotifyMask)
altList = self.localDisplay.keysym_to_keycodes(XK.XK_ISO_Level3_Shift)
self.__usableOffsets = (0, 1)
for code, offset in altList:
if code == 108 and offset == 0:
self.__usableOffsets += (4, 5)
logger.debug("Enabling sending using Alt-Grid")
break
# Build modifier mask mapping
self.modMasks = {}
mapping = self.localDisplay.get_modifier_mapping()
for keySym, ak in XK_TO_AK_MAP.iteritems():
if ak in MODIFIERS:
keyCodeList = self.localDisplay.keysym_to_keycodes(keySym)
found = False
for keyCode, lvl in keyCodeList:
for index, mask in MASK_INDEXES:
if keyCode in mapping[index]:
self.modMasks[ak] = mask
found = True
break
if found: break
logger.debug("Modifier masks: %r", self.modMasks)
self.__grabHotkeys()
self.localDisplay.flush()
# --- get list of keycodes that are unused in the current keyboard mapping
keyCode = 8
avail = []
for keyCodeMapping in self.localDisplay.get_keyboard_mapping(keyCode, 200):
codeAvail = True
for offset in keyCodeMapping:
if offset != 0:
codeAvail = False
break
if codeAvail:
avail.append(keyCode)
keyCode += 1
self.__availableKeycodes = avail
self.remappedChars = {}
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
self.keymap_test()
def keymap_test(self):
code = self.localDisplay.keycode_to_keysym(108, 0)
for attr in XK.__dict__.iteritems():
if attr[0].startswith("XK"):
if attr[1] == code:
logger.debug("Alt-Grid: %s, %s", attr[0], attr[1])
logger.debug(repr(self.localDisplay.keysym_to_keycodes(XK.XK_ISO_Level3_Shift)))
logger.debug("X Server Keymap")
for char in "\\|`1234567890-=~!@#$%^&*()qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\"ZXCVBNM<>?":
keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
if len(keyCodeList) > 0:
logger.debug("[%s] : %s", char, keyCodeList)
else:
logger.debug("No mapping for [%s]", char)
def __needsMutterWorkaround(self, item):
if Key.SUPER not in item.modifiers:
return False
try:
output = subprocess.check_output(["ps", "-eo", "command"])
lines = output.splitlines()
for line in lines:
if "gnome-shell" in line or "cinnamon" in line or "unity" in line:
return True
except:
pass # since this is just a nasty workaround, if anything goes wrong just disable it
return False
def __grabHotkeys(self):
"""
Run during startup to grab global and specific hotkeys in all open windows
"""
c = self.app.configManager
hotkeys = c.hotKeys + c.hotKeyFolders
# Grab global hotkeys in root window
for item in c.globalHotkeys:
if item.enabled:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
# Grab hotkeys without a filter in root window
for item in hotkeys:
if item.get_applicable_regex() is None:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
self.__enqueue(self.__recurseTree, self.rootWindow, hotkeys)
def __recurseTree(self, parent, hotkeys):
# Grab matching hotkeys in all open child windows
try:
children = parent.query_tree().children
except:
return # window has been destroyed
for window in children:
try:
title = self.get_window_title(window, False)
klass = self.get_window_class(window, False)
if title or klass:
for item in hotkeys:
if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
self.__grabHotkey(item.hotKey, item.modifiers, window)
self.__grabRecurse(item, window, False)
self.__enqueue(self.__recurseTree, window, hotkeys)
except:
logger.exception("grab on window failed")
def __ungrabAllHotkeys(self):
"""
Ungrab all hotkeys in preparation for keymap change
"""
c = self.app.configManager
hotkeys = c.hotKeys + c.hotKeyFolders
# Ungrab global hotkeys in root window, recursively
for item in c.globalHotkeys:
if item.enabled:
self.__ungrabHotkey(item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__ungrabRecurse(item, self.rootWindow, False)
# Ungrab hotkeys without a filter in root window, recursively
for item in hotkeys:
if item.get_applicable_regex() is None:
self.__ungrabHotkey(item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__ungrabRecurse(item, self.rootWindow, False)
self.__recurseTreeUngrab(self.rootWindow, hotkeys)
def __recurseTreeUngrab(self, parent, hotkeys):
# Ungrab matching hotkeys in all open child windows
try:
children = parent.query_tree().children
except:
return # window has been destroyed
for window in children:
try:
title = self.get_window_title(window, False)
klass = self.get_window_class(window, False)
if title or klass:
for item in hotkeys:
if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
self.__ungrabHotkey(item.hotKey, item.modifiers, window)
self.__ungrabRecurse(item, window, False)
self.__enqueue(self.__recurseTreeUngrab, window, hotkeys)
except:
logger.exception("ungrab on window failed")
def __grabHotkeysForWindow(self, window):
"""
Grab all hotkeys relevant to the window
Used when a new window is created
"""
c = self.app.configManager
hotkeys = c.hotKeys + c.hotKeyFolders
title = self.get_window_title(window)
klass = self.get_window_class(window)
for item in hotkeys:
if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
elif self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
def __grabHotkey(self, key, modifiers, window):
"""
Grab a specific hotkey in the given window
"""
logger.debug("Grabbing hotkey: %r %r", modifiers, key)
try:
keycode = self.__lookupKeyCode(key)
mask = 0
for mod in modifiers:
mask |= self.modMasks[mod]
window.grab_key(keycode, mask, True, X.GrabModeAsync, X.GrabModeAsync)
if Key.NUMLOCK in self.modMasks:
window.grab_key(keycode, mask|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
if Key.CAPSLOCK in self.modMasks:
window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
except Exception, e:
logger.warn("Failed to grab hotkey %r %r: %s", modifiers, key, str(e))
def grab_hotkey(self, item):
"""
Grab a hotkey.
If the hotkey has no filter regex, it is global and is grabbed recursively from the root window
If it has a filter regex, iterate over all children of the root and grab from matching windows
"""
if item.get_applicable_regex() is None:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
else:
self.__enqueue(self.__grabRecurse, item, self.rootWindow)
def __grabRecurse(self, item, parent, checkWinInfo=True):
try:
children = parent.query_tree().children
except:
return # window has been destroyed
for window in children:
shouldTrigger = False
if checkWinInfo:
title = self.get_window_title(window, False)
klass = self.get_window_class(window, False)
shouldTrigger = item._should_trigger_window_title((title, klass))
if shouldTrigger or not checkWinInfo:
self.__grabHotkey(item.hotKey, item.modifiers, window)
self.__grabRecurse(item, window, False)
else:
self.__grabRecurse(item, window)
def ungrab_hotkey(self, item):
"""
Ungrab a hotkey.
If the hotkey has no filter regex, it is global and is grabbed recursively from the root window
If it has a filter regex, iterate over all children of the root and ungrab from matching windows
"""
import copy
newItem = copy.copy(item)
if item.get_applicable_regex() is None:
self.__enqueue(self.__ungrabHotkey, newItem.hotKey, newItem.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow, False)
else:
self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow)
def __ungrabRecurse(self, item, parent, checkWinInfo=True):
try:
children = parent.query_tree().children
except:
return # window has been destroyed
for window in children:
shouldTrigger = False
if checkWinInfo:
title = self.get_window_title(window, False)
klass = self.get_window_class(window, False)
shouldTrigger = item._should_trigger_window_title((title, klass))
if shouldTrigger or not checkWinInfo:
self.__ungrabHotkey(item.hotKey, item.modifiers, window)
self.__ungrabRecurse(item, window, False)
else:
self.__ungrabRecurse(item, window)
def __ungrabHotkey(self, key, modifiers, window):
"""
Ungrab a specific hotkey in the given window
"""
logger.debug("Ungrabbing hotkey: %r %r", modifiers, key)
try:
keycode = self.__lookupKeyCode(key)
mask = 0
for mod in modifiers:
mask |= self.modMasks[mod]
window.ungrab_key(keycode, mask)
if Key.NUMLOCK in self.modMasks:
window.ungrab_key(keycode, mask|self.modMasks[Key.NUMLOCK])
if Key.CAPSLOCK in self.modMasks:
window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK])
if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK])
except Exception, e:
logger.warn("Failed to ungrab hotkey %r %r: %s", modifiers, key, str(e))
def lookup_string(self, keyCode, shifted, numlock, altGrid):
if keyCode == 0:
return "<unknown>"
keySym = self.localDisplay.keycode_to_keysym(keyCode, 0)
if keySym in XK_TO_AK_NUMLOCKED and numlock and not (numlock and shifted):
return XK_TO_AK_NUMLOCKED[keySym]
elif keySym in XK_TO_AK_MAP:
return XK_TO_AK_MAP[keySym]
else:
try:
index = 0
if shifted: index += 1
if altGrid: index += 4
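                # keysym table offsets: 0 = plain, 1 = Shift, 4 = AltGr,
                # 5 = Shift+AltGr (the AltGr offsets are only usable when
                # __initMappings detected Alt-Grid support)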
return unichr(self.localDisplay.keycode_to_keysym(keyCode, index))
except ValueError:
return "<code%d>" % keyCode
def send_string_clipboard(self, string, pasteCommand):
self.__enqueue(self.__sendStringClipboard, string, pasteCommand)
def __sendStringClipboard(self, string, pasteCommand):
logger.debug("Sending string: %r", string)
if pasteCommand is None:
if common.USING_QT:
self.sem = threading.Semaphore(0)
self.app.exec_in_main(self.__fillSelection, string)
self.sem.acquire()
else:
self.__fillSelection(string)
focus = self.localDisplay.get_input_focus().focus
xtest.fake_input(focus, X.ButtonPress, X.Button2)
xtest.fake_input(focus, X.ButtonRelease, X.Button2)
else:
if common.USING_QT:
self.sem = threading.Semaphore(0)
self.app.exec_in_main(self.__fillClipboard, string)
self.sem.acquire()
else:
self.__fillClipboard(string)
self.mediator.send_string(pasteCommand)
if common.USING_QT:
self.app.exec_in_main(self.__restoreClipboard)
logger.debug("Send via clipboard done")
def __restoreClipboard(self):
if self.__savedClipboard != "":
if common.USING_QT:
self.clipBoard.setText(self.__savedClipboard, QClipboard.Clipboard)
else:
Gdk.threads_enter()
self.clipBoard.set_text(self.__savedClipboard)
Gdk.threads_leave()
def __fillSelection(self, string):
if common.USING_QT:
self.clipBoard.setText(string, QClipboard.Selection)
self.sem.release()
else:
Gdk.threads_enter()
self.selection.set_text(string.encode("utf-8"))
Gdk.threads_leave()
def __fillClipboard(self, string):
if common.USING_QT:
self.__savedClipboard = self.clipBoard.text()
self.clipBoard.setText(string, QClipboard.Clipboard)
self.sem.release()
else:
Gdk.threads_enter()
text = self.clipBoard.wait_for_text()
self.__savedClipboard = ''
if text is not None: self.__savedClipboard = text
self.clipBoard.set_text(string.encode("utf-8"))
Gdk.threads_leave()
def begin_send(self):
self.__enqueue(self.__grab_keyboard)
def finish_send(self):
self.__enqueue(self.__ungrabKeyboard)
def grab_keyboard(self):
self.__enqueue(self.__grab_keyboard)
def __grab_keyboard(self):
focus = self.localDisplay.get_input_focus().focus
focus.grab_keyboard(True, X.GrabModeAsync, X.GrabModeAsync, X.CurrentTime)
self.localDisplay.flush()
def ungrab_keyboard(self):
self.__enqueue(self.__ungrabKeyboard)
def __ungrabKeyboard(self):
self.localDisplay.ungrab_keyboard(X.CurrentTime)
self.localDisplay.flush()
def __findUsableKeycode(self, codeList):
for code, offset in codeList:
if offset in self.__usableOffsets:
return code, offset
return None, None
def send_string(self, string):
self.__enqueue(self.__sendString, string)
def __sendString(self, string):
"""
Send a string of printable characters.
"""
logger.debug("Sending string: %r", string)
# Determine if workaround is needed
if not ConfigManager.SETTINGS[ENABLE_QT4_WORKAROUND]:
self.__checkWorkaroundNeeded()
# First find out if any chars need remapping
remapNeeded = False
for char in string:
keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
usableCode, offset = self.__findUsableKeycode(keyCodeList)
if usableCode is None and char not in self.remappedChars:
remapNeeded = True
break
# Now we know chars need remapping, do it
if remapNeeded:
self.__ignoreRemap = True
self.remappedChars = {}
remapChars = []
for char in string:
keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
usableCode, offset = self.__findUsableKeycode(keyCodeList)
if usableCode is None:
remapChars.append(char)
logger.debug("Characters requiring remapping: %r", remapChars)
availCodes = self.__availableKeycodes
logger.debug("Remapping with keycodes in the range: %r", availCodes)
mapping = self.localDisplay.get_keyboard_mapping(8, 200)
firstCode = 8
for i in xrange(len(availCodes) - 1):
code = availCodes[i]
sym1 = 0
sym2 = 0
if len(remapChars) > 0:
char = remapChars.pop(0)
self.remappedChars[char] = (code, 0)
sym1 = ord(char)
if len(remapChars) > 0:
char = remapChars.pop(0)
self.remappedChars[char] = (code, 1)
sym2 = ord(char)
if sym1 != 0:
mapping[code - firstCode][0] = sym1
mapping[code - firstCode][1] = sym2
mapping = [tuple(l) for l in mapping]
self.localDisplay.change_keyboard_mapping(firstCode, mapping)
self.localDisplay.flush()
focus = self.localDisplay.get_input_focus().focus
for char in string:
try:
keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
keyCode, offset = self.__findUsableKeycode(keyCodeList)
if keyCode is not None:
if offset == 0:
self.__sendKeyCode(keyCode, theWindow=focus)
if offset == 1:
self.__pressKey(Key.SHIFT)
self.__sendKeyCode(keyCode, self.modMasks[Key.SHIFT], focus)
self.__releaseKey(Key.SHIFT)
if offset == 4:
self.__pressKey(Key.ALT_GR)
self.__sendKeyCode(keyCode, self.modMasks[Key.ALT_GR], focus)
self.__releaseKey(Key.ALT_GR)
if offset == 5:
self.__pressKey(Key.ALT_GR)
self.__pressKey(Key.SHIFT)
self.__sendKeyCode(keyCode, self.modMasks[Key.ALT_GR]|self.modMasks[Key.SHIFT], focus)
self.__releaseKey(Key.SHIFT)
self.__releaseKey(Key.ALT_GR)
elif char in self.remappedChars:
keyCode, offset = self.remappedChars[char]
if offset == 0:
self.__sendKeyCode(keyCode, theWindow=focus)
if offset == 1:
self.__pressKey(Key.SHIFT)
self.__sendKeyCode(keyCode, self.modMasks[Key.SHIFT], focus)
self.__releaseKey(Key.SHIFT)
else:
logger.warn("Unable to send character %r", char)
except Exception, e:
logger.exception("Error sending char %r: %s", char, str(e))
self.__ignoreRemap = False
def send_key(self, keyName):
"""
Send a specific non-printing key, eg Up, Left, etc
"""
self.__enqueue(self.__sendKey, keyName)
def __sendKey(self, keyName):
logger.debug("Send special key: [%r]", keyName)
self.__sendKeyCode(self.__lookupKeyCode(keyName))
def fake_keypress(self, keyName):
self.__enqueue(self.__fakeKeypress, keyName)
def __fakeKeypress(self, keyName):
keyCode = self.__lookupKeyCode(keyName)
xtest.fake_input(self.rootWindow, X.KeyPress, keyCode)
xtest.fake_input(self.rootWindow, X.KeyRelease, keyCode)
def fake_keydown(self, keyName):
self.__enqueue(self.__fakeKeydown, keyName)
def __fakeKeydown(self, keyName):
keyCode = self.__lookupKeyCode(keyName)
xtest.fake_input(self.rootWindow, X.KeyPress, keyCode)
def fake_keyup(self, keyName):
self.__enqueue(self.__fakeKeyup, keyName)
def __fakeKeyup(self, keyName):
keyCode = self.__lookupKeyCode(keyName)
xtest.fake_input(self.rootWindow, X.KeyRelease, keyCode)
def send_modified_key(self, keyName, modifiers):
"""
Send a modified key (e.g. when emulating a hotkey)
"""
self.__enqueue(self.__sendModifiedKey, keyName, modifiers)
def __sendModifiedKey(self, keyName, modifiers):
logger.debug("Send modified key: modifiers: %s key: %s", modifiers, keyName)
try:
mask = 0
for mod in modifiers:
mask |= self.modMasks[mod]
keyCode = self.__lookupKeyCode(keyName)
for mod in modifiers: self.__pressKey(mod)
self.__sendKeyCode(keyCode, mask)
for mod in modifiers: self.__releaseKey(mod)
except Exception, e:
logger.warn("Error sending modified key %r %r: %s", modifiers, keyName, str(e))
def send_mouse_click(self, xCoord, yCoord, button, relative):
self.__enqueue(self.__sendMouseClick, xCoord, yCoord, button, relative)
def __sendMouseClick(self, xCoord, yCoord, button, relative):
# Get current pointer position so we can return it there
pos = self.rootWindow.query_pointer()
if relative:
focus = self.localDisplay.get_input_focus().focus
focus.warp_pointer(xCoord, yCoord)
xtest.fake_input(focus, X.ButtonPress, button, x=xCoord, y=yCoord)
xtest.fake_input(focus, X.ButtonRelease, button, x=xCoord, y=yCoord)
else:
self.rootWindow.warp_pointer(xCoord, yCoord)
xtest.fake_input(self.rootWindow, X.ButtonPress, button, x=xCoord, y=yCoord)
xtest.fake_input(self.rootWindow, X.ButtonRelease, button, x=xCoord, y=yCoord)
self.rootWindow.warp_pointer(pos.root_x, pos.root_y)
self.__flush()
def send_mouse_click_relative(self, xoff, yoff, button):
self.__enqueue(self.__sendMouseClickRelative, xoff, yoff, button)
def __sendMouseClickRelative(self, xoff, yoff, button):
# Get current pointer position
pos = self.rootWindow.query_pointer()
xCoord = pos.root_x + xoff
yCoord = pos.root_y + yoff
self.rootWindow.warp_pointer(xCoord, yCoord)
xtest.fake_input(self.rootWindow, X.ButtonPress, button, x=xCoord, y=yCoord)
xtest.fake_input(self.rootWindow, X.ButtonRelease, button, x=xCoord, y=yCoord)
self.rootWindow.warp_pointer(pos.root_x, pos.root_y)
self.__flush()
def flush(self):
self.__enqueue(self.__flush)
def __flush(self):
self.localDisplay.flush()
self.lastChars = []
def press_key(self, keyName):
self.__enqueue(self.__pressKey, keyName)
def __pressKey(self, keyName):
self.__sendKeyPressEvent(self.__lookupKeyCode(keyName), 0)
def release_key(self, keyName):
self.__enqueue(self.__releaseKey, keyName)
def __releaseKey(self, keyName):
self.__sendKeyReleaseEvent(self.__lookupKeyCode(keyName), 0)
def __flushEvents(self):
while True:
try:
readable, w, e = select.select([self.localDisplay], [], [], 1)
time.sleep(1)
if self.localDisplay in readable:
createdWindows = []
destroyedWindows = []
for x in xrange(self.localDisplay.pending_events()):
event = self.localDisplay.next_event()
if event.type == X.CreateNotify:
createdWindows.append(event.window)
if event.type == X.DestroyNotify:
destroyedWindows.append(event.window)
for window in createdWindows:
if window not in destroyedWindows:
self.__enqueue(self.__grabHotkeysForWindow, window)
if self.shutdown:
break
except:
pass
def handle_keypress(self, keyCode):
self.__enqueue(self.__handleKeyPress, keyCode)
def __handleKeyPress(self, keyCode):
focus = self.localDisplay.get_input_focus().focus
modifier = self.__decodeModifier(keyCode)
if modifier is not None:
self.mediator.handle_modifier_down(modifier)
else:
self.mediator.handle_keypress(keyCode, self.get_window_title(focus), self.get_window_class(focus))
def handle_keyrelease(self, keyCode):
self.__enqueue(self.__handleKeyrelease, keyCode)
def __handleKeyrelease(self, keyCode):
modifier = self.__decodeModifier(keyCode)
if modifier is not None:
self.mediator.handle_modifier_up(modifier)
def handle_mouseclick(self, button, x, y):
self.__enqueue(self.__handleMouseclick, button, x, y)
def __handleMouseclick(self, button, x, y):
title = self.get_window_title()
klass = self.get_window_class()
info = (title, klass)
if x is None and y is None:
ret = self.localDisplay.get_input_focus().focus.query_pointer()
self.mediator.handle_mouse_click(ret.root_x, ret.root_y, ret.win_x, ret.win_y, button, info)
else:
focus = self.localDisplay.get_input_focus().focus
try:
rel = focus.translate_coords(self.rootWindow, x, y)
self.mediator.handle_mouse_click(x, y, rel.x, rel.y, button, info)
except:
self.mediator.handle_mouse_click(x, y, 0, 0, button, info)
def __decodeModifier(self, keyCode):
"""
Checks if the given keyCode is a modifier key. If it is, returns the modifier name
constant as defined in the iomediator module. If not, returns C{None}
"""
keyName = self.lookup_string(keyCode, False, False, False)
if keyName in MODIFIERS:
return keyName
return None
def __sendKeyCode(self, keyCode, modifiers=0, theWindow=None):
if ConfigManager.SETTINGS[ENABLE_QT4_WORKAROUND] or self.__enableQT4Workaround:
self.__doQT4Workaround(keyCode)
self.__sendKeyPressEvent(keyCode, modifiers, theWindow)
self.__sendKeyReleaseEvent(keyCode, modifiers, theWindow)
def __checkWorkaroundNeeded(self):
focus = self.localDisplay.get_input_focus().focus
windowName = self.get_window_title(focus)
windowClass = self.get_window_class(focus)
w = self.app.configManager.workAroundApps
if w.match(windowName) or w.match(windowClass):
self.__enableQT4Workaround = True
else:
self.__enableQT4Workaround = False
def __doQT4Workaround(self, keyCode):
if len(self.lastChars) > 0:
if keyCode in self.lastChars:
self.localDisplay.flush()
time.sleep(0.0125)
self.lastChars.append(keyCode)
if len(self.lastChars) > 10:
self.lastChars.pop(0)
def __sendKeyPressEvent(self, keyCode, modifiers, theWindow=None):
if theWindow is None:
focus = self.localDisplay.get_input_focus().focus
else:
focus = theWindow
keyEvent = event.KeyPress(
detail=keyCode,
time=X.CurrentTime,
root=self.rootWindow,
window=focus,
child=X.NONE,
root_x=1,
root_y=1,
event_x=1,
event_y=1,
state=modifiers,
same_screen=1
)
focus.send_event(keyEvent)
def __sendKeyReleaseEvent(self, keyCode, modifiers, theWindow=None):
if theWindow is None:
focus = self.localDisplay.get_input_focus().focus
else:
focus = theWindow
keyEvent = event.KeyRelease(
detail=keyCode,
time=X.CurrentTime,
root=self.rootWindow,
window=focus,
child=X.NONE,
root_x=1,
root_y=1,
event_x=1,
event_y=1,
state=modifiers,
same_screen=1
)
focus.send_event(keyEvent)
def __lookupKeyCode(self, char):
if char in AK_TO_XK_MAP:
return self.localDisplay.keysym_to_keycode(AK_TO_XK_MAP[char])
elif char.startswith("<code"):
return int(char[5:-1])
else:
try:
return self.localDisplay.keysym_to_keycode(ord(char))
except Exception, e:
logger.error("Unknown key name: %s", char)
raise
def get_window_title(self, window=None, traverse=True):
try:
if window is None:
windowvar = self.localDisplay.get_input_focus().focus
else:
windowvar = window
return self.__getWinTitle(windowvar, traverse)
except:
return ""
def __getWinTitle(self, windowvar, traverse):
atom = windowvar.get_property(self.__VisibleNameAtom, 0, 0, 255)
if atom is None:
atom = windowvar.get_property(self.__NameAtom, 0, 0, 255)
if atom:
return atom.value.decode("utf-8")
elif traverse:
return self.__getWinTitle(windowvar.query_tree().parent, True)
else:
return ""
def get_window_class(self, window=None, traverse=True):
try:
if window is None:
windowvar = self.localDisplay.get_input_focus().focus
else:
windowvar = window
return self.__getWinClass(windowvar, traverse)
except:
return ""
def __getWinClass(self, windowvar, traverse):
wmclass = windowvar.get_wm_class()
if (wmclass == None or wmclass == ""):
if traverse:
return self.__getWinClass(windowvar.query_tree().parent, True)
else:
return ""
return wmclass[0] + '.' + wmclass[1]
def cancel(self):
self.queue.put_nowait((None, None))
self.shutdown = True
self.listenerThread.join()
self.eventThread.join()
self.localDisplay.flush()
self.localDisplay.close()
self.join()
class XRecordInterface(XInterfaceBase):
def initialise(self):
self.recordDisplay = display.Display()
self.__locksChecked = False
# Check for record extension
if not self.recordDisplay.has_extension("RECORD"):
raise Exception("Your X-Server does not have the RECORD extension available/enabled.")
def run(self):
# Create a recording context; we only want key and mouse events
self.ctx = self.recordDisplay.record_create_context(
0,
[record.AllClients],
[{
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': (X.KeyPress, X.ButtonPress), #X.KeyRelease,
'errors': (0, 0),
'client_started': False,
'client_died': False,
}])
# Enable the context; this only returns after a call to record_disable_context,
# while calling the callback function in the meantime
logger.info("XRecord interface thread starting")
self.recordDisplay.record_enable_context(self.ctx, self.__processEvent)
# Finally free the context
self.recordDisplay.record_free_context(self.ctx)
self.recordDisplay.close()
def cancel(self):
self.localDisplay.record_disable_context(self.ctx)
XInterfaceBase.cancel(self)
def __processEvent(self, reply):
if reply.category != record.FromServer:
return
if reply.client_swapped:
return
if not len(reply.data) or ord(reply.data[0]) < 2:
# not an event
return
data = reply.data
while len(data):
event, data = rq.EventField(None).parse_binary_value(data, self.recordDisplay.display, None, None)
if event.type == X.KeyPress:
self.handle_keypress(event.detail)
elif event.type == X.KeyRelease:
self.handle_keyrelease(event.detail)
elif event.type == X.ButtonPress:
self.handle_mouseclick(event.detail, event.root_x, event.root_y)
class AtSpiInterface(XInterfaceBase):
def initialise(self):
self.registry = pyatspi.Registry
def start(self):
logger.info("AT-SPI interface thread starting")
self.registry.registerKeystrokeListener(self.__processKeyEvent, mask=pyatspi.allModifiers())
self.registry.registerEventListener(self.__processMouseEvent, 'mouse:button')
def cancel(self):
self.registry.deregisterKeystrokeListener(self.__processKeyEvent, mask=pyatspi.allModifiers())
self.registry.deregisterEventListener(self.__processMouseEvent, 'mouse:button')
self.registry.stop()
XInterfaceBase.cancel(self)
def __processKeyEvent(self, event):
if event.type == pyatspi.KEY_PRESSED_EVENT:
self.handle_keypress(event.hw_code)
else:
self.handle_keyrelease(event.hw_code)
def __processMouseEvent(self, event):
if event.type[-1] == 'p':
button = int(event.type[-2])
self.handle_mouseclick(button, event.detail1, event.detail2)
def __pumpEvents(self):
pyatspi.Registry.pumpQueuedEvents()
return True
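# NOTE: these imports sit at the end of the module, presumably to avoid a
# circular import between this module and iomediator/configmanager.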
from iomediator import Key, MODIFIERS
from configmanager import *
XK.load_keysym_group('xkb')
XK_TO_AK_MAP = {
XK.XK_Shift_L : Key.SHIFT,
XK.XK_Shift_R : Key.SHIFT,
XK.XK_Caps_Lock : Key.CAPSLOCK,
XK.XK_Control_L : Key.CONTROL,
XK.XK_Control_R : Key.CONTROL,
XK.XK_Alt_L : Key.ALT,
XK.XK_Alt_R : Key.ALT,
XK.XK_ISO_Level3_Shift : Key.ALT_GR,
XK.XK_Super_L : Key.SUPER,
XK.XK_Super_R : Key.SUPER,
XK.XK_Hyper_L : Key.HYPER,
XK.XK_Hyper_R : Key.HYPER,
XK.XK_Meta_L : Key.META,
XK.XK_Meta_R : Key.META,
XK.XK_Num_Lock : Key.NUMLOCK,
#SPACE : Key.SPACE,
XK.XK_Tab : Key.TAB,
XK.XK_Left : Key.LEFT,
XK.XK_Right : Key.RIGHT,
XK.XK_Up : Key.UP,
XK.XK_Down : Key.DOWN,
XK.XK_Return : Key.ENTER,
XK.XK_BackSpace : Key.BACKSPACE,
XK.XK_Scroll_Lock : Key.SCROLL_LOCK,
XK.XK_Print : Key.PRINT_SCREEN,
XK.XK_Pause : Key.PAUSE,
XK.XK_Menu : Key.MENU,
XK.XK_F1 : Key.F1,
XK.XK_F2 : Key.F2,
XK.XK_F3 : Key.F3,
XK.XK_F4 : Key.F4,
XK.XK_F5 : Key.F5,
XK.XK_F6 : Key.F6,
XK.XK_F7 : Key.F7,
XK.XK_F8 : Key.F8,
XK.XK_F9 : Key.F9,
XK.XK_F10 : Key.F10,
XK.XK_F11 : Key.F11,
XK.XK_F12 : Key.F12,
XK.XK_Escape : Key.ESCAPE,
XK.XK_Insert : Key.INSERT,
XK.XK_Delete : Key.DELETE,
XK.XK_Home : Key.HOME,
XK.XK_End : Key.END,
XK.XK_Page_Up : Key.PAGE_UP,
XK.XK_Page_Down : Key.PAGE_DOWN,
XK.XK_KP_Insert : Key.NP_INSERT,
XK.XK_KP_Delete : Key.NP_DELETE,
XK.XK_KP_End : Key.NP_END,
XK.XK_KP_Down : Key.NP_DOWN,
XK.XK_KP_Page_Down : Key.NP_PAGE_DOWN,
XK.XK_KP_Left : Key.NP_LEFT,
XK.XK_KP_Begin : Key.NP_5,
XK.XK_KP_Right : Key.NP_RIGHT,
XK.XK_KP_Home : Key.NP_HOME,
XK.XK_KP_Up: Key.NP_UP,
XK.XK_KP_Page_Up : Key.NP_PAGE_UP,
XK.XK_KP_Divide : Key.NP_DIVIDE,
XK.XK_KP_Multiply : Key.NP_MULTIPLY,
XK.XK_KP_Add : Key.NP_ADD,
XK.XK_KP_Subtract : Key.NP_SUBTRACT,
XK.XK_KP_Enter : Key.ENTER,
XK.XK_space : ' '
}
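# Reverse lookup map. Several keysyms map to the same AutoKey constant
# (e.g. Shift_L and Shift_R both map to SHIFT), so only one keysym per
# constant survives the inversion below.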
AK_TO_XK_MAP = dict((v,k) for k, v in XK_TO_AK_MAP.iteritems())
XK_TO_AK_NUMLOCKED = {
XK.XK_KP_Insert : "0",
XK.XK_KP_Delete : ".",
XK.XK_KP_End : "1",
XK.XK_KP_Down : "2",
XK.XK_KP_Page_Down : "3",
XK.XK_KP_Left : "4",
XK.XK_KP_Begin : "5",
XK.XK_KP_Right : "6",
XK.XK_KP_Home : "7",
XK.XK_KP_Up: "8",
XK.XK_KP_Page_Up : "9",
XK.XK_KP_Divide : "/",
XK.XK_KP_Multiply : "*",
XK.XK_KP_Add : "+",
XK.XK_KP_Subtract : "-",
XK.XK_KP_Enter : Key.ENTER
}
class MockMediator:
"""
Mock IoMediator for testing purposes.
"""
def handle_modifier_down(self, modifier):
pass
def handle_modifier_up(self, modifier):
pass
    def handle_keypress(self, keyCode, windowName, windowClass):
        pass
    def handle_mouse_click(self, rootX, rootY, relX, relY, button, windowInfo):
        pass
if __name__ == "__main__":
import time
x = XLibInterface(MockMediator(), True)
x.start()
x.keymap_test()
time.sleep(10.0)
#time.sleep(4.0)
#x.send_unicode_key([0, 3, 9, 4])
x.cancel()
print "Test completed. Thank you for your assistance in improving AutoKey!"
|
aniruddhkanojia/qtile | refs/heads/develop | libqtile/notify.py | 12 | # Copyright (c) 2010 dequis
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2013 Mickael FALCK
# Copyright (c) 2013 Tao Sauvage
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
If dbus is available, this module implements a
org.freedesktop.Notifications service.
"""
import logging
try:
import dbus
from dbus import service
from dbus.mainloop.glib import DBusGMainLoop
except ImportError:
dbus = None
BUS_NAME = 'org.freedesktop.Notifications'
SERVICE_PATH = '/org/freedesktop/Notifications'
if dbus:
class NotificationService(service.Object):
def __init__(self, manager):
bus_name = service.BusName(BUS_NAME, bus=dbus.SessionBus())
service.Object.__init__(self, bus_name, SERVICE_PATH)
self.manager = manager
@service.method(BUS_NAME, in_signature='', out_signature='as')
def GetCapabilities(self):
            return ('body',)  # 'as' signature: must be a sequence of strings
@service.method(
BUS_NAME, in_signature='susssasa{sv}i', out_signature='u'
)
def Notify(self, app_name, replaces_id, app_icon, summary,
body, actions, hints, timeout):
notif = Notification(summary, body, timeout, hints)
return self.manager.add(notif)
@service.method(BUS_NAME, in_signature='u', out_signature='')
def CloseNotification(self, id):
pass
@service.signal(BUS_NAME, signature='uu')
def NotificationClosed(self, id_in, reason_in):
pass
@service.method(BUS_NAME, in_signature='', out_signature='ssss')
def GetServerInformation(self):
return ("qtile-notify-daemon", "qtile", "1.0", "1")
class Notification(object):
def __init__(self, summary, body='', timeout=-1, hints=None):
self.summary = summary
self.hints = hints or {}
self.body = body
self.timeout = timeout
class NotificationManager(object):
def __init__(self):
self.notifications = []
self.callbacks = []
self._service = None
@property
def service(self):
if dbus and self._service is None:
try:
DBusGMainLoop(set_as_default=True)
self._service = NotificationService(self)
except Exception:
logging.getLogger('qtile').exception('Dbus connection failed')
self._service = None
return self._service
def register(self, callback):
if not self.service:
logging.getLogger('qtile').warning(
'Registering %s without any dbus connection existing',
callback.__name__,
)
self.callbacks.append(callback)
def add(self, notif):
self.notifications.append(notif)
notif.id = len(self.notifications)
for callback in self.callbacks:
callback(notif)
return len(self.notifications)
def show(self, *args, **kwargs):
notif = Notification(*args, **kwargs)
return (notif, self.add(notif))
notifier = NotificationManager()
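# Minimal usage sketch (hypothetical caller code, not part of the module):
#
#   def on_notification(notification):
#       ...  # e.g. display notification.summary / notification.body
#
#   notifier.register(on_notification)
#   notifier.show("summary text", "body text", timeout=5000)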
|
colemanja91/PyEloqua-Examples | refs/heads/master | venv/lib/python3.4/site-packages/pip/_vendor/html5lib/treebuilders/_base.py | 915 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
listElementsMap = {
None: (frozenset(scopingElements), False),
"button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
"list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")])), False),
"table": (frozenset([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select": (frozenset([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
class Node(object):
def __init__(self, name):
"""Node representing an item in the tree.
name - The tag name associated with the node
parent - The parent of the current node (or None for the document node)
value - The value of the current node (applies to text nodes and
comments
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a list of child nodes of the current node. This must
include all elements but not necessarily other node types
_flags - A list of miscellaneous flags that can be set on the node
"""
self.name = name
self.parent = None
self.value = None
self.attributes = {}
self.childNodes = []
self._flags = []
def __str__(self):
attributesStr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in
self.attributes.items()])
if attributesStr:
return "<%s %s>" % (self.name, attributesStr)
else:
return "<%s>" % (self.name)
def __repr__(self):
return "<%s>" % (self.name)
def appendChild(self, node):
"""Insert node as a child of the current node
"""
raise NotImplementedError
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
raise NotImplementedError
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
raise NotImplementedError
def removeChild(self, node):
"""Remove node from the children of the current node
"""
raise NotImplementedError
def reparentChildren(self, newParent):
"""Move all the children of the current node to newParent.
This is needed so that trees that don't store text as nodes move the
text in the correct way
"""
# XXX - should this method be made more general?
for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = []
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text, false otherwise
"""
raise NotImplementedError
class ActiveFormattingElements(list):
def append(self, node):
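        # HTML5 "Noah's Ark" clause: at most three elements with the same tag
        # name and attributes may follow the last marker; the earliest
        # duplicate is removed before appending the new node.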
equalCount = 0
if node != Marker:
for element in self[::-1]:
if element == Marker:
break
if self.nodesEqual(element, node):
equalCount += 1
if equalCount == 3:
self.remove(element)
break
list.append(self, node)
def nodesEqual(self, node1, node2):
if not node1.nameTuple == node2.nameTuple:
return False
if not node1.attributes == node2.attributes:
return False
return True
class TreeBuilder(object):
"""Base treebuilder implementation
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
"""
# Document class
documentClass = None
# The class to use for creating a node
elementClass = None
# The class to use for creating comments
commentClass = None
# The class to use for creating doctypes
doctypeClass = None
# Fragment class
fragmentClass = None
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
self.defaultNamespace = "http://www.w3.org/1999/xhtml"
else:
self.defaultNamespace = None
self.reset()
def reset(self):
self.openElements = []
self.activeFormattingElements = ActiveFormattingElements()
# XXX - rename these to headElement, formElement
self.headPointer = None
self.formPointer = None
self.insertFromTable = False
self.document = self.documentClass()
def elementInScope(self, target, variant=None):
# If we pass a node in we match that. if we pass a string
# match any node with that name
exactNode = hasattr(target, "nameTuple")
listElements, invert = listElementsMap[variant]
for node in reversed(self.openElements):
if (node.name == target and not exactNode or
node == target and exactNode):
return True
elif (invert ^ (node.nameTuple in listElements)):
return False
assert False # We should never reach this point
def reconstructActiveFormattingElements(self):
# Within this algorithm the order of steps described in the
# specification is not quite the same as the order of steps in the
# code. It should still do the same though.
# Step 1: stop the algorithm when there's nothing to do.
if not self.activeFormattingElements:
return
# Step 2 and step 3: we start with the last element. So i is -1.
i = len(self.activeFormattingElements) - 1
entry = self.activeFormattingElements[i]
if entry == Marker or entry in self.openElements:
return
# Step 6
while entry != Marker and entry not in self.openElements:
if i == 0:
# This will be reset to 0 below
i = -1
break
i -= 1
# Step 5: let entry be one earlier in the list.
entry = self.activeFormattingElements[i]
while True:
# Step 7
i += 1
# Step 8
entry = self.activeFormattingElements[i]
clone = entry.cloneNode() # Mainly to get a new copy of the attributes
# Step 9
element = self.insertElement({"type": "StartTag",
"name": clone.name,
"namespace": clone.namespace,
"data": clone.attributes})
# Step 10
self.activeFormattingElements[i] = element
# Step 11
if element == self.activeFormattingElements[-1]:
break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
def insertRoot(self, token):
element = self.createElement(token)
self.openElements.append(element)
self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
def _getInsertFromTable(self):
return self._insertFromTable
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert isinstance(name, text_type), "Element %s not unicode" % name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
and name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
def getDocument(self):
"Return the final tree"
return self.document
def getFragment(self):
"Return the final fragment"
# assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
def testSerializer(self, node):
"""Serialize the subtree of node in the format required by unit tests
node - the node from which to start serializing"""
raise NotImplementedError
|
wavesoft/robob | refs/heads/master | robob/component.py | 1 |
class ComponentBase(object):
"""
Global base class for all components with shared context and specs
"""
def __init__(self, context):
"""
Keep context
"""
self.context = context
|
pschmitt/home-assistant | refs/heads/dev | homeassistant/components/evohome/__init__.py | 1 | """Support for (EMEA/EU-based) Honeywell TCC climate systems.
Such systems include evohome, Round Thermostat, and others.
"""
from datetime import datetime as dt, timedelta
import logging
import re
from typing import Any, Dict, Optional, Tuple
import aiohttp.client_exceptions
import evohomeasync
import evohomeasync2
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
HTTP_SERVICE_UNAVAILABLE,
HTTP_TOO_MANY_REQUESTS,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.service import verify_domain_control
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
from .const import DOMAIN, EVO_FOLLOW, GWS, STORAGE_KEY, STORAGE_VER, TCS, UTC_OFFSET
_LOGGER = logging.getLogger(__name__)
ACCESS_TOKEN = "access_token"
ACCESS_TOKEN_EXPIRES = "access_token_expires"
REFRESH_TOKEN = "refresh_token"
USER_DATA = "user_data"
CONF_LOCATION_IDX = "location_idx"
SCAN_INTERVAL_DEFAULT = timedelta(seconds=300)
SCAN_INTERVAL_MINIMUM = timedelta(seconds=60)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_LOCATION_IDX, default=0): cv.positive_int,
vol.Optional(
CONF_SCAN_INTERVAL, default=SCAN_INTERVAL_DEFAULT
): vol.All(cv.time_period, vol.Range(min=SCAN_INTERVAL_MINIMUM)),
}
)
},
extra=vol.ALLOW_EXTRA,
)
ATTR_SYSTEM_MODE = "mode"
ATTR_DURATION_DAYS = "period"
ATTR_DURATION_HOURS = "duration"
ATTR_ZONE_TEMP = "setpoint"
ATTR_DURATION_UNTIL = "duration"
SVC_REFRESH_SYSTEM = "refresh_system"
SVC_SET_SYSTEM_MODE = "set_system_mode"
SVC_RESET_SYSTEM = "reset_system"
SVC_SET_ZONE_OVERRIDE = "set_zone_override"
SVC_RESET_ZONE_OVERRIDE = "clear_zone_override"
RESET_ZONE_OVERRIDE_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_id})
SET_ZONE_OVERRIDE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_ZONE_TEMP): vol.All(
vol.Coerce(float), vol.Range(min=4.0, max=35.0)
),
vol.Optional(ATTR_DURATION_UNTIL): vol.All(
cv.time_period, vol.Range(min=timedelta(days=0), max=timedelta(days=1))
),
}
)
# system mode schemas are built dynamically, below
def _dt_local_to_aware(dt_naive: dt) -> dt:
dt_aware = dt_util.now() + (dt_naive - dt.now())
if dt_aware.microsecond >= 500000:
dt_aware += timedelta(seconds=1)
return dt_aware.replace(microsecond=0)
def _dt_aware_to_naive(dt_aware: dt) -> dt:
dt_naive = dt.now() + (dt_aware - dt_util.now())
if dt_naive.microsecond >= 500000:
dt_naive += timedelta(seconds=1)
return dt_naive.replace(microsecond=0)
def convert_until(status_dict: dict, until_key: str) -> None:
"""Reformat a dt str from "%Y-%m-%dT%H:%M:%SZ" as local/aware/isoformat."""
if until_key in status_dict: # only present for certain modes
dt_utc_naive = dt_util.parse_datetime(status_dict[until_key])
status_dict[until_key] = dt_util.as_local(dt_utc_naive).isoformat()
def convert_dict(dictionary: Dict[str, Any]) -> Dict[str, Any]:
"""Recursively convert a dict's keys to snake_case."""
def convert_key(key: str) -> str:
"""Convert a string to snake_case."""
string = re.sub(r"[\-\.\s]", "_", str(key))
return (string[0]).lower() + re.sub(
r"[A-Z]", lambda matched: f"_{matched.group(0).lower()}", string[1:]
)
return {
(convert_key(k) if isinstance(k, str) else k): (
convert_dict(v) if isinstance(v, dict) else v
)
for k, v in dictionary.items()
}
def _handle_exception(err) -> bool:
"""Return False if the exception can't be ignored."""
try:
raise err
except evohomeasync2.AuthenticationError:
_LOGGER.error(
"Failed to authenticate with the vendor's server. "
"Check your network and the vendor's service status page. "
"Also check that your username and password are correct. "
"Message is: %s",
err,
)
return False
except aiohttp.ClientConnectionError:
# this appears to be a common occurrence with the vendor's servers
_LOGGER.warning(
"Unable to connect with the vendor's server. "
"Check your network and the vendor's service status page. "
"Message is: %s",
err,
)
return False
except aiohttp.ClientResponseError:
if err.status == HTTP_SERVICE_UNAVAILABLE:
_LOGGER.warning(
"The vendor says their server is currently unavailable. "
"Check the vendor's service status page"
)
return False
if err.status == HTTP_TOO_MANY_REQUESTS:
_LOGGER.warning(
"The vendor's API rate limit has been exceeded. "
"If this message persists, consider increasing the %s",
CONF_SCAN_INTERVAL,
)
return False
raise # we don't expect/handle any other Exceptions
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Create a (EMEA/EU-based) Honeywell TCC system."""
async def load_auth_tokens(store) -> Tuple[Dict, Optional[Dict]]:
app_storage = await store.async_load()
tokens = dict(app_storage if app_storage else {})
if tokens.pop(CONF_USERNAME, None) != config[DOMAIN][CONF_USERNAME]:
            # any tokens won't be valid, and the store might be corrupt
await store.async_save({})
return ({}, None)
# evohomeasync2 requires naive/local datetimes as strings
if tokens.get(ACCESS_TOKEN_EXPIRES) is not None:
tokens[ACCESS_TOKEN_EXPIRES] = _dt_aware_to_naive(
dt_util.parse_datetime(tokens[ACCESS_TOKEN_EXPIRES])
)
user_data = tokens.pop(USER_DATA, None)
return (tokens, user_data)
store = hass.helpers.storage.Store(STORAGE_VER, STORAGE_KEY)
tokens, user_data = await load_auth_tokens(store)
client_v2 = evohomeasync2.EvohomeClient(
config[DOMAIN][CONF_USERNAME],
config[DOMAIN][CONF_PASSWORD],
**tokens,
session=async_get_clientsession(hass),
)
try:
await client_v2.login()
except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
_handle_exception(err)
return False
finally:
config[DOMAIN][CONF_PASSWORD] = "REDACTED"
loc_idx = config[DOMAIN][CONF_LOCATION_IDX]
try:
loc_config = client_v2.installation_info[loc_idx]
except IndexError:
_LOGGER.error(
"Config error: '%s' = %s, but the valid range is 0-%s. "
"Unable to continue. Fix any configuration errors and restart HA",
CONF_LOCATION_IDX,
loc_idx,
len(client_v2.installation_info) - 1,
)
return False
if _LOGGER.isEnabledFor(logging.DEBUG):
_config = {"locationInfo": {"timeZone": None}, GWS: [{TCS: None}]}
_config["locationInfo"]["timeZone"] = loc_config["locationInfo"]["timeZone"]
_config[GWS][0][TCS] = loc_config[GWS][0][TCS]
_LOGGER.debug("Config = %s", _config)
client_v1 = evohomeasync.EvohomeClient(
client_v2.username,
client_v2.password,
user_data=user_data,
session=async_get_clientsession(hass),
)
hass.data[DOMAIN] = {}
hass.data[DOMAIN]["broker"] = broker = EvoBroker(
hass, client_v2, client_v1, store, config[DOMAIN]
)
await broker.save_auth_tokens()
await broker.async_update() # get initial state
hass.async_create_task(async_load_platform(hass, "climate", DOMAIN, {}, config))
if broker.tcs.hotwater:
hass.async_create_task(
async_load_platform(hass, "water_heater", DOMAIN, {}, config)
)
hass.helpers.event.async_track_time_interval(
broker.async_update, config[DOMAIN][CONF_SCAN_INTERVAL]
)
setup_service_functions(hass, broker)
return True
@callback
def setup_service_functions(hass: HomeAssistantType, broker):
"""Set up the service handlers for the system/zone operating modes.
Not all Honeywell TCC-compatible systems support all operating modes. In addition,
each mode will require any of four distinct service schemas. This has to be
enumerated before registering the appropriate handlers.
It appears that all TCC-compatible systems support the same three zones modes.
"""
@verify_domain_control(hass, DOMAIN)
async def force_refresh(call) -> None:
"""Obtain the latest state data via the vendor's RESTful API."""
await broker.async_update()
@verify_domain_control(hass, DOMAIN)
async def set_system_mode(call) -> None:
"""Set the system mode."""
payload = {
"unique_id": broker.tcs.systemId,
"service": call.service,
"data": call.data,
}
async_dispatcher_send(hass, DOMAIN, payload)
@verify_domain_control(hass, DOMAIN)
async def set_zone_override(call) -> None:
"""Set the zone override (setpoint)."""
entity_id = call.data[ATTR_ENTITY_ID]
registry = await hass.helpers.entity_registry.async_get_registry()
registry_entry = registry.async_get(entity_id)
if registry_entry is None or registry_entry.platform != DOMAIN:
raise ValueError(f"'{entity_id}' is not a known {DOMAIN} entity")
if registry_entry.domain != "climate":
raise ValueError(f"'{entity_id}' is not an {DOMAIN} controller/zone")
payload = {
"unique_id": registry_entry.unique_id,
"service": call.service,
"data": call.data,
}
async_dispatcher_send(hass, DOMAIN, payload)
hass.services.async_register(DOMAIN, SVC_REFRESH_SYSTEM, force_refresh)
# Enumerate which operating modes are supported by this system
modes = broker.config["allowedSystemModes"]
# Not all systems support "AutoWithReset": register this handler only if required
if [m["systemMode"] for m in modes if m["systemMode"] == "AutoWithReset"]:
hass.services.async_register(DOMAIN, SVC_RESET_SYSTEM, set_system_mode)
system_mode_schemas = []
modes = [m for m in modes if m["systemMode"] != "AutoWithReset"]
# Permanent-only modes will use this schema
perm_modes = [m["systemMode"] for m in modes if not m["canBeTemporary"]]
if perm_modes: # any of: "Auto", "HeatingOff": permanent only
schema = vol.Schema({vol.Required(ATTR_SYSTEM_MODE): vol.In(perm_modes)})
system_mode_schemas.append(schema)
modes = [m for m in modes if m["canBeTemporary"]]
# These modes are set for a number of hours (or indefinitely): use this schema
temp_modes = [m["systemMode"] for m in modes if m["timingMode"] == "Duration"]
if temp_modes: # any of: "AutoWithEco", permanent or for 0-24 hours
schema = vol.Schema(
{
vol.Required(ATTR_SYSTEM_MODE): vol.In(temp_modes),
vol.Optional(ATTR_DURATION_HOURS): vol.All(
cv.time_period,
vol.Range(min=timedelta(hours=0), max=timedelta(hours=24)),
),
}
)
system_mode_schemas.append(schema)
# These modes are set for a number of days (or indefinitely): use this schema
temp_modes = [m["systemMode"] for m in modes if m["timingMode"] == "Period"]
if temp_modes: # any of: "Away", "Custom", "DayOff", permanent or for 1-99 days
schema = vol.Schema(
{
vol.Required(ATTR_SYSTEM_MODE): vol.In(temp_modes),
vol.Optional(ATTR_DURATION_DAYS): vol.All(
cv.time_period,
vol.Range(min=timedelta(days=1), max=timedelta(days=99)),
),
}
)
system_mode_schemas.append(schema)
if system_mode_schemas:
hass.services.async_register(
DOMAIN,
SVC_SET_SYSTEM_MODE,
set_system_mode,
schema=vol.Any(*system_mode_schemas),
)
# The zone modes are consistent across all systems and use the same schema
hass.services.async_register(
DOMAIN,
SVC_RESET_ZONE_OVERRIDE,
set_zone_override,
schema=RESET_ZONE_OVERRIDE_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SVC_SET_ZONE_OVERRIDE,
set_zone_override,
schema=SET_ZONE_OVERRIDE_SCHEMA,
)
class EvoBroker:
"""Container for evohome client and data."""
def __init__(self, hass, client, client_v1, store, params) -> None:
"""Initialize the evohome client and its data structure."""
self.hass = hass
self.client = client
self.client_v1 = client_v1
self._store = store
self.params = params
loc_idx = params[CONF_LOCATION_IDX]
self.config = client.installation_info[loc_idx][GWS][0][TCS][0]
self.tcs = client.locations[loc_idx]._gateways[0]._control_systems[0]
self.tcs_utc_offset = timedelta(
minutes=client.locations[loc_idx].timeZone[UTC_OFFSET]
)
self.temps = {}
async def save_auth_tokens(self) -> None:
"""Save access tokens and session IDs to the store for later use."""
# evohomeasync2 uses naive/local datetimes
access_token_expires = _dt_local_to_aware(self.client.access_token_expires)
app_storage = {CONF_USERNAME: self.client.username}
app_storage[REFRESH_TOKEN] = self.client.refresh_token
app_storage[ACCESS_TOKEN] = self.client.access_token
app_storage[ACCESS_TOKEN_EXPIRES] = access_token_expires.isoformat()
if self.client_v1 and self.client_v1.user_data:
app_storage[USER_DATA] = {
"userInfo": {"userID": self.client_v1.user_data["userInfo"]["userID"]},
"sessionId": self.client_v1.user_data["sessionId"],
}
else:
app_storage[USER_DATA] = None
await self._store.async_save(app_storage)
async def call_client_api(self, api_function, refresh=True) -> Any:
"""Call a client API."""
try:
result = await api_function
except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
if not _handle_exception(err):
return
if refresh:
self.hass.helpers.event.async_call_later(1, self.async_update())
return result
async def _update_v1(self, *args, **kwargs) -> None:
"""Get the latest high-precision temperatures of the default Location."""
def get_session_id(client_v1) -> Optional[str]:
user_data = client_v1.user_data if client_v1 else None
return user_data.get("sessionId") if user_data else None
session_id = get_session_id(self.client_v1)
try:
temps = list(await self.client_v1.temperatures(force_refresh=True))
except aiohttp.ClientError as err:
_LOGGER.warning(
"Unable to obtain the latest high-precision temperatures. "
"Check your network and the vendor's service status page. "
"Proceeding with low-precision temperatures. "
"Message is: %s",
err,
)
self.temps = None # these are now stale, will fall back to v2 temps
else:
if (
str(self.client_v1.location_id)
!= self.client.locations[self.params[CONF_LOCATION_IDX]].locationId
):
_LOGGER.warning(
"The v2 API's configured location doesn't match "
"the v1 API's default location (there is more than one location), "
"so the high-precision feature will be disabled"
)
self.client_v1 = self.temps = None
else:
self.temps = {str(i["id"]): i["temp"] for i in temps}
_LOGGER.debug("Temperatures = %s", self.temps)
if session_id != get_session_id(self.client_v1):
await self.save_auth_tokens()
async def _update_v2(self, *args, **kwargs) -> None:
"""Get the latest modes, temperatures, setpoints of a Location."""
access_token = self.client.access_token
loc_idx = self.params[CONF_LOCATION_IDX]
try:
status = await self.client.locations[loc_idx].status()
except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
_handle_exception(err)
else:
async_dispatcher_send(self.hass, DOMAIN)
_LOGGER.debug("Status = %s", status)
if access_token != self.client.access_token:
await self.save_auth_tokens()
async def async_update(self, *args, **kwargs) -> None:
"""Get the latest state data of an entire Honeywell TCC Location.
This includes state data for a Controller and all its child devices, such as the
operating mode of the Controller and the current temp of its children (e.g.
Zones, DHW controller).
"""
await self._update_v2()
if self.client_v1:
await self._update_v1()
# inform the evohome devices that state data has been updated
async_dispatcher_send(self.hass, DOMAIN)
class EvoDevice(Entity):
"""Base for any evohome device.
This includes the Controller, (up to 12) Heating Zones and (optionally) a
DHW controller.
"""
def __init__(self, evo_broker, evo_device) -> None:
"""Initialize the evohome entity."""
self._evo_device = evo_device
self._evo_broker = evo_broker
self._evo_tcs = evo_broker.tcs
self._unique_id = self._name = self._icon = self._precision = None
self._supported_features = None
self._device_state_attrs = {}
async def async_refresh(self, payload: Optional[dict] = None) -> None:
"""Process any signals."""
if payload is None:
self.async_schedule_update_ha_state(force_refresh=True)
return
if payload["unique_id"] != self._unique_id:
return
if payload["service"] in [SVC_SET_ZONE_OVERRIDE, SVC_RESET_ZONE_OVERRIDE]:
await self.async_zone_svc_request(payload["service"], payload["data"])
return
await self.async_tcs_svc_request(payload["service"], payload["data"])
async def async_tcs_svc_request(self, service: dict, data: dict) -> None:
"""Process a service request (system mode) for a controller."""
raise NotImplementedError
async def async_zone_svc_request(self, service: dict, data: dict) -> None:
"""Process a service request (setpoint override) for a zone."""
raise NotImplementedError
@property
def should_poll(self) -> bool:
"""Evohome entities should not be polled."""
return False
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return self._unique_id
@property
def name(self) -> str:
"""Return the name of the evohome entity."""
return self._name
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the evohome-specific state attributes."""
status = self._device_state_attrs
if "systemModeStatus" in status:
convert_until(status["systemModeStatus"], "timeUntil")
if "setpointStatus" in status:
convert_until(status["setpointStatus"], "until")
if "stateStatus" in status:
convert_until(status["stateStatus"], "until")
return {"status": convert_dict(status)}
@property
def icon(self) -> str:
"""Return the icon to use in the frontend UI."""
return self._icon
@property
def supported_features(self) -> int:
"""Get the flag of supported features of the device."""
return self._supported_features
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
async_dispatcher_connect(self.hass, DOMAIN, self.async_refresh)
@property
def precision(self) -> float:
"""Return the temperature precision to use in the frontend UI."""
return self._precision
@property
def temperature_unit(self) -> str:
"""Return the temperature unit to use in the frontend UI."""
return TEMP_CELSIUS
class EvoChild(EvoDevice):
"""Base for any evohome child.
This includes (up to 12) Heating Zones and (optionally) a DHW controller.
"""
def __init__(self, evo_broker, evo_device) -> None:
"""Initialize a evohome Controller (hub)."""
super().__init__(evo_broker, evo_device)
self._schedule = {}
self._setpoints = {}
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature of a Zone."""
if self._evo_broker.temps:
if self._evo_broker.temps[self._evo_device.zoneId] != 128:
return self._evo_broker.temps[self._evo_device.zoneId]
if self._evo_device.temperatureStatus["isAvailable"]:
return self._evo_device.temperatureStatus["temperature"]
@property
def setpoints(self) -> Dict[str, Any]:
"""Return the current/next setpoints from the schedule.
Only Zones & DHW controllers (but not the TCS) can have schedules.
"""
def _dt_evo_to_aware(dt_naive: dt, utc_offset: timedelta) -> dt:
dt_aware = dt_naive.replace(tzinfo=dt_util.UTC) - utc_offset
return dt_util.as_local(dt_aware)
if not self._schedule["DailySchedules"]:
return {} # no schedule {'DailySchedules': []}, so no scheduled setpoints
day_time = dt_util.now()
day_of_week = int(day_time.strftime("%w")) # 0 is Sunday
time_of_day = day_time.strftime("%H:%M:%S")
try:
# Iterate today's switchpoints until past the current time of day...
day = self._schedule["DailySchedules"][day_of_week]
sp_idx = -1 # last switchpoint of the day before
for i, tmp in enumerate(day["Switchpoints"]):
if time_of_day > tmp["TimeOfDay"]:
sp_idx = i # current setpoint
else:
break
# Did the current SP start yesterday? Does the next start SP tomorrow?
this_sp_day = -1 if sp_idx == -1 else 0
next_sp_day = 1 if sp_idx + 1 == len(day["Switchpoints"]) else 0
for key, offset, idx in [
("this", this_sp_day, sp_idx),
("next", next_sp_day, (sp_idx + 1) * (1 - next_sp_day)),
]:
sp_date = (day_time + timedelta(days=offset)).strftime("%Y-%m-%d")
day = self._schedule["DailySchedules"][(day_of_week + offset) % 7]
switchpoint = day["Switchpoints"][idx]
dt_aware = _dt_evo_to_aware(
dt_util.parse_datetime(f"{sp_date}T{switchpoint['TimeOfDay']}"),
self._evo_broker.tcs_utc_offset,
)
self._setpoints[f"{key}_sp_from"] = dt_aware.isoformat()
try:
self._setpoints[f"{key}_sp_temp"] = switchpoint["heatSetpoint"]
except KeyError:
self._setpoints[f"{key}_sp_state"] = switchpoint["DhwState"]
except IndexError:
self._setpoints = {}
_LOGGER.warning(
"Failed to get setpoints, report as an issue if this error persists",
exc_info=True,
)
return self._setpoints
async def _update_schedule(self) -> None:
"""Get the latest schedule, if any."""
if "DailySchedules" in self._schedule and not self._schedule["DailySchedules"]:
if not self._evo_device.setpointStatus["setpointMode"] == EVO_FOLLOW:
return # avoid unnecessary I/O - there's nothing to update
self._schedule = await self._evo_broker.call_client_api(
self._evo_device.schedule(), refresh=False
)
_LOGGER.debug("Schedule['%s'] = %s", self.name, self._schedule)
async def async_update(self) -> None:
"""Get the latest state data."""
next_sp_from = self._setpoints.get("next_sp_from", "2000-01-01T00:00:00+00:00")
if dt_util.now() >= dt_util.parse_datetime(next_sp_from):
await self._update_schedule() # no schedule, or it's out-of-date
self._device_state_attrs = {"setpoints": self.setpoints}
|
MichaelNedzelsky/intellij-community | refs/heads/master | python/testData/mover/multiLineSelection_afterUp.py | 166 | class Test(object):
def q(self):
c = 3
<selection> a = 1
b = 2
<caret></selection> |
fharenheit/template-spark-app | refs/heads/master | src/main/python/streaming/kafka_wordcount.py | 83 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in UTF8 encoded, '\n' delimited text received from the network every second.
Usage: kafka_wordcount.py <zk> <topic>
To run this on your local machine, you need to setup Kafka and create a producer first, see
http://kafka.apache.org/documentation.html#quickstart
and then run the example
`$ bin/spark-submit --jars \
external/kafka-assembly/target/scala-*/spark-streaming-kafka-assembly-*.jar \
examples/src/main/python/streaming/kafka_wordcount.py \
localhost:2181 test`
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: kafka_wordcount.py <zk> <topic>", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="PythonStreamingKafkaWordCount")
ssc = StreamingContext(sc, 1)
zkQuorum, topic = sys.argv[1:]
kvs = KafkaUtils.createStream(ssc, zkQuorum, "spark-streaming-consumer", {topic: 1})
lines = kvs.map(lambda x: x[1])
counts = lines.flatMap(lambda line: line.split(" ")) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a+b)
counts.pprint()
ssc.start()
ssc.awaitTermination()
|
ZEROFAIL/goblin | refs/heads/master | goblin/provider.py | 2 | class Provider:
"""Superclass for provider plugins"""
DEFAULT_OP_ARGS = {}
@classmethod
def get_default_op_args(cls, processor):
return cls.DEFAULT_OP_ARGS.get(processor, dict())
class TinkerGraph(Provider): # TODO
"""Default provider"""
@staticmethod
def get_hashable_id(val):
return val
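# Usage sketch (hypothetical provider, for illustration only): subclasses can
# predeclare default arguments per processor via DEFAULT_OP_ARGS.
#
#   class MyProvider(Provider):
#       DEFAULT_OP_ARGS = {'traversal': {'language': 'gremlin-groovy'}}
#
#   MyProvider.get_default_op_args('traversal')
#   # -> {'language': 'gremlin-groovy'}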
|
j00bar/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/ec2_asg_facts.py | 44 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_asg_facts
short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
description:
- Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
version_added: "2.2"
author: "Rob White (@wimnat)"
options:
name:
description:
- The prefix or name of the auto scaling group(s) you are searching for.
- "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
required: false
tags:
description:
- >
A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
group(s) you are searching for.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Find all groups
- ec2_asg_facts:
register: asgs
# Find a group with matching name/prefix
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
# Find a group with matching tags
- ec2_asg_facts:
tags:
project: webapp
env: production
register: asgs
# Find a group with matching name/prefix and tags
- ec2_asg_facts:
name: myproject
tags:
env: production
register: asgs
# Fail if no groups are found
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
failed_when: "{{ asgs.results | length == 0 }}"
# Fail if more than 1 group is found
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
failed_when: "{{ asgs.results | length > 1 }}"
'''
RETURN = '''
---
auto_scaling_group_arn:
description: The Amazon Resource Name of the ASG
returned: success
type: string
sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
auto_scaling_group_name:
description: Name of autoscaling group
returned: success
type: str
sample: "public-webapp-production-1"
availability_zones:
description: List of Availability Zones that are enabled for this ASG.
returned: success
type: list
sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
created_time:
description: The date and time this ASG was created, in ISO 8601 format.
returned: success
type: string
sample: "2015-11-25T00:05:36.309Z"
default_cooldown:
description: The default cooldown time in seconds.
returned: success
type: int
sample: 300
desired_capacity:
description: The number of EC2 instances that should be running in this group.
returned: success
type: int
sample: 3
health_check_period:
description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
returned: success
type: int
sample: 30
health_check_type:
description: The service you want the health status from, one of "EC2" or "ELB".
returned: success
type: str
sample: "ELB"
instances:
description: List of EC2 instances and their status as it relates to the ASG.
returned: success
type: list
sample: [
{
"availability_zone": "us-west-2a",
"health_status": "Healthy",
"instance_id": "i-es22ad25",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": "false"
}
]
launch_configuration_name:
description: Name of launch configuration associated with the ASG.
returned: success
type: str
sample: "public-webapp-production-1"
load_balancer_names:
description: List of load balancers names attached to the ASG.
returned: success
type: list
sample: ["elb-webapp-prod"]
max_size:
description: Maximum size of group
returned: success
type: int
sample: 3
min_size:
description: Minimum size of group
returned: success
type: int
sample: 1
new_instances_protected_from_scale_in:
description: Whether or not new instances a protected from automatic scaling in.
returned: success
type: boolean
sample: "false"
placement_group:
description: Placement group into which instances are launched, if any.
returned: success
type: str
sample: None
status:
description: The current state of the group when DeleteAutoScalingGroup is in progress.
returned: success
type: str
sample: None
tags:
description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
returned: success
type: list
sample: [
{
"key": "Name",
"value": "public-webapp-production-1",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
},
{
"key": "env",
"value": "production",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
}
]
termination_policies:
description: A list of termination policies for the group.
returned: success
type: str
sample: ["Default"]
'''
import re
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def match_asg_tags(tags_to_match, asg):
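    # for/else: the else branch runs only when the inner loop finishes without
    # a break, i.e. a requested tag was not found on the ASG at all.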
for key, value in tags_to_match.items():
for tag in asg['Tags']:
if key == tag['Key'] and value == tag['Value']:
break
else:
return False
return True
def find_asgs(conn, module, name=None, tags=None):
"""
Args:
conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
name (str): Optional name of the ASG you are looking for.
tags (dict): Optional dictionary of tags and values to search for.
Basic Usage:
>>> name = 'public-webapp-production'
>>> tags = { 'env': 'production' }
>>> conn = boto3.client('autoscaling', region_name='us-west-2')
>>> results = find_asgs(name, conn)
Returns:
List
[
{
"auto_scaling_group_arn": (
"arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
"autoScalingGroupName/public-webapp-production"
),
"auto_scaling_group_name": "public-webapp-production",
"availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
"created_time": "2016-02-02T23:28:42.481000+00:00",
"default_cooldown": 300,
"desired_capacity": 2,
"enabled_metrics": [],
"health_check_grace_period": 300,
"health_check_type": "ELB",
"instances":
[
{
"availability_zone": "us-west-2c",
"health_status": "Healthy",
"instance_id": "i-047a12cb",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": false
},
{
"availability_zone": "us-west-2a",
"health_status": "Healthy",
"instance_id": "i-7a29df2c",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": false
}
],
"launch_configuration_name": "public-webapp-production-1",
"load_balancer_names": ["public-webapp-production-lb"],
"max_size": 4,
"min_size": 2,
"new_instances_protected_from_scale_in": false,
"placement_group": None,
"status": None,
"suspended_processes": [],
"tags":
[
{
"key": "Name",
"propagate_at_launch": true,
"resource_id": "public-webapp-production",
"resource_type": "auto-scaling-group",
"value": "public-webapp-production"
},
{
"key": "env",
"propagate_at_launch": true,
"resource_id": "public-webapp-production",
"resource_type": "auto-scaling-group",
"value": "production"
}
],
"termination_policies":
[
"Default"
],
"vpc_zone_identifier":
[
"subnet-a1b1c1d1",
"subnet-a2b2c2d2",
"subnet-a3b3c3d3"
]
}
]
"""
try:
asgs_paginator = conn.get_paginator('describe_auto_scaling_groups')
asgs = asgs_paginator.paginate().build_full_result()
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
matched_asgs = []
if name is not None:
        # the user specified a name: pre-compile it as a prefix regex
name_prog = re.compile(r'^' + name)
for asg in asgs['AutoScalingGroups']:
if name:
matched_name = name_prog.search(asg['AutoScalingGroupName'])
else:
matched_name = True
if tags:
matched_tags = match_asg_tags(tags, asg)
else:
matched_tags = True
if matched_name and matched_tags:
matched_asgs.append(camel_dict_to_snake_dict(asg))
return matched_asgs
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str'),
tags=dict(type='dict'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
asg_name = module.params.get('name')
asg_tags = module.params.get('tags')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
autoscaling = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
PankeshGupta/pynotes | refs/heads/master | use_of_super.py | 1 | #author : pankesh gupta
#email : post4pankesh@gmail.com
# this is an example of overiding and concept of super in python
class Contact():
    all_contacts = list()
def __init__(self,name,email):
self.name = name
self.email = email
Contact.all_contacts.append(self)
def try0(self):
print("hi im inside the contacts class")
def try1(self):
print("hi this method is for testing the tr1 in contacts")
print("this method is an instace method")
    def try2(self):
        print("this is try2, this method is inside the Contact class")
class Friend(Contact):
def __init__(self,name,email,ph):
super().__init__(name,email)
self.ph = ph
def try0(self):
        super().try0()
print("hi i m inside the class friends")
if __name__=="main":
main()
#=========== console output ======== #
#>>> c = Contact('c','c@gmail.com')
#>>> f = Friend('f','f@gmail.com',1234)
#>>> f.try0()
#hi im inside the contacts class
#hi i m inside the class friends
#>>> f.try1()
#hi this method is for testing the try1 in contacts
#this method is an instance method
#>>> f.try1
#<bound method Contact.try1 of <__main__.Friend object at 0x028ED4F0>>
#>>> c.try1
#<bound method Contact.try1 of <__main__.Contact object at 0x028D2BD0>>
#>>>
|
ZuluPro/namebench | refs/heads/master | nb_third_party/jinja2/tests.py | 285 | # -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.runtime import Undefined
# nose, nothing here to test
__test__ = False
number_re = re.compile(r'^-?\d+(\.\d+)?$')
regex_type = type(number_re)
try:
test_callable = callable
except NameError:
def test_callable(x):
return hasattr(x, '__call__')
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
"""Return true if the variable is defined:
.. sourcecode:: jinja
{% if variable is defined %}
value of variable: {{ variable }}
{% else %}
variable is not defined
{% endif %}
See the :func:`default` filter for a simple way to set undefined
variables.
"""
return not isinstance(value, Undefined)
def test_undefined(value):
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_lower(value):
"""Return true if the variable is lowercased."""
return unicode(value).islower()
def test_upper(value):
"""Return true if the variable is uppercased."""
return unicode(value).isupper()
def test_string(value):
"""Return true if the object is a string."""
return isinstance(value, basestring)
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, (int, long, float, complex))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, '__html__')
TESTS = {
'odd': test_odd,
'even': test_even,
'divisibleby': test_divisibleby,
'defined': test_defined,
'undefined': test_undefined,
'none': test_none,
'lower': test_lower,
'upper': test_upper,
'string': test_string,
'number': test_number,
'sequence': test_sequence,
'iterable': test_iterable,
'callable': test_callable,
'sameas': test_sameas,
'escaped': test_escaped
}
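
# A minimal usage sketch (assuming a standard jinja2 Environment; the TESTS
# mapping above is what backs the "is" operator in templates):
#
#   from jinja2 import Environment
#   env = Environment()
#   env.from_string('{{ 4 is even }}').render()             # -> u'True'
#   env.from_string('{{ 9 is divisibleby(3) }}').render()   # -> u'True'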
|
ykaneko/quantum | refs/heads/master | quantum/plugins/services/agent_loadbalancer/plugin.py | 2 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
from quantum.common import exceptions as q_exc
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.db import api as qdbapi
from quantum.db.loadbalancer import loadbalancer_db
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.openstack.common.rpc import proxy
from quantum.plugins.common import constants
LOG = logging.getLogger(__name__)
ACTIVE_PENDING = (
constants.ACTIVE,
constants.PENDING_CREATE,
constants.PENDING_UPDATE
)
class LoadBalancerCallbacks(object):
RPC_API_VERSION = '1.0'
def __init__(self, plugin):
self.plugin = plugin
def create_rpc_dispatcher(self):
return q_rpc.PluginRpcDispatcher([self])
def get_ready_devices(self, context, host=None):
with context.session.begin(subtransactions=True):
qry = (context.session.query(loadbalancer_db.Pool.id).
join(loadbalancer_db.Vip))
qry = qry.filter(loadbalancer_db.Vip.status.in_(ACTIVE_PENDING))
qry = qry.filter(loadbalancer_db.Pool.status.in_(ACTIVE_PENDING))
up = True # makes pep8 and sqlalchemy happy
qry = qry.filter(loadbalancer_db.Vip.admin_state_up == up)
qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up)
return [id for id, in qry]
def get_logical_device(self, context, pool_id=None, activate=True,
**kwargs):
with context.session.begin(subtransactions=True):
qry = context.session.query(loadbalancer_db.Pool)
qry = qry.filter_by(id=pool_id)
pool = qry.one()
if activate:
# set all resources to active
if pool.status in ACTIVE_PENDING:
pool.status = constants.ACTIVE
if pool.vip.status in ACTIVE_PENDING:
pool.vip.status = constants.ACTIVE
for m in pool.members:
if m.status in ACTIVE_PENDING:
m.status = constants.ACTIVE
for hm in pool.monitors:
if hm.healthmonitor.status in ACTIVE_PENDING:
hm.healthmonitor.status = constants.ACTIVE
if (pool.status != constants.ACTIVE
or pool.vip.status != constants.ACTIVE):
raise Exception(_('Expected active pool and vip'))
retval = {}
retval['pool'] = self.plugin._make_pool_dict(pool)
retval['vip'] = self.plugin._make_vip_dict(pool.vip)
retval['vip']['port'] = (
self.plugin._core_plugin._make_port_dict(pool.vip.port)
)
for fixed_ip in retval['vip']['port']['fixed_ips']:
fixed_ip['subnet'] = (
self.plugin._core_plugin.get_subnet(
context,
fixed_ip['subnet_id']
)
)
retval['members'] = [
self.plugin._make_member_dict(m)
for m in pool.members if m.status == constants.ACTIVE
]
retval['healthmonitors'] = [
self.plugin._make_health_monitor_dict(hm.healthmonitor)
for hm in pool.monitors
if hm.healthmonitor.status == constants.ACTIVE
]
return retval
def pool_destroyed(self, context, pool_id=None, host=None):
"""Agent confirmation hook that a pool has been destroyed.
This method exists for subclasses to change the deletion
behavior.
"""
pass
def plug_vip_port(self, context, port_id=None, host=None):
if not port_id:
return
try:
port = self.plugin._core_plugin.get_port(
context,
port_id
)
except q_exc.PortNotFound:
msg = _('Unable to find port %s to plug.')
LOG.debug(msg, port_id)
return
port['admin_state_up'] = True
port['device_owner'] = 'quantum:' + constants.LOADBALANCER
port['device_id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host)))
self.plugin._core_plugin.update_port(
context,
port_id,
{'port': port}
)
def unplug_vip_port(self, context, port_id=None, host=None):
if not port_id:
return
try:
port = self.plugin._core_plugin.get_port(
context,
port_id
)
except q_exc.PortNotFound:
msg = _('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.')
LOG.debug(msg, port_id)
return
port['admin_state_up'] = False
port['device_owner'] = ''
port['device_id'] = ''
try:
self.plugin._core_plugin.update_port(
context,
port_id,
{'port': port}
)
except q_exc.PortNotFound:
msg = _('Unable to find port %s to unplug. This can occur when '
'the Vip has been deleted first.')
LOG.debug(msg, port_id)
def update_pool_stats(self, context, pool_id=None, stats=None, host=None):
# TODO(markmcclain): add stats collection
pass
class LoadBalancerAgentApi(proxy.RpcProxy):
"""Plugin side of plugin to agent RPC API."""
API_VERSION = '1.0'
def __init__(self, topic, host):
super(LoadBalancerAgentApi, self).__init__(topic, self.API_VERSION)
self.host = host
def reload_pool(self, context, pool_id):
return self.cast(
context,
self.make_msg('reload_pool', pool_id=pool_id, host=self.host),
topic=self.topic
)
def destroy_pool(self, context, pool_id):
return self.cast(
context,
self.make_msg('destroy_pool', pool_id=pool_id, host=self.host),
topic=self.topic
)
def modify_pool(self, context, pool_id):
return self.cast(
context,
self.make_msg('modify_pool', pool_id=pool_id, host=self.host),
topic=self.topic
)
class LoadBalancerPlugin(loadbalancer_db.LoadBalancerPluginDb):
"""Implementation of the Quantum Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
supported_extension_aliases = ["lbaas"]
def __init__(self):
"""Do the initialization for the loadbalancer service plugin here."""
qdbapi.register_models()
self.callbacks = LoadBalancerCallbacks(self)
self.conn = rpc.create_connection(new=True)
self.conn.create_consumer(
topics.LOADBALANCER_PLUGIN,
self.callbacks.create_rpc_dispatcher(),
fanout=False)
self.conn.consume_in_thread()
self.agent_rpc = LoadBalancerAgentApi(
topics.LOADBALANCER_AGENT,
cfg.CONF.host
)
def get_plugin_type(self):
return constants.LOADBALANCER
def get_plugin_description(self):
return "Quantum LoadBalancer Service Plugin"
def create_vip(self, context, vip):
vip['vip']['status'] = constants.PENDING_CREATE
v = super(LoadBalancerPlugin, self).create_vip(context, vip)
self.agent_rpc.reload_pool(context, v['pool_id'])
return v
def update_vip(self, context, id, vip):
if 'status' not in vip['vip']:
vip['vip']['status'] = constants.PENDING_UPDATE
v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
if v['status'] in ACTIVE_PENDING:
self.agent_rpc.reload_pool(context, v['pool_id'])
else:
self.agent_rpc.destroy_pool(context, v['pool_id'])
return v
def delete_vip(self, context, id):
vip = self.get_vip(context, id)
super(LoadBalancerPlugin, self).delete_vip(context, id)
self.agent_rpc.destroy_pool(context, vip['pool_id'])
def create_pool(self, context, pool):
p = super(LoadBalancerPlugin, self).create_pool(context, pool)
# don't notify here because a pool needs a vip to be useful
return p
def update_pool(self, context, id, pool):
if 'status' not in pool['pool']:
pool['pool']['status'] = constants.PENDING_UPDATE
p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
if p['status'] in ACTIVE_PENDING:
self.agent_rpc.reload_pool(context, p['id'])
else:
self.agent_rpc.destroy_pool(context, p['id'])
return p
def delete_pool(self, context, id):
super(LoadBalancerPlugin, self).delete_pool(context, id)
self.agent_rpc.destroy_pool(context, id)
def create_member(self, context, member):
m = super(LoadBalancerPlugin, self).create_member(context, member)
self.agent_rpc.modify_pool(context, m['pool_id'])
return m
def update_member(self, context, id, member):
if 'status' not in member['member']:
member['member']['status'] = constants.PENDING_UPDATE
m = super(LoadBalancerPlugin, self).update_member(context, id, member)
self.agent_rpc.modify_pool(context, m['pool_id'])
return m
def delete_member(self, context, id):
m = self.get_member(context, id)
super(LoadBalancerPlugin, self).delete_member(context, id)
self.agent_rpc.modify_pool(context, m['pool_id'])
def update_health_monitor(self, context, id, health_monitor):
if 'status' not in health_monitor['health_monitor']:
health_monitor['health_monitor']['status'] = (
constants.PENDING_UPDATE
)
hm = super(LoadBalancerPlugin, self).update_health_monitor(
context,
id,
health_monitor
)
with context.session.begin(subtransactions=True):
qry = context.session.query(
loadbalancer_db.PoolMonitorAssociation
)
qry = qry.filter_by(monitor_id=hm['id'])
for assoc in qry:
self.agent_rpc.modify_pool(context, assoc['pool_id'])
return hm
def delete_health_monitor(self, context, id):
with context.session.begin(subtransactions=True):
qry = context.session.query(
loadbalancer_db.PoolMonitorAssociation
)
qry = qry.filter_by(monitor_id=id)
pool_ids = [a['pool_id'] for a in qry]
super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
for pid in pool_ids:
self.agent_rpc.modify_pool(context, pid)
def create_pool_health_monitor(self, context, health_monitor, pool_id):
retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
context,
health_monitor,
pool_id
)
self.agent_rpc.modify_pool(context, pool_id)
return retval
def delete_pool_health_monitor(self, context, id, pool_id):
retval = super(LoadBalancerPlugin, self).delete_pool_health_monitor(
context,
id,
pool_id
)
self.agent_rpc.modify_pool(context, pool_id)
return retval
|
fabianvf/scrapi | refs/heads/develop | scrapi/harvesters/vtech.py | 9 | """
Harvests Virginia Tech VTechWorks metadata for ingestion into the SHARE service
Information about VTechWorks at https://github.com/CenterForOpenScience/SHARE/blob/master/providers/edu.vt.vtechworks.md
Example API call: http://vtechworks.lib.vt.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc&from=2014-09-29
"""
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class VTechHarvester(OAIHarvester):
short_name = 'vtech'
long_name = 'Virginia Tech VTechWorks'
url = 'https://vtechworks.lib.vt.edu'
base_url = 'http://vtechworks.lib.vt.edu/oai/request'
property_list = [
'type', 'source', 'format', 'date',
'identifier', 'setSpec', 'rights', 'relation'
]
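
# Minimal usage sketch (an assumption for illustration: the OAIHarvester base
# class is expected to expose a harvest() entry point taking a date range --
# check scrapi.base before relying on the exact signature):
#
#   harvester = VTechHarvester()
#   raw_docs = harvester.harvest(start_date='2014-09-29', end_date='2014-09-30')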
|
kernelci/kernelci-backend | refs/heads/kernelci.org | app/utils/batch/tests/test_batch_common.py | 1 | # Copyright (C) Collabora Limited 2018
# Author: Guillaume Tucker <guillaume.tucker@collabora.com>
# Author: Ana Guerrero Lopez <ana.guerrero@collabora.com>
#
# Copyright (C) Linaro Limited 2014,2015
# Author: Milo Casagrande <milo.casagrande@linaro.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
from utils.batch.batch_op import (
BatchBuildOperation,
BatchCountOperation,
BatchDistinctOperation,
BatchJobOperation,
BatchOperation,
BatchTestCaseOperation,
BatchTestGroupOperation
)
from utils.batch.common import (
create_batch_operation,
get_batch_query_args
)
class TestBatch(unittest.TestCase):
def test_get_batch_query_args_empty(self):
query = ""
self.assertEqual({}, get_batch_query_args(query))
def test_get_batch_query_base_case(self):
query = "?foo=bar"
expected = {"foo": ["bar"]}
self.assertEqual(expected, get_batch_query_args(query))
def test_get_batch_query_base_case_wrong(self):
query = "?foo"
self.assertEqual({}, get_batch_query_args(query))
def test_get_batch_query_base_case_wrong_and_correct(self):
query = "?foo&bar=foo"
expected = {"bar": ["foo"]}
self.assertEqual(expected, get_batch_query_args(query))
def test_get_batch_query_simple_with_question(self):
query = "?foo=bar&bar=foo"
expected = {"bar": ["foo"], "foo": ["bar"]}
self.assertEqual(expected, get_batch_query_args(query))
def test_get_batch_query_simple_no_question(self):
query = "foo=bar&bar=foo"
expected = {"bar": ["foo"], "foo": ["bar"]}
self.assertEqual(expected, get_batch_query_args(query))
def test_get_batch_query_multiple_values(self):
query = "bar=foo&foo=bar&bar=foo&foo=baz&bar=foo"
expected = {"foo": ["baz", "bar"], "bar": ["foo"]}
self.assertEqual(expected, get_batch_query_args(query))
def test_create_batch_op_generic(self):
json_obj = {
"resource": "build",
"query": "status=FAIL&job=mainline",
"operation_id": "foo"
}
op = create_batch_operation(json_obj, {})
self.assertIsInstance(op, BatchOperation)
def test_create_batch_op_count(self):
json_obj = {
"resource": "count",
"document": "build",
"query": "status=FAIL&job=mainline",
"operation_id": "op_id"
}
op = create_batch_operation(json_obj, {})
self.assertIsInstance(op, BatchCountOperation)
self.assertEqual("build", op.document)
self.assertEqual("count", op.resource)
self.assertEqual("op_id", op.operation_id)
def test_create_batch_op_none(self):
op = create_batch_operation(None, None)
self.assertIsNone(op)
def test_create_batch_op_no_collection(self):
json_obj = {
"resource": "foo",
"query": "status=FAIL&job=mainline"
}
op = create_batch_operation(json_obj, {})
self.assertIsNone(op)
def test_create_batch_job_op(self):
json_obj = {
"resource": "job",
"query": "status=PASS&job=foo",
"operation_id": "foo"
}
op = create_batch_operation(json_obj, {})
self.assertIsInstance(op, BatchJobOperation)
self.assertEqual("job", op.resource)
def test_create_batch_build_op(self):
json_obj = {
"resource": "build",
"query": "status=PASS&job=foo",
"operation_id": "foo"
}
op = create_batch_operation(json_obj, {})
self.assertIsInstance(op, BatchBuildOperation)
self.assertEqual("build", op.resource)
def test_create_batch_test_case_op(self):
json_obj = {
"resource": "test_case",
"query": "status=PASS&job=foo",
"operation_id": "foo"
}
op = create_batch_operation(json_obj, {})
self.assertIsInstance(op, BatchTestCaseOperation)
self.assertEqual("test_case", op.resource)
def test_create_batch_test_group_op(self):
json_obj = {
"resource": "test_group",
"query": "status=PASS&job=foo",
"operation_id": "foo"
}
op = create_batch_operation(json_obj, {})
self.assertIsInstance(op, BatchTestGroupOperation)
self.assertEqual("test_group", op.resource)
def test_create_batch_distinct(self):
json_obj = {
"resource": "build",
"distinct": "arch",
"operation_id": "distinct-arch"
}
op = create_batch_operation(json_obj, {})
self.assertIsInstance(op, BatchDistinctOperation)
self.assertEqual("arch", op.distinct)
|
alfredoavanzosc/odoomrp-wip-1 | refs/heads/8.0 | quality_control/models/qc_test.py | 15 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api, exceptions, _
class QcTest(models.Model):
"""A test is a group of questions to with the values that make them valid.
"""
_name = 'qc.test'
_description = 'Quality control test'
@api.multi
def _links_get(self):
link_obj = self.env['res.request.link']
return [(r.object, r.name) for r in link_obj.search([])]
active = fields.Boolean('Active', default=True)
name = fields.Char(
string='Name', required=True, translate=True, select=True)
test_lines = fields.One2many(
comodel_name='qc.test.question', inverse_name='test',
string='Questions', copy=True)
object_id = fields.Reference(
string='Reference object', selection=_links_get,)
fill_correct_values = fields.Boolean(
string='Pre-fill with correct values')
type = fields.Selection(
[('generic', 'Generic'),
('related', 'Related')],
string='Type', select=True, required=True, default='generic')
category = fields.Many2one(
comodel_name='qc.test.category', string='Category')
company_id = fields.Many2one(
comodel_name='res.company', string='Company',
default=lambda self: self.env['res.company']._company_default_get(
'qc.test'))
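
# A minimal creation sketch (assuming a standard Odoo 8 environment `env`; the
# field values are illustrative only):
#
#   test = env['qc.test'].create({
#       'name': 'Visual inspection',
#       'type': 'generic',
#   })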
class QcTestQuestion(models.Model):
"""Each test line is a question with its valid value(s)."""
_name = 'qc.test.question'
_description = 'Quality control question'
_order = 'sequence, id'
@api.one
@api.constrains('ql_values')
def _check_valid_answers(self):
if self.type == 'quantitative':
return
for value in self.ql_values:
if value.ok:
return
raise exceptions.Warning(
_("There isn't any value with OK marked. You have to mark at "
"least one."))
@api.one
@api.constrains('min_value', 'max_value')
def _check_valid_range(self):
if self.type == 'qualitative':
return
if self.min_value > self.max_value:
raise exceptions.Warning(
_("Minimum value can't be higher than maximum value."))
sequence = fields.Integer(
string='Sequence', required=True, default="10")
test = fields.Many2one(comodel_name='qc.test', string='Test')
name = fields.Char(
string='Name', required=True, select=True, translate=True)
type = fields.Selection(
[('qualitative', 'Qualitative'),
('quantitative', 'Quantitative')], string='Type', required=True)
ql_values = fields.One2many(
comodel_name='qc.test.question.value', inverse_name="test_line",
string='Qualitative values', copy=True)
notes = fields.Text(string='Notes')
    min_value = fields.Float(string='Min', digits=(16, 5))
    max_value = fields.Float(string='Max', digits=(16, 5))
uom_id = fields.Many2one(comodel_name='product.uom', string='Uom')
class QcTestQuestionValue(models.Model):
_name = 'qc.test.question.value'
_description = 'Possible values of qualitative questions.'
test_line = fields.Many2one(
comodel_name="qc.test.question", string="Test question")
name = fields.Char(
string='Name', required=True, select=True, translate=True)
ok = fields.Boolean(
string='Correct answer?',
help="When this field is marked, the answer is considered correct.")
|
davehensley/fanduel-nba-optimizer | refs/heads/master | nba-optimizer.py | 1 | # NBA Optimizer
#
# by Dave Hensley
#
# Picks an ideal fantasy NBA team using a modified knapsack algorithm
#
# Usage: python nba-optimizer.py players.csv
import csv, sys
from ortools.linear_solver import pywraplp
salaryCap = 60000
def getPositionNumber(name):
return {
'Center': 0,
'Point Guard': 1,
'Power Forward' : 2,
'Shooting Guard': 3,
'Small Forward': 4
}[name]
def main(players, salaryCap):
solver = pywraplp.Solver('CoinsGridCLP', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
rangeC = range(len(players[0]))
rangePG = range(len(players[1]))
rangePF = range(len(players[2]))
rangeSG = range(len(players[3]))
rangeSF = range(len(players[4]))
takeC = [solver.IntVar(0, 1, 'takeC[%i]' % j) for j in rangeC]
takePG = [solver.IntVar(0, 1, 'takePG[%i]' % j) for j in rangePG]
takePF = [solver.IntVar(0, 1, 'takePF[%i]' % j) for j in rangePF]
takeSG = [solver.IntVar(0, 1, 'takeSG[%i]' % j) for j in rangeSG]
takeSF = [solver.IntVar(0, 1, 'takeSF[%i]' % j) for j in rangeSF]
teamsC = []
teamsPG = []
teamsPF = []
teamsSG = []
teamsSF = []
    for teamNumber in range(0, 30):  # 30 NBA teams; CSV team ids run 1..30
teamsC.insert(teamNumber, solver.Sum([(players[0][i][3] == teamNumber + 1) * takeC[i] for i in rangeC]))
teamsPG.insert(teamNumber, solver.Sum([(players[1][i][3] == teamNumber + 1) * takePG[i] for i in rangePG]))
teamsPF.insert(teamNumber, solver.Sum([(players[2][i][3] == teamNumber + 1) * takePF[i] for i in rangePF]))
teamsSG.insert(teamNumber, solver.Sum([(players[3][i][3] == teamNumber + 1) * takeSG[i] for i in rangeSG]))
teamsSF.insert(teamNumber, solver.Sum([(players[4][i][3] == teamNumber + 1) * takeSF[i] for i in rangeSF]))
valueC = solver.Sum([players[0][i][1] * takeC[i] for i in rangeC])
valuePG = solver.Sum([players[1][i][1] * takePG[i] for i in rangePG])
valuePF = solver.Sum([players[2][i][1] * takePF[i] for i in rangePF])
valueSG = solver.Sum([players[3][i][1] * takeSG[i] for i in rangeSG])
valueSF = solver.Sum([players[4][i][1] * takeSF[i] for i in rangeSF])
salaryC = solver.Sum([players[0][i][2] * takeC[i] for i in rangeC])
salaryPG = solver.Sum([players[1][i][2] * takePG[i] for i in rangePG])
salaryPF = solver.Sum([players[2][i][2] * takePF[i] for i in rangePF])
salarySG = solver.Sum([players[3][i][2] * takeSG[i] for i in rangeSG])
salarySF = solver.Sum([players[4][i][2] * takeSF[i] for i in rangeSF])
solver.Add(salaryC + salaryPG + salaryPF + salarySG + salarySF <= salaryCap)
solver.Add(solver.Sum(takeC[i] for i in rangeC) == 1)
solver.Add(solver.Sum(takePG[i] for i in rangePG) == 2)
solver.Add(solver.Sum(takePF[i] for i in rangePF) == 2)
solver.Add(solver.Sum(takeSG[i] for i in rangeSG) == 2)
solver.Add(solver.Sum(takeSF[i] for i in rangeSF) == 2)
# Max 4 players per team
    for i in range(0, 30):
solver.Add(teamsC[i] + teamsPG[i] + teamsPF[i] + teamsSG[i] + teamsSF[i] <= 4)
solver.Maximize(valueC + valuePG + valuePF + valueSG + valueSF)
solver.Solve()
assert solver.VerifySolution(1e-7, True)
print 'Solved in', solver.wall_time(), 'milliseconds!', "\n"
salary = 0
for i in rangeC:
if (takeC[i].SolutionValue()):
salary += players[0][i][2]
print players[0][i][0], '(C): ${:,d}'.format(players[0][i][2]), '(' + str(players[0][i][1]) + ')'
for i in rangePG:
if (takePG[i].SolutionValue()):
salary += players[1][i][2]
print players[1][i][0], '(PG): ${:,d}'.format(players[1][i][2]), '(' + str(players[1][i][1]) + ')'
for i in rangePF:
if (takePF[i].SolutionValue()):
salary += players[2][i][2]
print players[2][i][0], '(PF): ${:,d}'.format(players[2][i][2]), '(' + str(players[2][i][1]) + ')'
for i in rangeSG:
if (takeSG[i].SolutionValue()):
salary += players[3][i][2]
print players[3][i][0], '(SG): ${:,d}'.format(players[3][i][2]), '(' + str(players[3][i][1]) + ')'
for i in rangeSF:
if (takeSF[i].SolutionValue()):
salary += players[4][i][2]
print players[4][i][0], '(SF): ${:,d}'.format(players[4][i][2]), '(' + str(players[4][i][1]) + ')'
print "\n", 'Total: ${:,d}'.format(salary), '(' + str(solver.Objective().Value()) + ')'
if (len(sys.argv) < 2):
print 'Usage:', sys.executable, sys.argv[0], 'players.csv'
sys.exit(1)
players = [[], [], [], [], []]
with open(sys.argv[1], 'rb') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
players[getPositionNumber(row['Subposition'])].append(
[row['Name'], float(row['Value']), int(row['Salary']), int(row['Team'])]
)
main(players, salaryCap)
|
rudidudi/Trusis | refs/heads/master | AdminLTE-master/bower_components/bootstrap-datepicker/docs/conf.py | 171 | # -*- coding: utf-8 -*-
#
# bootstrap-datepicker documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 2 14:45:57 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ''
# The full version, including alpha/beta/rc tags.
#release = ''
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
primary_domain = 'js'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bootstrap-datepicker'
copyright = u'2016, eternicode'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'javascript'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['_themes',]
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bootstrap-datepickerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bootstrap-datepicker.tex', u'bootstrap-datepicker Documentation',
u'eternicode', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bootstrap-datepicker', u'bootstrap-datepicker Documentation',
[u'eternicode'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bootstrap-datepicker', u'bootstrap-datepicker Documentation',
u'eternicode', 'bootstrap-datepicker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
danielblazevski/mpi-python-wordcount-runningmedian | refs/heads/master | src/partitionMetaData.py | 2 |
"""
This gets the meta-data containing filenames, location in MB on where to start
and the filesize
This is embedded in an iterative loop over process rank to decide which files
and or chunks of files to send to each process
Initial plan was to include the MPI send/recv here.
Kept this part wrapped to make more modular/easier to read
"""
import os
from mpi4py import MPI
def partitionMetaData(maxReadSize, input_dir, input_files, fCount_init, chunk, AddedFiles):
runningFileSize = 0.0
fCount = fCount_init
Nfiles = len(input_files)
filesnames_tmp = []
byteStart_tmp = []
filesize_tmp = []
while runningFileSize < maxReadSize:
currentFileSize = os.path.getsize(input_dir + input_files[fCount])
        if chunk*maxReadSize < currentFileSize: ### large file: hand out the next
            filesnames_tmp.append(input_files[fCount]) ### chunk, starting at byte (chunk - 1)*maxReadSize
byteStart_tmp.append((chunk - 1)*maxReadSize)
filesize_tmp.append(currentFileSize)
chunk += 1
AddedFiles = True
if chunk*maxReadSize > currentFileSize: # dump rest of file here if there is < 10MB remaining
filesnames_tmp.append(input_files[fCount])
byteStart_tmp.append((chunk -1)*maxReadSize)
filesize_tmp.append(currentFileSize)
fCount += 1
AddedFiles = True
if fCount >= Nfiles:
break
chunk = 1
break
else:
filesnames_tmp.append(input_files[fCount]) ## File is < 10 MB in size, add whole file
byteStart_tmp.append((chunk-1)*maxReadSize)
filesize_tmp.append(currentFileSize)
fCount += 1
runningFileSize += currentFileSize
            AddedFiles = True
if fCount >= Nfiles:
break
return filesnames_tmp, byteStart_tmp, filesize_tmp, fCount, chunk, AddedFiles
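
# Minimal usage sketch (assumes this runs under mpiexec with mpi4py available;
# the 10 MB chunk size matches the comments above):
#
#   input_dir = 'input/'                           # illustrative path
#   input_files = sorted(os.listdir(input_dir))
#   fCount, chunk = 0, 1
#   for rank in range(MPI.COMM_WORLD.Get_size()):  # loop over process ranks
#       names, starts, sizes, fCount, chunk, added = partitionMetaData(
#           10*1024*1024, input_dir, input_files, fCount, chunk, False)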
|
viveksh13/gymkhana | refs/heads/master | venv/bin/venv/lib/python2.7/site-packages/flask/testsuite/test_apps/config_module_app.py | 1257 | import os
import flask
here = os.path.abspath(os.path.dirname(__file__))
app = flask.Flask(__name__)
|
todaychi/hue | refs/heads/master | apps/oozie/src/oozie/migrations/0015_auto__add_field_dataset_advanced_start_instance__add_field_dataset_ins.py | 39 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Dataset.advanced_start_instance'
db.add_column('oozie_dataset', 'advanced_start_instance', self.gf('django.db.models.fields.CharField')(default='0', max_length=128), keep_default=False)
# Adding field 'Dataset.instance_choice'
db.add_column('oozie_dataset', 'instance_choice', self.gf('django.db.models.fields.CharField')(default='default', max_length=10), keep_default=False)
# Adding field 'Dataset.advanced_end_instance'
db.add_column('oozie_dataset', 'advanced_end_instance', self.gf('django.db.models.fields.CharField')(default='0', max_length=128, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Dataset.advanced_start_instance'
db.delete_column('oozie_dataset', 'advanced_start_instance')
# Deleting field 'Dataset.instance_choice'
db.delete_column('oozie_dataset', 'instance_choice')
# Deleting field 'Dataset.advanced_end_instance'
db.delete_column('oozie_dataset', 'advanced_end_instance')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 6, 19, 26, 33, 676504)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 3, 19, 26, 33, 676468)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 3, 19, 26, 33, 677121)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.decision': {
'Meta': {'object_name': 'Decision'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
'oozie.generic': {
'Meta': {'object_name': 'Generic'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
|
dpiers/coderang-meteor | refs/heads/master | public/jsrepl/extern/python/unclosured/lib/python2.7/Cookie.py | 65 | #!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create a Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it falls back to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
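# For example (illustrative round-trip, using the default translator below):
#   _quote("fudge")        -> 'fudge'              (all chars legal, unquoted)
#   _quote("fudge;ripple") -> '"fudge\073ripple"'  (';' octal-escaped, quoted)
#   _unquote('"fudge\073ripple"') -> 'fudge;ripple'
#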
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
def _quote(str, LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in doublequotes and precede quote (with a \)
# special characters.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( int(str[j+1:j+4], 8) ) )
i = j+4
return _nulljoin(res)
# end _unquote
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
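# For example (illustrative; the actual value depends on the current time):
#   _getdate(300) might return 'Sat, 04-Nov-2000 21:12:09 GMT',
#   i.e. an expiry five minutes (300 seconds) from now.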
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
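# A minimal sketch of direct Morsel usage (illustrative):
#   M = Morsel()
#   M.set("name", "value", "value")   # key, decoded value, coded value
#   M["path"] = "/"
#   M.OutputString()                  -> 'name=value; Path=/'
#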
class Morsel(dict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for K in self._reserved:
dict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return K.lower() in self._reserved
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % ( self.OutputString(attrs).replace('"',r'\"'), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = self.items()
items.sort()
for K,V in items:
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d" % (self._reserved[K], V))
elif K == "secure":
RA(str(self._reserved[K]))
elif K == "httponly":
RA(str(self._reserved[K]))
else:
RA("%s=%s" % (self._reserved[K], V))
# Return the result
return _semispacejoin(result)
# end OutputString
# end Morsel class
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
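# For example (illustrative):
#   m = _CookiePattern.search('chips=ahoy; vienna=finger')
#   m.group("key"), m.group("val")  -> ('chips', 'ahoy')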
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(dict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
# end value_encode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.output(attrs, header) )
return sep.join(result)
# end output
__str__ = output
def __repr__(self):
L = []
items = self.items()
items.sort()
for K,V in items:
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.js_output(attrs) )
return _nulljoin(result)
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for k, v in rawdata.items():
self[k] = v
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif K.lower() in Morsel._reserved:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("SerialCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie
class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie
###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################
def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end:
|
GustavoHennig/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_maintenancemode.py | 27 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, VMware, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_maintenancemode
short_description: Place a host into maintenance mode
description:
    - Place an ESXi host into maintenance mode
- Support for VSAN compliant maintenance mode when selected
author: "Jay Jahns <jjahns@vmware.com>"
version_added: "2.1"
notes:
- Tested on vSphere 5.5 and 6.0
requirements:
- "python >= 2.6"
- PyVmomi
options:
esxi_hostname:
description:
- Name of the host as defined in vCenter
required: True
    vsan:
description:
- Specify which VSAN compliant mode to enter
choices:
- 'ensureObjectAccessibility'
- 'evacuateAllData'
- 'noAction'
required: False
evacuate:
description:
- If True, evacuate all powered off VMs
choices:
- True
- False
default: False
required: False
timeout:
description:
- Specify a timeout for the operation
required: False
default: 0
state:
description:
- Enter or exit maintenance mode
choices:
- present
- absent
default: present
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Enter VSAN-Compliant Maintenance Mode
local_action:
module: vmware_maintenancemode
hostname: vc_host
username: vc_user
password: vc_pass
esxi_hostname: esxi.host.example
vsan: ensureObjectAccessibility
evacuate: yes
timeout: 3600
state: present
'''
RETURN = '''
hostsystem:
description: Name of vim reference
returned: always
type: string
sample: "'vim.HostSystem:host-236'"
hostname:
description: Name of host in vCenter
returned: always
type: string
sample: "esxi.local.domain"
status:
description: Action taken
    returned: always
type: string
sample: "ENTER"
'''
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def EnterMaintenanceMode(module, host):
if host.runtime.inMaintenanceMode:
module.exit_json(
changed=False,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='NO_ACTION',
msg='Host already in maintenance mode')
spec = vim.host.MaintenanceSpec()
if module.params['vsan']:
spec.vsanMode = vim.vsan.host.DecommissionMode()
spec.vsanMode.objectAction = module.params['vsan']
try:
task = host.EnterMaintenanceMode_Task(
module.params['timeout'],
module.params['evacuate'],
spec)
success, result = wait_for_task(task)
return dict(changed=success,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='ENTER',
msg='Host entered maintenance mode')
except TaskError:
module.fail_json(
msg='Host failed to enter maintenance mode')
def ExitMaintenanceMode(module, host):
if not host.runtime.inMaintenanceMode:
module.exit_json(
changed=False,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='NO_ACTION',
msg='Host not in maintenance mode')
try:
task = host.ExitMaintenanceMode_Task(
module.params['timeout'])
success, result = wait_for_task(task)
return dict(changed=success,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='EXIT',
msg='Host exited maintenance mode')
except TaskError:
module.fail_json(
msg='Host failed to exit maintenance mode')
def main():
spec = vmware_argument_spec()
spec.update(dict(
esxi_hostname=dict(required=True),
vsan=dict(required=False, choices=['ensureObjectAccessibility',
'evacuateAllData',
'noAction']),
evacuate=dict(required=False, type='bool', default=False),
timeout=dict(required=False, default=0, type='int'),
state=dict(required=False,
default='present',
choices=['present', 'absent'])))
module = AnsibleModule(argument_spec=spec)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
content = connect_to_api(module)
host = find_hostsystem_by_name(content, module.params['esxi_hostname'])
if not host:
module.fail_json(
msg='Host not found in vCenter')
if module.params['state'] == 'present':
result = EnterMaintenanceMode(module, host)
elif module.params['state'] == 'absent':
result = ExitMaintenanceMode(module, host)
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.vmware import *
if __name__ == '__main__':
main()
|
40223139/203739test | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/unittest/test/_test_warnings.py | 858 | # helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and checks that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
warnings.warn('rw', RuntimeWarning)
class TestWarnings(unittest.TestCase):
# unittest warnings will be printed at most once per type (max one message
# for the fail* methods, and one for the assert* methods)
def test_assert(self):
self.assertEquals(2+2, 4)
self.assertEquals(2*2, 4)
self.assertEquals(2**2, 4)
def test_fail(self):
self.failUnless(1)
self.failUnless(True)
def test_other_unittest(self):
self.assertAlmostEqual(2+2, 4)
self.assertNotAlmostEqual(4+4, 2)
# these warnings are normally silenced, but they are printed in unittest
def test_deprecation(self):
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
def test_import(self):
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
# user warnings should always be printed
def test_warning(self):
warnings.warn('uw')
warnings.warn('uw')
warnings.warn('uw')
# these warnings come from the same place; they will be printed
# only once by default or three times if the 'always' filter is used
def test_function(self):
warnfun()
warnfun()
warnfun()
if __name__ == '__main__':
with warnings.catch_warnings(record=True) as ws:
# if an arg is provided pass it to unittest.main as 'warnings'
if len(sys.argv) == 2:
unittest.main(exit=False, warnings=sys.argv.pop())
else:
unittest.main(exit=False)
# print all the warning messages collected
for w in ws:
print(w.message)
|
CamelBackNotation/CarnotKE | refs/heads/master | jyhton/lib-python/2.7/encodings/cp932.py | 817 | #
# cp932.py: Python Unicode Codec for CP932
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('cp932')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='cp932',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
gorjuce/odoo | refs/heads/8.0 | addons/product_extended/product_extended.py | 185 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
class product_template(osv.osv):
_name = 'product.template'
_inherit = 'product.template'
def compute_price(self, cr, uid, product_ids, template_ids=False, recursive=False, test=False, real_time_accounting = False, context=None):
'''
        Compute cost prices for the given product or template ids
        (multiple ids may be passed at once). When test=True, nothing is
        written: a dict of the prices that would be set is returned so the
        user can review the changes. When test=False, prices are written
        and True is returned.
'''
testdict = {}
if product_ids:
ids = product_ids
model = 'product.product'
else:
ids = template_ids
model = 'product.template'
for prod_id in ids:
bom_obj = self.pool.get('mrp.bom')
if model == 'product.product':
bom_id = bom_obj._bom_find(cr, uid, product_id=prod_id, context=context)
else:
bom_id = bom_obj._bom_find(cr, uid, product_tmpl_id=prod_id, context=context)
if bom_id:
# In recursive mode, it will first compute the prices of child boms
if recursive:
#Search the products that are components of this bom of prod_id
bom = bom_obj.browse(cr, uid, bom_id, context=context)
#Call compute_price on these subproducts
prod_set = set([x.product_id.id for x in bom.bom_line_ids])
res = self.compute_price(cr, uid, list(prod_set), recursive=recursive, test=test, real_time_accounting = real_time_accounting, context=context)
if test:
testdict.update(res)
#Use calc price to calculate and put the price on the product of the BoM if necessary
price = self._calc_price(cr, uid, bom_obj.browse(cr, uid, bom_id, context=context), test=test, real_time_accounting = real_time_accounting, context=context)
if test:
testdict.update({prod_id : price})
if test:
return testdict
else:
return True
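    # A minimal usage sketch (illustrative; tmpl_id is assumed to exist):
    #   changes = self.pool['product.template'].compute_price(
    #       cr, uid, [], template_ids=[tmpl_id], test=True)
    #   # -> {tmpl_id: computed_price, ...}; nothing is written in test mode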
def _calc_price(self, cr, uid, bom, test = False, real_time_accounting=False, context=None):
if context is None:
context={}
price = 0
uom_obj = self.pool.get("product.uom")
tmpl_obj = self.pool.get('product.template')
for sbom in bom.bom_line_ids:
my_qty = sbom.product_qty / sbom.product_efficiency
if not sbom.attribute_value_ids:
# No attribute_value_ids means the bom line is not variant specific
price += uom_obj._compute_price(cr, uid, sbom.product_id.uom_id.id, sbom.product_id.standard_price, sbom.product_uom.id) * my_qty
if bom.routing_id:
for wline in bom.routing_id.workcenter_lines:
wc = wline.workcenter_id
cycle = wline.cycle_nbr
hour = (wc.time_start + wc.time_stop + cycle * wc.time_cycle) * (wc.time_efficiency or 1.0)
price += wc.costs_cycle * cycle + wc.costs_hour * hour
price = self.pool.get('product.uom')._compute_price(cr,uid,bom.product_uom.id, price, bom.product_id.uom_id.id)
#Convert on product UoM quantities
if price > 0:
price = uom_obj._compute_price(cr, uid, bom.product_uom.id, price / bom.product_qty, bom.product_id.uom_id.id)
product = tmpl_obj.browse(cr, uid, bom.product_tmpl_id.id, context=context)
if not test:
if (product.valuation != "real_time" or not real_time_accounting):
tmpl_obj.write(cr, uid, [product.id], {'standard_price' : price}, context=context)
else:
#Call wizard function here
wizard_obj = self.pool.get("stock.change.standard.price")
ctx = context.copy()
ctx.update({'active_id': product.id, 'active_model': 'product.template'})
wiz_id = wizard_obj.create(cr, uid, {'new_price': price}, context=ctx)
wizard_obj.change_price(cr, uid, [wiz_id], context=ctx)
return price
class product_bom(osv.osv):
_inherit = 'mrp.bom'
_columns = {
'standard_price': fields.related('product_tmpl_id','standard_price',type="float",relation="product.product",string="Standard Price",store=False)
}
product_bom()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hvanderlaan/ikea-smartlight | refs/heads/master | tradfri-authenticate.py | 1 | #!/usr/bin/env python
# file : tradfri-authenticate.py
# purpose : authenticate api user and generate configuration file
#
# author : maltejur
# date : 2020/10/24
"""
tradfri-authenticate.py - authenticate api user and generate configuration file
This module requires libcoap compiled with dTLS; at the moment there is no Python CoAP module
that supports CoAP with dTLS. See ../bin/README for how to compile libcoap with dTLS support.
"""
# pylint convention disablement:
# C0103 -> invalid-name
# C0200 -> consider-using-enumerate
# pylint: disable=C0200, C0103
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import ConfigParser
from tradfri import tradfriActions
def main():
""" main function """
conf = ConfigParser.ConfigParser()
script_dir = os.path.dirname(os.path.realpath(__file__))
conf.read(script_dir + '/tradfri.cfg')
hubip = raw_input("\nhub ip:\t\t")
securityCode = raw_input("security code: \t")
print("\n[ ] acquiring api key ...", end="")
apiuser, apikey = tradfriActions.tradfri_authenticate(hubip, securityCode)
print("\r[+]\n\nuser:\t{}\napikey:\t{}\n".format(apiuser, apikey))
print("[ ] writing configuration file ...", end="")
conf.add_section('tradfri')
conf.set("tradfri", "hubip", hubip)
conf.set("tradfri", "apiuser", apiuser)
conf.set("tradfri", "apikey", apikey)
    with open(script_dir + '/tradfri.cfg', 'w') as configfile:
        conf.write(configfile)
print("\r[+]\n")
print("all done!\n")
if __name__ == "__main__":
main()
sys.exit(0)
|
JFriel/honours_project | refs/heads/master | networkx/build/lib/networkx/testing/__init__.py | 103 | from networkx.testing.utils import *
|
Darkduke68/MatchMaker | refs/heads/master | profiles/views.py | 1 | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import Http404
from profiles.forms import ProfileUpdateForm
from matches.models import Match
# Create your views here.
@login_required
def profile_update(request):
if request.method == 'POST':
form = ProfileUpdateForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
return redirect('profile')
else:
return render(request, 'profiles/update.html', {'form': form})
else:
form = ProfileUpdateForm(instance=request.user)
context = {
'form': form,
'user': request.user,
}
return render(request, 'profiles/update.html', context)
@login_required
def profile_get(request):
if request.method == 'GET':
return redirect('profile-single', pk=request.user.id)
return Http404
@login_required
def profile_single(request, pk):
try:
user = User.objects.get(pk=pk)
    except User.DoesNotExist:
user = None
if user and request.method == 'GET':
match, created = Match.objects.get_or_create_match(user_a=request.user, user_b=user)
context = {
'curr_user': request.user,
'view_user': user,
'match_percent': match.percent,
}
return render(request, 'profiles/main.html', context)
return Http404
|
Jorge-Rodriguez/ansible | refs/heads/devel | lib/ansible/modules/cloud/digital_ocean/digital_ocean_block_storage.py | 8 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_block_storage
short_description: Create/destroy or attach/detach Block Storage volumes in DigitalOcean
description:
- Create/destroy Block Storage volume in DigitalOcean, or attach/detach Block Storage volume to a droplet.
version_added: "2.2"
options:
command:
description:
- Which operation do you want to perform.
choices: ['create', 'attach']
required: true
state:
description:
- Indicate desired state of the target.
choices: ['present', 'absent']
required: true
block_size:
description:
- The size of the Block Storage volume in gigabytes. Required when command=create and state=present. If snapshot_id is included, this will be ignored.
volume_name:
description:
- The name of the Block Storage volume.
required: true
description:
description:
- Description of the Block Storage volume.
region:
description:
- The slug of the region where your Block Storage volume should be located in. If snapshot_id is included, this will be ignored.
required: true
snapshot_id:
version_added: "2.5"
description:
- The snapshot id you would like the Block Storage volume created with. If included, region and block_size will be ignored and changed to null.
droplet_id:
description:
- The droplet id you want to operate on. Required when command=attach.
extends_documentation_fragment: digital_ocean.documentation
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
They both refer to the v2 token.
- If snapshot_id is used, region and block_size will be ignored and changed to null.
author:
- "Harnek Sidhu (@harneksidhu)"
'''
EXAMPLES = '''
# Create new Block Storage
- digital_ocean_block_storage:
state: present
command: create
api_token: <TOKEN>
region: nyc1
block_size: 10
volume_name: nyc1-block-storage
# Delete Block Storage
- digital_ocean_block_storage:
state: absent
command: create
api_token: <TOKEN>
region: nyc1
volume_name: nyc1-block-storage
# Attach Block Storage to a Droplet
- digital_ocean_block_storage:
state: present
command: attach
api_token: <TOKEN>
volume_name: nyc1-block-storage
region: nyc1
droplet_id: <ID>
# Detach Block Storage from a Droplet
- digital_ocean_block_storage:
state: absent
command: attach
api_token: <TOKEN>
volume_name: nyc1-block-storage
region: nyc1
droplet_id: <ID>
'''
RETURN = '''
id:
description: Unique identifier of a Block Storage volume returned during creation.
returned: changed
type: str
sample: "69b25d9a-494c-12e6-a5af-001f53126b44"
'''
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
class DOBlockStorageException(Exception):
pass
class DOBlockStorage(object):
def __init__(self, module):
self.module = module
self.rest = DigitalOceanHelper(module)
def get_key_or_fail(self, k):
v = self.module.params[k]
if v is None:
self.module.fail_json(msg='Unable to load %s' % k)
return v
def poll_action_for_complete_status(self, action_id):
url = 'actions/{}'.format(action_id)
end_time = time.time() + self.module.params['timeout']
while time.time() < end_time:
time.sleep(2)
response = self.rest.get(url)
status = response.status_code
json = response.json
if status == 200:
if json['action']['status'] == 'completed':
return True
elif json['action']['status'] == 'errored':
raise DOBlockStorageException(json['message'])
raise DOBlockStorageException('Unable to reach api.digitalocean.com')
def get_attached_droplet_ID(self, volume_name, region):
        url = 'volumes?name={}&region={}'.format(volume_name, region)
response = self.rest.get(url)
status = response.status_code
json = response.json
if status == 200:
volumes = json['volumes']
if len(volumes) > 0:
droplet_ids = volumes[0]['droplet_ids']
if len(droplet_ids) > 0:
return droplet_ids[0]
return None
else:
raise DOBlockStorageException(json['message'])
def attach_detach_block_storage(self, method, volume_name, region, droplet_id):
data = {
'type': method,
'volume_name': volume_name,
'region': region,
'droplet_id': droplet_id
}
response = self.rest.post('volumes/actions', data=data)
status = response.status_code
json = response.json
if status == 202:
return self.poll_action_for_complete_status(json['action']['id'])
elif status == 200:
return True
elif status == 422:
return False
else:
raise DOBlockStorageException(json['message'])
def create_block_storage(self):
volume_name = self.get_key_or_fail('volume_name')
snapshot_id = self.module.params['snapshot_id']
if snapshot_id:
self.module.params['block_size'] = None
self.module.params['region'] = None
block_size = None
region = None
else:
block_size = self.get_key_or_fail('block_size')
region = self.get_key_or_fail('region')
description = self.module.params['description']
data = {
'size_gigabytes': block_size,
'name': volume_name,
'description': description,
'region': region,
'snapshot_id': snapshot_id,
}
response = self.rest.post("volumes", data=data)
status = response.status_code
json = response.json
if status == 201:
self.module.exit_json(changed=True, id=json['volume']['id'])
elif status == 409 and json['id'] == 'conflict':
self.module.exit_json(changed=False)
else:
raise DOBlockStorageException(json['message'])
def delete_block_storage(self):
volume_name = self.get_key_or_fail('volume_name')
region = self.get_key_or_fail('region')
        url = 'volumes?name={}&region={}'.format(volume_name, region)
attached_droplet_id = self.get_attached_droplet_ID(volume_name, region)
if attached_droplet_id is not None:
self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id)
response = self.rest.delete(url)
status = response.status_code
json = response.json
if status == 204:
self.module.exit_json(changed=True)
elif status == 404:
self.module.exit_json(changed=False)
else:
raise DOBlockStorageException(json['message'])
def attach_block_storage(self):
volume_name = self.get_key_or_fail('volume_name')
region = self.get_key_or_fail('region')
droplet_id = self.get_key_or_fail('droplet_id')
attached_droplet_id = self.get_attached_droplet_ID(volume_name, region)
if attached_droplet_id is not None:
if attached_droplet_id == droplet_id:
self.module.exit_json(changed=False)
else:
self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id)
changed_status = self.attach_detach_block_storage('attach', volume_name, region, droplet_id)
self.module.exit_json(changed=changed_status)
def detach_block_storage(self):
volume_name = self.get_key_or_fail('volume_name')
region = self.get_key_or_fail('region')
droplet_id = self.get_key_or_fail('droplet_id')
changed_status = self.attach_detach_block_storage('detach', volume_name, region, droplet_id)
self.module.exit_json(changed=changed_status)
def handle_request(module):
block_storage = DOBlockStorage(module)
command = module.params['command']
state = module.params['state']
if command == 'create':
if state == 'present':
block_storage.create_block_storage()
elif state == 'absent':
block_storage.delete_block_storage()
elif command == 'attach':
if state == 'present':
block_storage.attach_block_storage()
elif state == 'absent':
block_storage.detach_block_storage()
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
state=dict(choices=['present', 'absent'], required=True),
command=dict(choices=['create', 'attach'], required=True),
block_size=dict(type='int', required=False),
volume_name=dict(type='str', required=True),
description=dict(type='str'),
region=dict(type='str', required=False),
snapshot_id=dict(type='str', required=False),
droplet_id=dict(type='int')
)
module = AnsibleModule(argument_spec=argument_spec)
try:
handle_request(module)
except DOBlockStorageException as e:
module.fail_json(msg=e.message, exception=traceback.format_exc())
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message, exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
mcbor/adventofcode | refs/heads/master | 2016/day01/day01-pt2.py | 1 | #!/usr/bin/env python3
# Advent of Code 2016 - Day 1, Part Two
# Using turtle graphics to find the location of Easter Bunny HQ.
# It's slow, but draws pretty maps.
import sys
import turtle
def distance():
    # Manhattan distance from the origin: |x| + |y|
    return round(abs(turtle.xcor()) + abs(turtle.ycor()))
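# For example (illustrative): following R2, L3 from the origin ends at
# (2, 3), i.e. 2 + 3 = 5 blocks away.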
def main(argv):
if len(argv) < 2:
print("Usage: {} puzzle.txt".format(argv[0]))
return 1
with open(argv[1]) as f:
visited = set()
# init turtle to face north
turtle.speed('fastest')
turtle.home()
turtle.setheading(90)
visited.add(turtle.position())
for line in f:
if line[0] == '#':
# skip comments
continue
for step in map(str.strip, line.split(',')):
direction = step[0]
moves = int(step[1:])
if direction == 'R':
turtle.right(90)
elif direction == 'L':
turtle.left(90)
else:
print("don't know about", direction)
return 1
for _ in range(moves):
turtle.forward(1)
if turtle.position() in visited:
print("Visited {} twice, we're there! Distance: {}".format(turtle.position(), distance()))
input("Press any key to exit...")
return 0
else:
visited.add(turtle.position())
print(step + ": " + str(turtle.position()))
print("Stopped at {}, Distance: {}".format(str(turtle.position()), distance()))
input("Press any key to exit...")
if __name__ == '__main__':
sys.exit(main(sys.argv)) |
fredkingham/blog-of-fred | refs/heads/master | django/contrib/comments/signals.py | 425 | """
Signals relating to comments.
"""
from django.dispatch import Signal
# Sent just before a comment will be posted (after it's been approved and
# moderated; this can be used to modify the comment (in place) with posting
# details or other such actions. If any receiver returns False the comment will be
# discarded and a 403 (not allowed) response. This signal is sent at more or less
# the same time (just before, actually) as the Comment object's pre-save signal,
# except that the HTTP request is sent along with this signal.
comment_will_be_posted = Signal(providing_args=["comment", "request"])
# Sent just after a comment was posted. See above for how this differs
# from the Comment object's post-save signal.
comment_was_posted = Signal(providing_args=["comment", "request"])
# Sent after a comment was "flagged" in some way. Check the flag to see if this
# was a user requesting removal of a comment, a moderator approving/removing a
# comment, or some other custom user flag.
comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
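# A minimal receiver sketch (illustrative) -- reject comments mentioning spam:
#
#   from django.contrib.comments.signals import comment_will_be_posted
#
#   def block_spam(sender, comment, request, **kwargs):
#       if 'spam' in comment.comment:
#           return False  # comment is discarded and a 403 is returned
#
#   comment_will_be_posted.connect(block_spam)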
|
denys-duchier/django | refs/heads/master | tests/migrations/test_autodetector.py | 35 | import functools
import re
from unittest import mock
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.core.validators import RegexValidator, validate_slug
from django.db import connection, models
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.state import ModelState, ProjectState
from django.test import TestCase, override_settings
from django.test.utils import isolate_lru_cache
from .models import FoodManager, FoodQuerySet
class DeconstructibleObject:
"""
A custom deconstructible object.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def deconstruct(self):
return (
self.__module__ + '.' + self.__class__.__name__,
self.args,
self.kwargs
)
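    # e.g. (illustrative; the module path depends on how the tests are run):
    #   DeconstructibleObject(1, a=2).deconstruct()
    #   -> ('migrations.test_autodetector.DeconstructibleObject', (1,), {'a': 2})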
class AutodetectorTests(TestCase):
"""
Tests the migration autodetector.
"""
author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_name = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
])
author_name_null = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, null=True)),
])
author_name_longer = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=400)),
])
author_name_renamed = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("names", models.CharField(max_length=200)),
])
author_name_default = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default='Ada Lovelace')),
])
author_dates_of_birth_auto_now = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("date_of_birth", models.DateField(auto_now=True)),
("date_time_of_birth", models.DateTimeField(auto_now=True)),
("time_of_birth", models.TimeField(auto_now=True)),
])
author_dates_of_birth_auto_now_add = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("date_of_birth", models.DateField(auto_now_add=True)),
("date_time_of_birth", models.DateTimeField(auto_now_add=True)),
("time_of_birth", models.TimeField(auto_now_add=True)),
])
author_name_deconstructible_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject())),
])
author_name_deconstructible_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject())),
])
author_name_deconstructible_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
])
author_name_deconstructible_4 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
])
author_name_deconstructible_list_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=[DeconstructibleObject(), 123])),
])
author_name_deconstructible_list_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=[DeconstructibleObject(), 123])),
])
author_name_deconstructible_list_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=[DeconstructibleObject(), 999])),
])
author_name_deconstructible_tuple_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=(DeconstructibleObject(), 123))),
])
author_name_deconstructible_tuple_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=(DeconstructibleObject(), 123))),
])
author_name_deconstructible_tuple_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=(DeconstructibleObject(), 999))),
])
author_name_deconstructible_dict_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default={
'item': DeconstructibleObject(), 'otheritem': 123
})),
])
author_name_deconstructible_dict_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default={
'item': DeconstructibleObject(), 'otheritem': 123
})),
])
author_name_deconstructible_dict_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default={
'item': DeconstructibleObject(), 'otheritem': 999
})),
])
author_name_nested_deconstructible_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
))),
])
author_name_nested_deconstructible_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
))),
])
author_name_nested_deconstructible_changed_arg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2-changed'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
))),
])
author_name_nested_deconstructible_extra_arg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
None,
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
))),
])
author_name_nested_deconstructible_changed_kwarg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c-changed')),
))),
])
author_name_nested_deconstructible_extra_kwarg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
c=None,
))),
])
author_custom_pk = ModelState("testapp", "Author", [("pk_field", models.IntegerField(primary_key=True))])
author_with_biography_non_blank = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField()),
("biography", models.TextField()),
])
author_with_biography_blank = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(blank=True)),
("biography", models.TextField(blank=True)),
])
author_with_book = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
author_with_book_order_wrt = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
], options={"order_with_respect_to": "book"})
author_renamed_with_book = ModelState("testapp", "Writer", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
author_with_publisher_string = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher_name", models.CharField(max_length=200)),
])
author_with_publisher = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
])
author_with_user = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("auth.User", models.CASCADE)),
])
author_with_custom_user = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("thirdapp.CustomUser", models.CASCADE)),
])
author_proxy = ModelState("testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",))
author_proxy_options = ModelState("testapp", "AuthorProxy", [], {
"proxy": True,
"verbose_name": "Super Author",
}, ("testapp.author", ))
author_proxy_notproxy = ModelState("testapp", "AuthorProxy", [], {}, ("testapp.author", ))
author_proxy_third = ModelState("thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_third_notproxy = ModelState("thirdapp", "AuthorProxy", [], {}, ("testapp.author", ))
author_proxy_proxy = ModelState("testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy", ))
author_unmanaged = ModelState("testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author", ))
author_unmanaged_managed = ModelState("testapp", "AuthorUnmanaged", [], {}, ("testapp.author", ))
author_unmanaged_default_pk = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_unmanaged_custom_pk = ModelState("testapp", "Author", [
("pk_field", models.IntegerField(primary_key=True)),
])
author_with_m2m = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher")),
])
author_with_m2m_blank = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", blank=True)),
])
author_with_m2m_through = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Contract")),
])
author_with_renamed_m2m_through = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Deal")),
])
author_with_former_m2m = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.CharField(max_length=100)),
])
author_with_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
], {
"permissions": [('can_hire', 'Can hire')],
"verbose_name": "Authi",
})
author_with_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_one"})
author_with_new_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_two"})
author_renamed_with_db_table_options = ModelState("testapp", "NewAuthor", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_one"})
author_renamed_with_new_db_table_options = ModelState("testapp", "NewAuthor", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_three"})
contract = ModelState("testapp", "Contract", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
])
contract_renamed = ModelState("testapp", "Deal", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
])
publisher = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
])
publisher_with_author = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("name", models.CharField(max_length=100)),
])
publisher_with_aardvark_author = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Aardvark", models.CASCADE)),
("name", models.CharField(max_length=100)),
])
publisher_with_book = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("otherapp.Book", models.CASCADE)),
("name", models.CharField(max_length=100)),
])
other_pony = ModelState("otherapp", "Pony", [
("id", models.AutoField(primary_key=True)),
])
other_pony_food = ModelState("otherapp", "Pony", [
("id", models.AutoField(primary_key=True)),
], managers=[
('food_qs', FoodQuerySet.as_manager()),
('food_mgr', FoodManager('a', 'b')),
('food_mgr_kwargs', FoodManager('x', 'y', 3, 4)),
])
other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))])
third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))])
book = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_proxy_fk = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("thirdapp.AuthorProxy", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_proxy_proxy_fk = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.AAuthorProxyProxy", models.CASCADE)),
])
book_migrations_fk = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.UnmigratedModel", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_with_no_author = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=200)),
])
book_with_author_renamed = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_with_field_and_author_renamed = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("writer", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_with_multiple_authors = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("authors", models.ManyToManyField("testapp.Author")),
("title", models.CharField(max_length=200)),
])
book_with_multiple_authors_through_attribution = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("authors", models.ManyToManyField("testapp.Author", through="otherapp.Attribution")),
("title", models.CharField(max_length=200)),
])
book_indexes = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"indexes": [models.Index(fields=["author", "title"], name="book_title_author_idx")],
})
book_unordered_indexes = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"indexes": [models.Index(fields=["title", "author"], name="book_author_title_idx")],
})
book_foo_together = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("author", "title")},
"unique_together": {("author", "title")},
})
book_foo_together_2 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "author")},
"unique_together": {("title", "author")},
})
book_foo_together_3 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("newfield", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "newfield")},
"unique_together": {("title", "newfield")},
})
book_foo_together_4 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("newfield2", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "newfield2")},
"unique_together": {("title", "newfield2")},
})
attribution = ModelState("otherapp", "Attribution", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
edition = ModelState("thirdapp", "Edition", [
("id", models.AutoField(primary_key=True)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
custom_user = ModelState("thirdapp", "CustomUser", [
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
], bases=(AbstractBaseUser, ))
custom_user_no_inherit = ModelState("thirdapp", "CustomUser", [
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
])
aardvark = ModelState("thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_testapp = ModelState("testapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_based_on_author = ModelState("testapp", "Aardvark", [], bases=("testapp.Author", ))
aardvark_pk_fk_author = ModelState("testapp", "Aardvark", [
("id", models.OneToOneField("testapp.Author", models.CASCADE, primary_key=True)),
])
knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))])
rabbit = ModelState("eggs", "Rabbit", [
("id", models.AutoField(primary_key=True)),
("knight", models.ForeignKey("eggs.Knight", models.CASCADE)),
("parent", models.ForeignKey("eggs.Rabbit", models.CASCADE)),
], {
"unique_together": {("parent", "knight")},
"indexes": [models.Index(fields=["parent", "knight"], name='rabbit_circular_fk_index')],
})
def repr_changes(self, changes, include_dependencies=False):
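        """Return a human-readable summary of a changes dict for failure messages."""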
output = ""
for app_label, migrations in sorted(changes.items()):
output += " %s:\n" % app_label
for migration in migrations:
output += " %s\n" % migration.name
for operation in migration.operations:
output += " %s\n" % operation
if include_dependencies:
output += " Dependencies:\n"
if migration.dependencies:
for dep in migration.dependencies:
output += " %s\n" % (dep,)
else:
output += " None\n"
return output
def assertNumberMigrations(self, changes, app_label, number):
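        """Assert that `changes` contains exactly `number` migrations for `app_label`."""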
if len(changes.get(app_label, [])) != number:
self.fail("Incorrect number of migrations (%s) for %s (expected %s)\n%s" % (
len(changes.get(app_label, [])),
app_label,
number,
self.repr_changes(changes),
))
def assertMigrationDependencies(self, changes, app_label, position, dependencies):
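        """Assert the dependencies of the migration at `position` for `app_label`."""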
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < position + 1:
self.fail("No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)))
migration = changes[app_label][position]
if set(migration.dependencies) != set(dependencies):
self.fail("Migration dependencies mismatch for %s.%s (expected %s):\n%s" % (
app_label,
migration.name,
dependencies,
self.repr_changes(changes, include_dependencies=True),
))
def assertOperationTypes(self, changes, app_label, position, types):
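        """Assert the ordered operation class names of the migration at `position`."""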
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < position + 1:
self.fail("No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)))
migration = changes[app_label][position]
real_types = [operation.__class__.__name__ for operation in migration.operations]
if types != real_types:
self.fail("Operation type mismatch for %s.%s (expected %s):\n%s" % (
app_label,
migration.name,
types,
self.repr_changes(changes),
))
def assertOperationAttributes(self, changes, app_label, position, operation_position, **attrs):
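        """Assert attribute values on the operation at `operation_position` of a migration."""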
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < position + 1:
self.fail("No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)))
migration = changes[app_label][position]
        if len(migration.operations) < operation_position + 1:
self.fail("No operation at index %s for %s.%s\n%s" % (
operation_position,
app_label,
migration.name,
self.repr_changes(changes),
))
operation = migration.operations[operation_position]
for attr, value in attrs.items():
if getattr(operation, attr, None) != value:
self.fail("Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s" % (
app_label,
migration.name,
operation_position,
attr,
value,
getattr(operation, attr, None),
self.repr_changes(changes),
))
def assertOperationFieldAttributes(self, changes, app_label, position, operation_position, **attrs):
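        """Assert attribute values on the `field` of the operation at `operation_position`."""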
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < position + 1:
self.fail("No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)))
migration = changes[app_label][position]
        if len(migration.operations) < operation_position + 1:
self.fail("No operation at index %s for %s.%s\n%s" % (
operation_position,
app_label,
migration.name,
self.repr_changes(changes),
))
operation = migration.operations[operation_position]
if not hasattr(operation, 'field'):
self.fail("No field attribute for %s.%s op #%s." % (
app_label,
migration.name,
operation_position,
))
field = operation.field
for attr, value in attrs.items():
if getattr(field, attr, None) != value:
self.fail("Field attribute mismatch for %s.%s op #%s, field.%s (expected %r, got %r):\n%s" % (
app_label,
migration.name,
operation_position,
attr,
value,
getattr(field, attr, None),
self.repr_changes(changes),
))
def make_project_state(self, model_states):
"Shortcut to make ProjectStates from lists of predefined models"
project_state = ProjectState()
for model_state in model_states:
project_state.add_model(model_state.clone())
return project_state
def get_changes(self, before_states, after_states, questioner=None):
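        """Detect changes between two lists of model states, optionally via a questioner."""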
return MigrationAutodetector(
self.make_project_state(before_states),
self.make_project_state(after_states),
questioner,
)._detect_changes()
def test_arrange_for_graph(self):
"""Tests auto-naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("otherapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector.arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable")
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_trim_apps(self):
"""
Trim does not remove dependencies but does remove unwanted apps.
"""
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_initial": True}))
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector.arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, {"testapp"})
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_custom_migration_name(self):
"""Tests custom naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
migration_name = 'custom_name'
changes = autodetector.arrange_for_graph(changes, graph, migration_name)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_%s" % migration_name)
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_%s" % migration_name)
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_new_model(self):
"""Tests autodetection of new models."""
changes = self.get_changes([], [self.other_pony_food])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Pony")
self.assertEqual([name for name, mgr in changes['otherapp'][0].operations[0].managers],
['food_qs', 'food_mgr', 'food_mgr_kwargs'])
def test_old_model(self):
"""Tests deletion of old models."""
changes = self.get_changes([self.author_empty], [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
def test_add_field(self):
"""Tests autodetection of new fields."""
changes = self.get_changes([self.author_empty], [self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_date_fields_with_auto_now_not_asking_for_default(self, mocked_ask_method):
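        """Adding date/time fields with auto_now doesn't prompt for a not-null default."""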
changes = self.get_changes([self.author_empty], [self.author_dates_of_birth_auto_now])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField", "AddField"])
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now=True)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_date_fields_with_auto_now_add_not_asking_for_null_addition(self, mocked_ask_method):
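        """Adding date/time fields with auto_now_add doesn't prompt for a not-null addition."""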
changes = self.get_changes([self.author_empty], [self.author_dates_of_birth_auto_now_add])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField", "AddField"])
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_auto_now_add_addition')
def test_add_date_fields_with_auto_now_add_asking_for_default(self, mocked_ask_method):
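        """Adding date/time fields with auto_now_add prompts via ask_auto_now_add_addition once per field."""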
changes = self.get_changes([self.author_empty], [self.author_dates_of_birth_auto_now_add])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField", "AddField"])
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True)
self.assertEqual(mocked_ask_method.call_count, 3)
def test_remove_field(self):
"""Tests autodetection of removed fields."""
changes = self.get_changes([self.author_name], [self.author_empty])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RemoveField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
def test_alter_field(self):
"""Tests autodetection of new fields."""
changes = self.get_changes([self.author_name], [self.author_name_longer])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
def test_supports_functools_partial(self):
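        """
        functools.partial() values (e.g. upload_to callbacks) are compared by
        func, args, and keywords; identical partials produce no changes, while
        changed args or keywords are detected as an AlterField.
        """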
def _content_file_name(instance, filename, key, **kwargs):
return '{}/{}'.format(instance, filename)
def content_file_name(key, **kwargs):
return functools.partial(_content_file_name, key, **kwargs)
# An unchanged partial reference.
before = [ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("file", models.FileField(max_length=200, upload_to=content_file_name('file'))),
])]
after = [ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("file", models.FileField(max_length=200, upload_to=content_file_name('file'))),
])]
changes = self.get_changes(before, after)
self.assertNumberMigrations(changes, 'testapp', 0)
# A changed partial reference.
args_changed = [ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("file", models.FileField(max_length=200, upload_to=content_file_name('other-file'))),
])]
changes = self.get_changes(before, args_changed)
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ['AlterField'])
# Can't use assertOperationFieldAttributes because we need the
# deconstructed version, i.e., the exploded func/args/keywords rather
# than the partial: we don't care if it's not the same instance of the
# partial, only if it's the same source function, args, and keywords.
value = changes['testapp'][0].operations[0].field.upload_to
self.assertEqual(
(_content_file_name, ('other-file',), {}),
(value.func, value.args, value.keywords)
)
kwargs_changed = [ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("file", models.FileField(max_length=200, upload_to=content_file_name('file', spam='eggs'))),
])]
changes = self.get_changes(before, kwargs_changed)
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ['AlterField'])
value = changes['testapp'][0].operations[0].field.upload_to
self.assertEqual(
(_content_file_name, ('file',), {'spam': 'eggs'}),
(value.func, value.args, value.keywords)
)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_alter_field_to_not_null_with_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name_default])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default='Ada Lovelace')
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration',
return_value=models.NOT_PROVIDED)
def test_alter_field_to_not_null_without_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name])
self.assertEqual(mocked_ask_method.call_count, 1)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default=models.NOT_PROVIDED)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration',
return_value='Some Name')
def test_alter_field_to_not_null_oneoff_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name])
self.assertEqual(mocked_ask_method.call_count, 1)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=False)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default="Some Name")
def test_rename_field(self):
"""Tests autodetection of renamed fields."""
changes = self.get_changes(
[self.author_name], [self.author_name_renamed], MigrationQuestioner({"ask_rename": True})
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="name", new_name="names")
def test_rename_model(self):
"""Tests autodetection of renamed models."""
changes = self.get_changes(
[self.author_with_book, self.book],
[self.author_renamed_with_book, self.book_with_author_renamed],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="Author", new_name="Writer")
# Now that RenameModel handles related fields too, there should be
# no AlterField for the related field.
self.assertNumberMigrations(changes, 'otherapp', 0)
def test_rename_m2m_through_model(self):
"""
Tests autodetection of renamed models that are used in M2M relations as
through models.
"""
changes = self.get_changes(
[self.author_with_m2m_through, self.publisher, self.contract],
[self.author_with_renamed_m2m_through, self.publisher, self.contract_renamed],
MigrationQuestioner({'ask_rename_model': True})
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ['RenameModel'])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name='Contract', new_name='Deal')
def test_rename_model_with_renamed_rel_field(self):
"""
Tests autodetection of renamed models while simultaneously renaming one
of the fields that relate to the renamed model.
"""
changes = self.get_changes(
[self.author_with_book, self.book],
[self.author_renamed_with_book, self.book_with_field_and_author_renamed],
MigrationQuestioner({"ask_rename": True, "ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="Author", new_name="Writer")
# Right number/type of migrations for related field rename?
# Alter is already taken care of.
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, old_name="author", new_name="writer")
def test_rename_model_with_fks_in_different_position(self):
"""
#24537 - The order of fields in a model does not influence
the RenameModel detection.
"""
before = [
ModelState("testapp", "EntityA", [
("id", models.AutoField(primary_key=True)),
]),
ModelState("testapp", "EntityB", [
("id", models.AutoField(primary_key=True)),
("some_label", models.CharField(max_length=255)),
("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
]),
]
after = [
ModelState("testapp", "EntityA", [
("id", models.AutoField(primary_key=True)),
]),
ModelState("testapp", "RenamedEntityB", [
("id", models.AutoField(primary_key=True)),
("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
("some_label", models.CharField(max_length=255)),
]),
]
changes = self.get_changes(before, after, MigrationQuestioner({"ask_rename_model": True}))
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="EntityB", new_name="RenamedEntityB")
def test_fk_dependency(self):
"""Having a ForeignKey automatically adds a dependency."""
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (author),
# thirdapp (edition) depends on otherapp (book)
changes = self.get_changes([], [self.author_name, self.book, self.edition])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("testapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="Edition")
self.assertMigrationDependencies(changes, 'thirdapp', 0, [("otherapp", "auto_1")])
def test_proxy_fk_dependency(self):
"""FK dependencies still work on proxy models."""
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (authorproxy)
changes = self.get_changes([], [self.author_empty, self.author_proxy_third, self.book_proxy_fk])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("thirdapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="AuthorProxy")
self.assertMigrationDependencies(changes, 'thirdapp', 0, [("testapp", "auto_1")])
def test_same_app_no_fk_dependency(self):
"""
A migration with a FK between two models of the same app
does not have a dependency to itself.
"""
changes = self.get_changes([], [self.author_with_publisher, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
def test_circular_fk_dependency(self):
"""
Having a circular ForeignKey dependency automatically
resolves the situation into 2 migrations on one side and 1 on the other.
"""
changes = self.get_changes([], [self.author_with_book, self.book, self.publisher_with_book])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [("otherapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 2)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'otherapp', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'otherapp', 0, [])
self.assertMigrationDependencies(changes, 'otherapp', 1, [("otherapp", "auto_1"), ("testapp", "auto_1")])
# both split migrations should be `initial`
self.assertTrue(changes['otherapp'][0].initial)
self.assertTrue(changes['otherapp'][1].initial)
def test_same_app_circular_fk_dependency(self):
"""
A migration with a FK between two models of the same app does
not have a dependency to itself.
"""
changes = self.get_changes([], [self.author_with_publisher, self.publisher_with_author])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
def test_same_app_circular_fk_dependency_with_unique_together_and_indexes(self):
"""
        #22275 - A migration with a circular FK dependency does not try to
        create the unique_together constraint and indexes before all required
        fields are created.
"""
changes = self.get_changes([], [self.knight, self.rabbit])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'eggs', 1)
self.assertOperationTypes(
changes, 'eggs', 0, ["CreateModel", "CreateModel", "AddIndex", "AlterUniqueTogether"]
)
self.assertNotIn("unique_together", changes['eggs'][0].operations[0].options)
self.assertNotIn("unique_together", changes['eggs'][0].operations[1].options)
self.assertMigrationDependencies(changes, 'eggs', 0, [])
def test_alter_db_table_add(self):
"""Tests detection for adding db_table in model's options."""
changes = self.get_changes([self.author_empty], [self.author_with_db_table_options])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table="author_one")
def test_alter_db_table_change(self):
"""Tests detection for changing db_table in model's options'."""
changes = self.get_changes([self.author_with_db_table_options], [self.author_with_new_db_table_options])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table="author_two")
def test_alter_db_table_remove(self):
"""Tests detection for removing db_table in model's options."""
changes = self.get_changes([self.author_with_db_table_options], [self.author_empty])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table=None)
def test_alter_db_table_no_changes(self):
"""
        Altering db_table doesn't generate a migration if no changes have been made.
"""
changes = self.get_changes([self.author_with_db_table_options], [self.author_with_db_table_options])
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_keep_db_table_with_model_change(self):
"""
        Tests that when a model is renamed but its db_table stays as-is, the
        autodetector doesn't create more than one operation.
"""
changes = self.get_changes(
[self.author_with_db_table_options],
[self.author_renamed_with_db_table_options],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor")
def test_alter_db_table_with_model_change(self):
"""
        Tests that when both the model name and db_table change, the
        autodetector creates two operations.
"""
changes = self.get_changes(
[self.author_with_db_table_options],
[self.author_renamed_with_new_db_table_options],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel", "AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="newauthor", table="author_three")
def test_identical_regex_doesnt_alter(self):
from_state = ModelState(
"testapp", "model", [("id", models.AutoField(primary_key=True, validators=[
RegexValidator(
re.compile('^[-a-zA-Z0-9_]+\\Z'),
"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.",
'invalid'
)
]))]
)
to_state = ModelState(
"testapp", "model", [("id", models.AutoField(primary_key=True, validators=[validate_slug]))]
)
changes = self.get_changes([from_state], [to_state])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 0)
def test_different_regex_does_alter(self):
from_state = ModelState(
"testapp", "model", [("id", models.AutoField(primary_key=True, validators=[
RegexValidator(
re.compile('^[a-z]+\\Z', 32),
"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.",
'invalid'
)
]))]
)
to_state = ModelState(
"testapp", "model", [("id", models.AutoField(primary_key=True, validators=[validate_slug]))]
)
changes = self.get_changes([from_state], [to_state])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
def test_empty_foo_together(self):
"""
#23452 - Empty unique/index_together shouldn't generate a migration.
"""
# Explicitly testing for not specified, since this is the case after
# a CreateModel operation w/o any definition on the original model
model_state_not_specified = ModelState("a", "model", [("id", models.AutoField(primary_key=True))])
# Explicitly testing for None, since this was the issue in #23452 after
        # an AlterFooTogether operation with e.g. () as value
model_state_none = ModelState("a", "model", [
("id", models.AutoField(primary_key=True))
], {
"index_together": None,
"unique_together": None,
})
# Explicitly testing for the empty set, since we now always have sets.
# During removal (('col1', 'col2'),) --> () this becomes set([])
model_state_empty = ModelState("a", "model", [
("id", models.AutoField(primary_key=True))
], {
"index_together": set(),
"unique_together": set(),
})
def test(from_state, to_state, msg):
changes = self.get_changes([from_state], [to_state])
if len(changes) > 0:
ops = ', '.join(o.__class__.__name__ for o in changes['a'][0].operations)
self.fail('Created operation(s) %s from %s' % (ops, msg))
tests = (
(model_state_not_specified, model_state_not_specified, '"not specified" to "not specified"'),
(model_state_not_specified, model_state_none, '"not specified" to "None"'),
(model_state_not_specified, model_state_empty, '"not specified" to "empty"'),
(model_state_none, model_state_not_specified, '"None" to "not specified"'),
(model_state_none, model_state_none, '"None" to "None"'),
(model_state_none, model_state_empty, '"None" to "empty"'),
(model_state_empty, model_state_not_specified, '"empty" to "not specified"'),
(model_state_empty, model_state_none, '"empty" to "None"'),
(model_state_empty, model_state_empty, '"empty" to "empty"'),
)
for t in tests:
test(*t)
def test_create_model_with_indexes(self):
"""Test creation of new model with indexes already defined."""
author = ModelState('otherapp', 'Author', [
('id', models.AutoField(primary_key=True)),
('name', models.CharField(max_length=200)),
], {'indexes': [models.Index(fields=['name'], name='create_model_with_indexes_idx')]})
changes = self.get_changes([], [author])
added_index = models.Index(fields=['name'], name='create_model_with_indexes_idx')
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 2)
# Right actions order?
self.assertOperationTypes(changes, 'otherapp', 0, ['CreateModel', 'AddIndex'])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name='Author')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, model_name='author', index=added_index)
def test_add_indexes(self):
"""Test change detection of new indexes."""
changes = self.get_changes([self.author_empty, self.book], [self.author_empty, self.book_indexes])
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['AddIndex'])
added_index = models.Index(fields=['author', 'title'], name='book_title_author_idx')
self.assertOperationAttributes(changes, 'otherapp', 0, 0, model_name='book', index=added_index)
def test_remove_indexes(self):
"""Test change detection of removed indexes."""
changes = self.get_changes([self.author_empty, self.book_indexes], [self.author_empty, self.book])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['RemoveIndex'])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, model_name='book', name='book_title_author_idx')
def test_order_fields_indexes(self):
"""Test change detection of reordering of fields in indexes."""
changes = self.get_changes(
[self.author_empty, self.book_indexes], [self.author_empty, self.book_unordered_indexes]
)
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['RemoveIndex', 'AddIndex'])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, model_name='book', name='book_title_author_idx')
added_index = models.Index(fields=['title', 'author'], name='book_author_title_idx')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, model_name='book', index=added_index)
def test_add_foo_together(self):
"""Tests index/unique_together detection."""
changes = self.get_changes([self.author_empty, self.book], [self.author_empty, self.book_foo_together])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("author", "title")})
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together={("author", "title")})
def test_remove_foo_together(self):
"""Tests index/unique_together detection."""
changes = self.get_changes([self.author_empty, self.book_foo_together], [self.author_empty, self.book])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together=set())
def test_foo_together_remove_fk(self):
"""Tests unique_together and field removal detection & ordering"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together], [self.author_empty, self.book_with_no_author]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, [
"AlterUniqueTogether", "AlterIndexTogether", "RemoveField"
])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 2, model_name="book", name="author")
def test_foo_together_no_changes(self):
"""
index/unique_together doesn't generate a migration if no
changes have been made.
"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together], [self.author_empty, self.book_foo_together]
)
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_foo_together_ordering(self):
"""
index/unique_together also triggers on ordering changes.
"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together], [self.author_empty, self.book_foo_together_2]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("title", "author")})
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together={("title", "author")})
def test_add_field_and_foo_together(self):
"""
Added fields will be created before using them in index/unique_together.
"""
changes = self.get_changes([self.author_empty, self.book], [self.author_empty, self.book_foo_together_3])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AddField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield")})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("title", "newfield")})
def test_create_model_and_unique_together(self):
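        """
        unique/index_together that reference a newly added field are applied
        only after the field itself has been created.
        """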
author = ModelState("otherapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
])
book_with_author = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("otherapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "author")},
"unique_together": {("title", "author")},
})
changes = self.get_changes([self.book_with_no_author], [author, book_with_author])
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 4)
# Right actions order?
self.assertOperationTypes(
changes, 'otherapp', 0,
['CreateModel', 'AddField', 'AlterUniqueTogether', 'AlterIndexTogether']
)
def test_remove_field_and_foo_together(self):
"""
Removed fields will be removed after updating index/unique_together.
"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together_3], [self.author_empty, self.book_foo_together]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RemoveField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, model_name="book", name="newfield")
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("author", "title")})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("author", "title")})
def test_rename_field_and_foo_together(self):
"""
        Fields are renamed before updating index/unique_together.
"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together_3],
[self.author_empty, self.book_foo_together_4],
MigrationQuestioner({"ask_rename": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RenameField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={
("title", "newfield2")
})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("title", "newfield2")})
def test_proxy(self):
"""The autodetector correctly deals with proxy models."""
# First, we test adding a proxy model
changes = self.get_changes([self.author_empty], [self.author_empty, self.author_proxy])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="AuthorProxy", options={"proxy": True, "indexes": []}
)
# Now, we test turning a proxy model into a non-proxy model
# It should delete the proxy then make the real one
changes = self.get_changes(
[self.author_empty, self.author_proxy], [self.author_empty, self.author_proxy_notproxy]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy", options={})
def test_proxy_custom_pk(self):
"""
#23415 - The autodetector must correctly deal with custom FK on proxy
models.
"""
# First, we test the default pk field name
changes = self.get_changes([], [self.author_empty, self.author_proxy_third, self.book_proxy_fk])
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'id')
# Now, we test the custom pk field name
changes = self.get_changes([], [self.author_custom_pk, self.author_proxy_third, self.book_proxy_fk])
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'pk_field')
def test_proxy_to_mti_with_fk_to_proxy(self):
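        """
        Converting a proxy model to a concrete (MTI) model retargets an
        incoming FK from the proxied model's table to the new concrete table.
        """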
# First, test the pk table and field name.
changes = self.get_changes(
[],
[self.author_empty, self.author_proxy_third, self.book_proxy_fk],
)
self.assertEqual(
changes['otherapp'][0].operations[0].fields[2][1].remote_field.model._meta.db_table,
'testapp_author',
)
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'id')
# Change AuthorProxy to use MTI.
changes = self.get_changes(
[self.author_empty, self.author_proxy_third, self.book_proxy_fk],
[self.author_empty, self.author_proxy_third_notproxy, self.book_proxy_fk],
)
# Right number/type of migrations for the AuthorProxy model?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ['DeleteModel', 'CreateModel'])
# Right number/type of migrations for the Book model with a FK to
# AuthorProxy?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['AlterField'])
# otherapp should depend on thirdapp.
self.assertMigrationDependencies(changes, 'otherapp', 0, [('thirdapp', 'auto_1')])
# Now, test the pk table and field name.
self.assertEqual(
changes['otherapp'][0].operations[0].field.remote_field.model._meta.db_table,
'thirdapp_authorproxy',
)
self.assertEqual(changes['otherapp'][0].operations[0].field.remote_field.field_name, 'author_ptr')
def test_proxy_to_mti_with_fk_to_proxy_proxy(self):
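        """
        Like test_proxy_to_mti_with_fk_to_proxy, but the FK targets a proxy of
        the proxy being converted; the FK must follow the now-concrete model.
        """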
# First, test the pk table and field name.
changes = self.get_changes(
[],
[self.author_empty, self.author_proxy, self.author_proxy_proxy, self.book_proxy_proxy_fk],
)
self.assertEqual(
changes['otherapp'][0].operations[0].fields[1][1].remote_field.model._meta.db_table,
'testapp_author',
)
self.assertEqual(changes['otherapp'][0].operations[0].fields[1][1].remote_field.field_name, 'id')
# Change AuthorProxy to use MTI. FK still points to AAuthorProxyProxy,
# a proxy of AuthorProxy.
changes = self.get_changes(
[self.author_empty, self.author_proxy, self.author_proxy_proxy, self.book_proxy_proxy_fk],
[self.author_empty, self.author_proxy_notproxy, self.author_proxy_proxy, self.book_proxy_proxy_fk],
)
# Right number/type of migrations for the AuthorProxy model?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ['DeleteModel', 'CreateModel'])
# Right number/type of migrations for the Book model with a FK to
# AAuthorProxyProxy?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['AlterField'])
# otherapp should depend on testapp.
self.assertMigrationDependencies(changes, 'otherapp', 0, [('testapp', 'auto_1')])
# Now, test the pk table and field name.
self.assertEqual(
changes['otherapp'][0].operations[0].field.remote_field.model._meta.db_table,
'testapp_authorproxy',
)
self.assertEqual(changes['otherapp'][0].operations[0].field.remote_field.field_name, 'author_ptr')
def test_unmanaged_create(self):
"""The autodetector correctly deals with managed models."""
# First, we test adding an unmanaged model
changes = self.get_changes([self.author_empty], [self.author_empty, self.author_unmanaged])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="AuthorUnmanaged", options={"managed": False})
def test_unmanaged_to_managed(self):
# Now, we test turning an unmanaged model into a managed model
changes = self.get_changes(
[self.author_empty, self.author_unmanaged], [self.author_empty, self.author_unmanaged_managed]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="authorunmanaged", options={})
def test_managed_to_unmanaged(self):
# Now, we turn managed to unmanaged.
changes = self.get_changes(
[self.author_empty, self.author_unmanaged_managed], [self.author_empty, self.author_unmanaged]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="authorunmanaged", options={"managed": False})
def test_unmanaged_custom_pk(self):
"""
#23415 - The autodetector must correctly deal with custom FK on
unmanaged models.
"""
# First, we test the default pk field name
changes = self.get_changes([], [self.author_unmanaged_default_pk, self.book])
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'id')
# Now, we test the custom pk field name
changes = self.get_changes([], [self.author_unmanaged_custom_pk, self.book])
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'pk_field')
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable(self):
with isolate_lru_cache(apps.get_swappable_settings_name):
changes = self.get_changes([self.custom_user], [self.custom_user, self.author_with_custom_user])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [("__setting__", "AUTH_USER_MODEL")])
def test_swappable_changed(self):
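        """Changing AUTH_USER_MODEL is detected as an AlterField on FKs to the user model."""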
with isolate_lru_cache(apps.get_swappable_settings_name):
before = self.make_project_state([self.custom_user, self.author_with_user])
with override_settings(AUTH_USER_MODEL="thirdapp.CustomUser"):
after = self.make_project_state([self.custom_user, self.author_with_custom_user])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name='user')
fk_field = changes['testapp'][0].operations[0].field
to_model = '%s.%s' % (
fk_field.remote_field.model._meta.app_label,
fk_field.remote_field.model._meta.object_name,
)
self.assertEqual(to_model, 'thirdapp.CustomUser')
def test_add_field_with_default(self):
"""#22030 - Adding a field with a default should work."""
changes = self.get_changes([self.author_empty], [self.author_name_default])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="name")
def test_custom_deconstructible(self):
"""
Two instances which deconstruct to the same value aren't considered a
change.
"""
changes = self.get_changes([self.author_name_deconstructible_1], [self.author_name_deconstructible_2])
# Right number of migrations?
self.assertEqual(len(changes), 0)
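    # Editor's context note (not part of the upstream suite): deconstruct()
    # on a field returns a (name, path, args, kwargs) 4-tuple, e.g. roughly
    #   models.CharField(max_length=200).deconstruct()
    #   -> (None, 'django.db.models.CharField', [], {'max_length': 200})
    # so two distinct instances whose tuples match are treated as unchanged.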
def test_deconstruct_field_kwarg(self):
"""Field instances are handled correctly by nested deconstruction."""
changes = self.get_changes([self.author_name_deconstructible_3], [self.author_name_deconstructible_4])
self.assertEqual(changes, {})
def test_deconstructible_list(self):
"""Nested deconstruction descends into lists."""
# When lists contain items that deconstruct to identical values, those lists
# should be considered equal for the purpose of detecting state changes
# (even if the original items are unequal).
changes = self.get_changes(
[self.author_name_deconstructible_list_1], [self.author_name_deconstructible_list_2]
)
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed lists should be reported
# as a change
changes = self.get_changes(
[self.author_name_deconstructible_list_1], [self.author_name_deconstructible_list_3]
)
self.assertEqual(len(changes), 1)
def test_deconstructible_tuple(self):
"""Nested deconstruction descends into tuples."""
# When tuples contain items that deconstruct to identical values, those tuples
# should be considered equal for the purpose of detecting state changes
# (even if the original items are unequal).
changes = self.get_changes(
[self.author_name_deconstructible_tuple_1], [self.author_name_deconstructible_tuple_2]
)
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed tuples should be reported
# as a change
changes = self.get_changes(
[self.author_name_deconstructible_tuple_1], [self.author_name_deconstructible_tuple_3]
)
self.assertEqual(len(changes), 1)
def test_deconstructible_dict(self):
"""Nested deconstruction descends into dict values."""
# When dicts contain items whose values deconstruct to identical values,
# those dicts should be considered equal for the purpose of detecting
# state changes (even if the original values are unequal).
changes = self.get_changes(
[self.author_name_deconstructible_dict_1], [self.author_name_deconstructible_dict_2]
)
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed dicts should be reported
# as a change
changes = self.get_changes(
[self.author_name_deconstructible_dict_1], [self.author_name_deconstructible_dict_3]
)
self.assertEqual(len(changes), 1)
def test_nested_deconstructible_objects(self):
"""
Nested deconstruction is applied recursively to the args/kwargs of
deconstructed objects.
"""
# If the items within a deconstructed object's args/kwargs have the same
# deconstructed values - whether or not the items themselves are different
# instances - then the object as a whole is regarded as unchanged.
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_2]
)
self.assertEqual(changes, {})
# Differences that exist solely within the args list of a deconstructed object
# should be reported as changes
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_changed_arg]
)
self.assertEqual(len(changes), 1)
# Additional args should also be reported as a change
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_extra_arg]
)
self.assertEqual(len(changes), 1)
# Differences that exist solely within the kwargs dict of a deconstructed object
# should be reported as changes
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_changed_kwarg]
)
self.assertEqual(len(changes), 1)
# Additional kwargs should also be reported as a change
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_extra_kwarg]
)
self.assertEqual(len(changes), 1)
def test_deconstruct_type(self):
"""
#22951 -- Uninstantiated classes with deconstruct are correctly returned
by deep_deconstruct during serialization.
"""
author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(
max_length=200,
# IntegerField intentionally not instantiated.
default=models.IntegerField,
))
],
)
changes = self.get_changes([], [author])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
def test_replace_string_with_foreignkey(self):
"""
#22300 - Adding an FK in the same "spot" as a deleted CharField should
work.
"""
changes = self.get_changes([self.author_with_publisher_string], [self.author_with_publisher, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "RemoveField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publisher_name")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publisher")
def test_foreign_key_removed_before_target_model(self):
"""
Removing an FK and the model it targets in the same change must remove
the FK field before the model to maintain consistency.
"""
changes = self.get_changes(
[self.author_with_publisher, self.publisher], [self.author_name]
) # removes both the model and FK
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RemoveField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Publisher")
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_many_to_many(self, mocked_ask_method):
"""#22435 - Adding a ManyToManyField should not prompt for a default."""
changes = self.get_changes([self.author_empty, self.publisher], [self.author_with_m2m, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers")
def test_alter_many_to_many(self):
changes = self.get_changes(
[self.author_with_m2m, self.publisher], [self.author_with_m2m_blank, self.publisher]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers")
def test_create_with_through_model(self):
"""
Adding a m2m with a through model and the models that use it should be
ordered correctly.
"""
changes = self.get_changes([], [self.author_with_m2m_through, self.publisher, self.contract])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, [
"CreateModel", "CreateModel", "CreateModel", "AddField", "AddField"
])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Contract")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 3, model_name='contract', name='publisher')
self.assertOperationAttributes(changes, 'testapp', 0, 4, model_name='author', name='publishers')
def test_many_to_many_removed_before_through_model(self):
"""
Removing a ManyToManyField and the "through" model in the same change
must remove the field before the model to maintain consistency.
"""
changes = self.get_changes(
[self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution],
[self.book_with_no_author, self.author_name],
)
# Remove both the through model and ManyToMany
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RemoveField", "RemoveField", "RemoveField", "DeleteModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="author", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, name="book", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 2, name="authors", model_name='book')
self.assertOperationAttributes(changes, 'otherapp', 0, 3, name='Attribution')
def test_many_to_many_removed_before_through_model_2(self):
"""
Removing a model that contains a ManyToManyField and the "through" model
in the same change must remove the field before the model to maintain
consistency.
"""
changes = self.get_changes(
[self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution],
[self.author_name],
)
# Remove both the through model and ManyToMany
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, [
"RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"
])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="author", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, name="book", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 2, name="authors", model_name='book')
self.assertOperationAttributes(changes, 'otherapp', 0, 3, name='Attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 4, name='Book')
def test_m2m_w_through_multistep_remove(self):
"""
A model with a m2m field that specifies a "through" model cannot be
removed in the same migration as that through model as the schema will
pass through an inconsistent state. The autodetector should produce two
migrations to avoid this issue.
"""
changes = self.get_changes([self.author_with_m2m_through, self.publisher, self.contract], [self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, [
"RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"
])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers", model_name='author')
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author", model_name='contract')
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher", model_name='contract')
self.assertOperationAttributes(changes, "testapp", 0, 3, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 4, name="Contract")
def test_concrete_field_changed_to_many_to_many(self):
"""
#23938 - Changing a concrete field into a ManyToManyField
first removes the concrete field and then adds the m2m field.
"""
changes = self.get_changes([self.author_with_former_m2m], [self.author_with_m2m, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name='Publisher')
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publishers", model_name='author')
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publishers", model_name='author')
def test_many_to_many_changed_to_concrete_field(self):
"""
#23938 - Changing a ManyToManyField into a concrete field
first removes the m2m field and then adds the concrete field.
"""
changes = self.get_changes([self.author_with_m2m, self.publisher], [self.author_with_former_m2m])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "AddField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers", model_name='author')
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publishers", model_name='author')
self.assertOperationAttributes(changes, 'testapp', 0, 2, name='Publisher')
self.assertOperationFieldAttributes(changes, 'testapp', 0, 1, max_length=100)
def test_non_circular_foreignkey_dependency_removal(self):
"""
If two models with a ForeignKey from one to the other are removed at the
same time, the autodetector should remove them in the correct order.
"""
changes = self.get_changes([self.author_with_publisher, self.publisher_with_author], [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publisher", model_name='author')
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author", model_name='publisher')
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 3, name="Publisher")
def test_alter_model_options(self):
"""Changing a model's options should make a change."""
changes = self.get_changes([self.author_empty], [self.author_with_options])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, options={
"permissions": [('can_hire', 'Can hire')],
"verbose_name": "Authi",
})
# Changing them back to empty should also make a change
changes = self.get_changes([self.author_with_options], [self.author_empty])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", options={})
def test_alter_model_options_proxy(self):
"""Changing a proxy model's options should also make a change."""
changes = self.get_changes(
[self.author_proxy, self.author_empty], [self.author_proxy_options, self.author_empty]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="authorproxy", options={
"verbose_name": "Super Author"
})
def test_set_alter_order_with_respect_to(self):
"""Setting order_with_respect_to adds a field."""
changes = self.get_changes([self.book, self.author_with_book], [self.book, self.author_with_book_order_wrt])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to="book")
def test_add_alter_order_with_respect_to(self):
"""
Setting order_with_respect_to when adding the FK too does
things in the right order.
"""
changes = self.get_changes([self.author_name], [self.book, self.author_with_book_order_wrt])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name="book")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
def test_remove_alter_order_with_respect_to(self):
"""
Removing order_with_respect_to when removing the FK too does
things in the right order.
"""
changes = self.get_changes([self.book, self.author_with_book_order_wrt], [self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo", "RemoveField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to=None)
self.assertOperationAttributes(changes, 'testapp', 0, 1, model_name="author", name="book")
def test_add_model_order_with_respect_to(self):
"""
Setting order_with_respect_to when adding the whole model
does things in the right order.
"""
changes = self.get_changes([], [self.book, self.author_with_book_order_wrt])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
self.assertNotIn("_order", [name for name, field in changes['testapp'][0].operations[0].fields])
def test_alter_model_managers(self):
"""
Changing the model managers adds a new operation.
"""
changes = self.get_changes([self.other_pony], [self.other_pony_food])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["AlterModelManagers"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="pony")
self.assertEqual([name for name, mgr in changes['otherapp'][0].operations[0].managers],
['food_qs', 'food_mgr', 'food_mgr_kwargs'])
self.assertEqual(changes['otherapp'][0].operations[0].managers[1][1].args, ('a', 'b', 1, 2))
self.assertEqual(changes['otherapp'][0].operations[0].managers[2][1].args, ('x', 'y', 3, 4))
def test_swappable_first_inheritance(self):
"""Swappable models get their CreateModel first."""
changes = self.get_changes([], [self.custom_user, self.aardvark])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable_first_setting(self):
"""Swappable models get their CreateModel first."""
with isolate_lru_cache(apps.get_swappable_settings_name):
changes = self.get_changes([], [self.custom_user_no_inherit, self.aardvark])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
def test_bases_first(self):
"""Bases of other models come first."""
changes = self.get_changes([], [self.aardvark_based_on_author, self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_multiple_bases(self):
"""#23956 - Inheriting models doesn't move *_ptr fields into AddField operations."""
A = ModelState("app", "A", [("a_id", models.AutoField(primary_key=True))])
B = ModelState("app", "B", [("b_id", models.AutoField(primary_key=True))])
C = ModelState("app", "C", [], bases=("app.A", "app.B"))
D = ModelState("app", "D", [], bases=("app.A", "app.B"))
E = ModelState("app", "E", [], bases=("app.A", "app.B"))
changes = self.get_changes([], [A, B, C, D, E])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, [
"CreateModel", "CreateModel", "CreateModel", "CreateModel", "CreateModel"
])
self.assertOperationAttributes(changes, "app", 0, 0, name="A")
self.assertOperationAttributes(changes, "app", 0, 1, name="B")
self.assertOperationAttributes(changes, "app", 0, 2, name="C")
self.assertOperationAttributes(changes, "app", 0, 3, name="D")
self.assertOperationAttributes(changes, "app", 0, 4, name="E")
def test_proxy_bases_first(self):
"""Bases of proxies come first."""
changes = self.get_changes([], [self.author_empty, self.author_proxy, self.author_proxy_proxy])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorProxy")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="AAuthorProxyProxy")
def test_pk_fk_included(self):
"""
A relation used as the primary key is kept as part of CreateModel.
"""
changes = self.get_changes([], [self.aardvark_pk_fk_author, self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_first_dependency(self):
"""
A dependency to an app with no migrations uses __first__.
"""
# Load graph
loader = MigrationLoader(connection)
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("migrations", "__first__")])
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_last_dependency(self):
"""
A dependency to an app with existing migrations uses the
last migration of that app.
"""
# Load graph
loader = MigrationLoader(connection)
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("migrations", "0002_second")])
def test_alter_fk_before_model_deletion(self):
"""
        ForeignKeys are altered _before_ the model they used to
        refer to is deleted.
"""
changes = self.get_changes(
[self.author_name, self.publisher_with_author],
[self.aardvark_testapp, self.publisher_with_aardvark_author]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Aardvark")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Author")
def test_fk_dependency_other_app(self):
"""
#23100 - ForeignKeys correctly depend on other apps' models.
"""
changes = self.get_changes([self.author_name, self.book], [self.author_with_book, self.book])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="book")
self.assertMigrationDependencies(changes, 'testapp', 0, [("otherapp", "__first__")])
def test_circular_dependency_mixed_addcreate(self):
"""
#23315 - The dependency resolver knows to put all CreateModel
before AddField and not become unsolvable.
"""
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("country", models.ForeignKey("b.DeliveryCountry", models.CASCADE)),
])
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
])
apackage = ModelState("b", "APackage", [
("id", models.AutoField(primary_key=True)),
("person", models.ForeignKey("a.Person", models.CASCADE)),
])
country = ModelState("b", "DeliveryCountry", [
("id", models.AutoField(primary_key=True)),
])
changes = self.get_changes([], [address, person, apackage, country])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel", "CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertOperationTypes(changes, 'b', 0, ["CreateModel", "CreateModel"])
@override_settings(AUTH_USER_MODEL="a.Tenant")
def test_circular_dependency_swappable(self):
"""
#23322 - The dependency resolver knows to explicitly resolve
swappable models.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
tenant = ModelState("a", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("b.Address", models.CASCADE))],
bases=(AbstractBaseUser, )
)
address = ModelState("b", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE)),
])
changes = self.get_changes([], [address, tenant])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'a', 0, [])
self.assertMigrationDependencies(changes, 'a', 1, [('a', 'auto_1'), ('b', 'auto_1')])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
self.assertMigrationDependencies(changes, 'b', 0, [('__setting__', 'AUTH_USER_MODEL')])
@override_settings(AUTH_USER_MODEL="b.Tenant")
def test_circular_dependency_swappable2(self):
"""
#23322 - The dependency resolver knows to explicitly resolve
swappable models but with the swappable not being the first migrated
model.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE)),
])
tenant = ModelState("b", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("a.Address", models.CASCADE))],
bases=(AbstractBaseUser, )
)
changes = self.get_changes([], [address, tenant])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'a', 0, [])
self.assertMigrationDependencies(changes, 'a', 1, [('__setting__', 'AUTH_USER_MODEL'), ('a', 'auto_1')])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
self.assertMigrationDependencies(changes, 'b', 0, [('a', 'auto_1')])
@override_settings(AUTH_USER_MODEL="a.Person")
def test_circular_dependency_swappable_self(self):
"""
#23322 - The dependency resolver knows to explicitly resolve
swappable models.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
("parent1", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, related_name='children'))
])
changes = self.get_changes([], [person])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertMigrationDependencies(changes, 'a', 0, [])
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and blank `CharField` or `TextField`
without default should not prompt for a default.
"""
changes = self.get_changes([self.author_empty], [self.author_with_biography_blank])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition')
def test_add_non_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and non-blank `CharField` or `TextField`
without default should prompt for a default.
"""
changes = self.get_changes([self.author_empty], [self.author_with_biography_non_blank])
self.assertEqual(mocked_ask_method.call_count, 2)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0)
|
leppa/home-assistant | refs/heads/dev | homeassistant/components/fan/device_trigger.py | 8 | """Provides device automations for Fan."""
from typing import List
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType, state
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN
TRIGGER_TYPES = {"turned_on", "turned_off"}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for Fan devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add triggers for each entity that belongs to this integration
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turned_on",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turned_off",
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
if config[CONF_TYPE] == "turned_on":
from_state = STATE_OFF
to_state = STATE_ON
else:
from_state = STATE_ON
to_state = STATE_OFF
state_config = {
state.CONF_PLATFORM: "state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state.CONF_FROM: from_state,
state.CONF_TO: to_state,
}
state_config = state.TRIGGER_SCHEMA(state_config)
return await state.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
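# A hedged illustration (editor's addition) of a trigger config dict that
# TRIGGER_SCHEMA above would accept; the device and entity ids here are
# hypothetical placeholders, not values from the original file:
#
#   {
#       "platform": "device",
#       "domain": "fan",
#       "device_id": "abc123",
#       "entity_id": "fan.bedroom",
#       "type": "turned_on",
#   }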
|
HiroIshikawa/21playground | refs/heads/master | learning/venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/compat.py | 2942 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
|
pselle/calibre | refs/heads/master | src/calibre/utils/shared_file.py | 9 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import os, sys
from calibre.constants import iswindows, plugins
'''
This module defines a share_open() function which is a replacement for
Python's builtin open() function.
This replacement, opens 'shareable' files on all platforms. That is files that
can be read from and written to and deleted at the same time by multiple
processes. All file handles are non-inheritable, as in Python 3, but unlike,
Python 2. Non-inheritance is atomic.
Caveats on Windows: on Windows, sharing is co-operative, i.e. it only works if
all processes involved open the file with share_open(). Also while you can
delete a file that is open, you cannot open a new file with the same filename
until all open file handles are closed. You also cannot delete the containing
directory until all file handles are closed. To get around this, rename the
file before deleting it.
'''
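# A two-line usage sketch (editor's note; test() at the bottom of this file
# gives the fuller demonstration of the semantics described above):
#
#   with share_open('log.txt', 'a+b') as f:  # other processes may hold the
#       f.write(b'entry\n')                  # same file open concurrently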
speedup, err = plugins['speedup']
if not speedup:
raise RuntimeError('Failed to load the speedup plugin with error: %s' % err)
valid_modes = {'a', 'a+', 'a+b', 'ab', 'r', 'rb', 'r+', 'r+b', 'w', 'wb', 'w+', 'w+b'}
def validate_mode(mode):
return mode in valid_modes
class FlagConstants(object):
def __init__(self):
for x in 'APPEND CREAT TRUNC EXCL RDWR RDONLY WRONLY'.split():
x = 'O_' + x
setattr(self, x, getattr(os, x))
for x in 'RANDOM SEQUENTIAL TEXT BINARY'.split():
x = 'O_' + x
setattr(self, x, getattr(os, x, 0))
fc = FlagConstants()
def flags_from_mode(mode):
if not validate_mode(mode):
raise ValueError('The mode is invalid')
m = mode[0]
random = '+' in mode
binary = 'b' in mode
if m == 'a':
flags = fc.O_APPEND | fc.O_CREAT
if random:
flags |= fc.O_RDWR | fc.O_RANDOM
else:
flags |= fc.O_WRONLY | fc.O_SEQUENTIAL
elif m == 'r':
if random:
flags = fc.O_RDWR | fc.O_RANDOM
else:
flags = fc.O_RDONLY | fc.O_SEQUENTIAL
elif m == 'w':
if random:
flags = fc.O_RDWR | fc.O_RANDOM
else:
flags = fc.O_WRONLY | fc.O_SEQUENTIAL
flags |= fc.O_TRUNC | fc.O_CREAT
flags |= (fc.O_BINARY if binary else fc.O_TEXT)
return flags
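# Worked example of the mapping above (editor's sketch, using only the names
# defined on FlagConstants): for mode 'r+b', m == 'r' with random and binary
# both set, so the result is O_RDWR | O_RANDOM | O_BINARY, where O_RANDOM and
# O_BINARY fall back to 0 on platforms that do not define them.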
if iswindows:
from numbers import Integral
import msvcrt
import win32file, pywintypes
CREATE_NEW = win32file.CREATE_NEW
CREATE_ALWAYS = win32file.CREATE_ALWAYS
OPEN_EXISTING = win32file.OPEN_EXISTING
OPEN_ALWAYS = win32file.OPEN_ALWAYS
TRUNCATE_EXISTING = win32file.TRUNCATE_EXISTING
FILE_SHARE_READ = win32file.FILE_SHARE_READ
FILE_SHARE_WRITE = win32file.FILE_SHARE_WRITE
FILE_SHARE_DELETE = win32file.FILE_SHARE_DELETE
FILE_SHARE_VALID_FLAGS = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE
FILE_ATTRIBUTE_READONLY = win32file.FILE_ATTRIBUTE_READONLY
FILE_ATTRIBUTE_NORMAL = win32file.FILE_ATTRIBUTE_NORMAL
FILE_ATTRIBUTE_TEMPORARY = win32file.FILE_ATTRIBUTE_TEMPORARY
FILE_FLAG_DELETE_ON_CLOSE = win32file.FILE_FLAG_DELETE_ON_CLOSE
FILE_FLAG_SEQUENTIAL_SCAN = win32file.FILE_FLAG_SEQUENTIAL_SCAN
FILE_FLAG_RANDOM_ACCESS = win32file.FILE_FLAG_RANDOM_ACCESS
GENERIC_READ = win32file.GENERIC_READ & 0xffffffff
GENERIC_WRITE = win32file.GENERIC_WRITE & 0xffffffff
DELETE = 0x00010000
_ACCESS_MASK = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
_ACCESS_MAP = {
os.O_RDONLY : GENERIC_READ,
os.O_WRONLY : GENERIC_WRITE,
os.O_RDWR : GENERIC_READ | GENERIC_WRITE
}
_CREATE_MASK = os.O_CREAT | os.O_EXCL | os.O_TRUNC
_CREATE_MAP = {
0 : OPEN_EXISTING,
os.O_EXCL : OPEN_EXISTING,
os.O_CREAT : OPEN_ALWAYS,
os.O_CREAT | os.O_EXCL : CREATE_NEW,
os.O_CREAT | os.O_TRUNC | os.O_EXCL : CREATE_NEW,
os.O_TRUNC : TRUNCATE_EXISTING,
os.O_TRUNC | os.O_EXCL : TRUNCATE_EXISTING,
os.O_CREAT | os.O_TRUNC : CREATE_ALWAYS
}
def raise_winerror(pywinerr):
raise WindowsError(pywinerr.winerror, (pywinerr.funcname or '') + b': ' + (pywinerr.strerror or '')), None, sys.exc_info()[2]
def os_open(path, flags, mode=0o777, share_flags=FILE_SHARE_VALID_FLAGS):
'''
Replacement for os.open() allowing moving or unlinking before closing
'''
if not isinstance(flags, Integral):
raise TypeError('flags must be an integer')
if not isinstance(mode, Integral):
raise TypeError('mode must be an integer')
if share_flags & ~FILE_SHARE_VALID_FLAGS:
raise ValueError('bad share_flags: %r' % share_flags)
access_flags = _ACCESS_MAP[flags & _ACCESS_MASK]
create_flags = _CREATE_MAP[flags & _CREATE_MASK]
attrib_flags = FILE_ATTRIBUTE_NORMAL
if flags & os.O_CREAT and mode & ~0o444 == 0:
attrib_flags = FILE_ATTRIBUTE_READONLY
if flags & os.O_TEMPORARY:
share_flags |= FILE_SHARE_DELETE
attrib_flags |= FILE_FLAG_DELETE_ON_CLOSE
access_flags |= DELETE
if flags & os.O_SHORT_LIVED:
attrib_flags |= FILE_ATTRIBUTE_TEMPORARY
if flags & os.O_SEQUENTIAL:
attrib_flags |= FILE_FLAG_SEQUENTIAL_SCAN
if flags & os.O_RANDOM:
attrib_flags |= FILE_FLAG_RANDOM_ACCESS
try:
h = win32file.CreateFileW(
path, access_flags, share_flags, None, create_flags, attrib_flags, None)
except pywintypes.error as e:
raise_winerror(e)
ans = msvcrt.open_osfhandle(h, flags | os.O_NOINHERIT)
        h.Detach()  # We don't want the handle to be automatically closed when h is deleted
return ans
def share_open(path, mode='r', buffering=-1):
flags = flags_from_mode(mode)
return speedup.fdopen(os_open(path, flags), path, mode, buffering)
else:
def share_open(path, mode='r', buffering=-1):
flags = flags_from_mode(mode) | speedup.O_CLOEXEC
return speedup.fdopen(os.open(path, flags), path, mode, buffering)
def raise_winerror(x):
raise NotImplementedError(), None, sys.exc_info()[2]
def test():
import repr as reprlib
def eq(x, y):
if x != y:
raise AssertionError('%s != %s' % (reprlib.repr(x), reprlib.repr(y)))
from calibre.ptempfile import TemporaryDirectory
with TemporaryDirectory() as tdir:
fname = os.path.join(tdir, 'test.txt')
with share_open(fname, 'wb') as f:
f.write(b'a' * 20 * 1024)
eq(fname, f.name)
f = share_open(fname, 'rb')
eq(f.read(1), b'a')
if iswindows:
os.rename(fname, fname+'.moved')
os.remove(fname+'.moved')
else:
os.remove(fname)
eq(f.read(1), b'a')
f2 = share_open(fname, 'w+b')
f2.write(b'b' * 10 * 1024)
f2.seek(0)
eq(f.read(10000), b'a'*10000)
eq(f2.read(100), b'b' * 100)
f3 = share_open(fname, 'rb')
eq(f3.read(100), b'b' * 100)
|
siosio/intellij-community | refs/heads/master | python/testData/psi/FStringTerminatedByQuoteInsideFStringLiteral.py | 15 | s = f'foo{f"'"}baz' |
ryklith/pyltesim | refs/heads/master | raps/raps.py | 1 | #!/usr/bin/env python
''' Resource Allocation using Power control and Sleep as described in my academic papers, mostly in the JSAC one.
This is performed within a base station or sector. For all associated mobiles, allocate powers and sleep modes over the OFDMA frame.
File: RAPS.py
'''
__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"
from world import mobile
import numpy as np
from quantmap import quantmap
from rcg import rcg
from iwf import iwf
from utils import utils
from optim import optimMinPow
from scipy import linalg
import logging
logger = logging.getLogger('RAPS_script')
def raps(wrld, cell, mobiles, rate, plotting=False):
"""In the cell, allocate powers and sleep modes to mobiles."""
# need to map algorithm indices to mobile ids (artifact from MATLAB)
id_map = dict()
for k, mob in enumerate(mobiles):
id_map[k] = mob.id_
# Make sure all mobiles are associated with the cell correctly
for mob in mobiles:
if mob.cell != cell:
raise ValueError('Faulty passing of mobiles to RAPS')
logger.info( '{0:50} {1:5d}'.format('Mobiles in this cell:', len(mobiles)))
# Build SINR arrays
users = len(mobiles)
N = wrld.PHY.numFreqChunks
T = wrld.PHY.numTimeslots
noisePower = wrld.wconf.systemNoisePower
# EC_Optim contains one MIMO value per mobile (the center chunk effective channel)
    EC_Optim = np.empty([users, cell.antennas, wrld.mobiles[0].antennas], dtype=complex) # TODO Will we ever have mobiles with different numbers of antennas?
# SINR_Quant contains one value for each RB and user
SINR_Quant = np.empty([N, T, users])
centerChunkIndex = np.floor(N/2)
whichTimeslotIndex = list(cell.sleep_slot_priority).index(0)
for idx, mob in enumerate(mobiles):
EC_Optim[idx,:,:] = mob.OFDMA_EC[:,:,centerChunkIndex,whichTimeslotIndex] / N # scale this effective channel over all chunks
SINR_Quant[:,:,idx] = np.mean(mob.OFDMA_effSINR,0) # SINR for RCG
### Step 1 ###
# Optimization call
pSupplyOptim, resourceAlloc, status = optimMinPow.optimizePCDTX(EC_Optim, np.ones(EC_Optim.shape[0]), rate, wrld.PHY.systemBandwidth, cell.pMax, mobiles[0].BS.p0, mobiles[0].BS.m, mobiles[0].BS.pS)
logger.debug( 'Resource Allocation: ' + str(resourceAlloc))
logger.debug( 'Sleep priority: ' + str(cell.sleep_slot_priority))
logger.info( '{0:50} {1:5.2f} W'.format('Real-valued optimization objective:', pSupplyOptim) )
## Plot ##
if cell.cellid == 12345 and plotting: # center in tier 1
from plotting import channelplotter
channelplotter.bar(np.mean(np.mean(EC_Optim,1),1),'Abs mean of MIMO EC Optim', 'sinr_optim.pdf')
channelplotter.OFDMAchannel(SINR_Quant, 'SINR Quant', 'sinr_quant.pdf')
channelplotter.bar(resourceAlloc,'Resource Share Optim', 'rscshare.pdf')
import pdb; pdb.set_trace()
### Step 2 ###
# Map real valued solution to OFDMA frame
# QUANTMAP
resourcesPerTimeslot = quantmap.quantmap(resourceAlloc, N, T)
outmap = np.empty([N, T])
# Handle sleep slot alignment here.
resourcesPerTimeslot = resourcesPerTimeslot[cell.sleep_slot_priority]
# RCG
for t in np.arange(T):
outmap[:,t],_ = rcg.rcg(SINR_Quant[:,t,:],resourcesPerTimeslot[t,:]) # outmap.shape = (N,T) tells the user index
# Given allocation and rate target, we inverse waterfill channels for each user separately on the basis of full SINR
# IWF
powerlvls = np.empty([N, T, mob.antennas])
powerlvls[:] = np.nan
for idx, obj in enumerate(mobiles):
        # grab this user's channel data
EC_usr = obj.OFDMA_EC[:,:,outmap==idx] # all effective channels assigned to this user
noiseIfPower_usr = np.real(EC_usr[0,0,:].repeat(2) * 0 + 1) # TODO remove later #(obj.baseStations[obj.BS].cells[obj.cell].OFDMA_interferencePower + obj.baseStations[obj.BS].cells[obj.cell].OFDMA_noisePower) * np.ones(SINR_user_all[0,0,:,:].shape)[outmap==idx].ravel().repeat(2) # one IF value per resource, so repeat once to match spatial channels
# create list of eigVals
eigVals = np.real([linalg.eig(EC_usr[:,:,i])[0] for i in np.arange(EC_usr.shape[2])]).ravel() # two eigvals (spatial channels) per resource
targetLoad = rate * wrld.PHY.simulationTime
# inverse waterfill and fill back to OFDMA position
powlvl, waterlvl, cap = iwf.inversewaterfill(eigVals, targetLoad, noiseIfPower_usr, wrld.PHY.systemBandwidth / N, wrld.PHY.simulationTime / T)
powerlvls[outmap==idx,:] = powlvl.reshape(EC_usr.shape[2],obj.antennas)
ptx = np.array([np.nansum(np.nansum(powerlvls[:,t,:],axis=0),axis=0) for t in np.arange(T)])
    logger.debug('Ptx: ' + str(ptx))
if (ptx > cell.pMax).any():
raise ValueError('Transmission power too high in IWF: '+str(ptx)+ ' W')
# Store power levels in cell for next round
cell.OFDMA_power[:] = np.swapaxes(np.swapaxes(powerlvls[:],1,2),0,1)
cell.OFDMA_power[np.isnan(cell.OFDMA_power)] = 0
# remap to mobile ids
outmap_ids = np.copy(outmap)
for k,v in id_map.iteritems():
outmap_ids[outmap==k] = v
cell.outmap = outmap_ids
psupplyPerSlot = mobiles[0].BS.p0 + mobiles[0].BS.m * ptx
psupplyPerSlot[np.isnan(psupplyPerSlot)] = mobiles[0].BS.pS
pSupplyQuant = np.mean(psupplyPerSlot)
logger.info( '{0:50} {1:5.2f} W'.format('Integer-valued optimization objective:', pSupplyQuant))
return pSupplyOptim, pSupplyQuant
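# Hypothetical invocation sketch (editor's addition; wrld, cell and the list
# of associated mobiles come from the surrounding simulator and are assumed
# here, as is the rate value):
#
#   pOptim, pQuant = raps(wrld, cell, mobiles, rate=2.0e6)
#
# pOptim is the relaxed, real-valued lower bound from Step 1; pQuant is the
# mean supply power after quantizing the allocation onto the OFDMA frame.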
def capacity_achieved_per_mobile(target, wrld, cell, mobiles):
'''Returns list in length of number of mobiles in cell indicating capacity achieved (True) or not (False) for each mobile.'''
li = []
for idx, mob in enumerate(mobiles):
cap = achieved_capacity_on_mobile(wrld, cell, mobiles, idx)
if cap < target:
li.append(False)
else:
li.append(True)
return li
def achieved_capacity_in_cell(wrld, cell, mobiles):
"""Returns cell capacity (all users) for this frame."""
cap = 0
for idx, mob in enumerate(mobiles):
cap += achieved_capacity_on_mobile(wrld, cell, mobiles, idx)
return cap
def achieved_capacity_on_mobile(wrld, cell, mobiles, user):
"""Calculate achieved link bit load of one user for this frame."""
mobile_id = mobiles[user].id_
n, t = np.where(cell.outmap==mobile_id)
N = wrld.PHY.numFreqChunks
T = wrld.PHY.numTimeslots
systemBandwidth = wrld.PHY.systemBandwidth
totalTime = wrld.PHY.simulationTime
resourceTime = totalTime / T
resourceBandwidth = systemBandwidth / N
cap = 0
for i in np.arange(len(n)):
power = cell.OFDMA_power[:]
cap += RB_bit_capacity(mobiles[user], n[i], t[i], resourceBandwidth,
resourceTime, power)
return cap
|
WSDC-NITWarangal/django | refs/heads/master | tests/test_client/auth_backends.py | 315 | from django.contrib.auth.backends import ModelBackend
class TestClientBackend(ModelBackend):
pass
|
ClusterHQ/dvol | refs/heads/master | dvol_python/texttable.py | 1 | #!/usr/bin/env python
#
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2011 Gerome Fournier <jef(at)foutaise.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([ ["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"] ])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'GPL'
__version__ = '0.8.1'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
"""
import math
import sys
import string
from functools import reduce
try:
if sys.version >= '2.3':
import textwrap
elif sys.version >= '2.2':
from optparse import textwrap
else:
from optik import textwrap
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
def len(iterable):
"""Redefining len here so it will be able to work with non-ASCII characters
"""
if not isinstance(iterable, str):
return iterable.__len__()
try:
return len(str(iterable, 'utf'))
except:
return iterable.__len__()
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size
"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
class bcolors:
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
WHITE = ''
def bcolors_public_props():
return (name for name in dir(bcolors) if not name.startswith('_'))
def get_color_string(type, string):
end = bcolors.ENDC
if type == bcolors.WHITE:
end = ''
return '%s%s%s' % (type, string, end)
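# For instance (editor's note), get_color_string(bcolors.GREEN, 'ok')
# evaluates to '\033[92mok\033[0m', while the WHITE pseudo-color returns the
# string unchanged because both its prefix and the chosen end are empty.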
class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
if max_width <= 0:
max_width = False
self._max_width = max_width
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
def set_deco(self, deco):
"""Set the table decoration
        - 'deco' can be a combination of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either "a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
            raise ValueError('width must be an integer greater than or equal to 0')
self._precision = width
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(str, array))
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i,x in enumerate(array):
cells.append(self._str(i,x))
self._rows.append(cells)
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
        - The 'rows' argument can be either an iterator returning arrays,
          or a two-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
        # nb: don't use 'iter' on two-dimensional arrays, to keep the code
        # usable on python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(next(rows))
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
if type(x) is str:
return x
else:
if x is None:
return str(x)
else:
return str(x.encode('utf-8'))
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
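# Illustration (assuming '-' as the horizontal char and '+' as the corner
# char, with border and vlines enabled): for column widths [3, 5] this
# produces "+-----+-------+\n", i.e. each width plus one padding space per side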
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such as newlines and tabs
"""
for attr in bcolors_public_props():
cell = cell.replace(getattr(bcolors, attr), '').replace(bcolors.ENDC,'')
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for i, part in enumerate(parts, 1):
for attr in bcolors_public_props():
part = part.replace(getattr(bcolors, attr), '')
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
column widths exceeds the desired table width, new widths are
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for i, cell in enumerate(row):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
items = len(maxi)
length = sum(maxi)
if self._max_width and length + items * 3 + 1 > self._max_width:
max_lengths = maxi
maxi = [(self._max_width - items * 3 -1) // items \
for n in range(items)]
# free space to distribute
free = 0
# how many columns are oversized
oversized = 0
# reduce size of columns that need less space and calculate how
# much space is freed
for col, max_len in enumerate(max_lengths):
current_length = maxi[col]
# column needs less space, adjust and
# update free space
if current_length > max_len:
free += current_length - max_len
maxi[col] = max_len
# column needs more space, count it
elif max_len > current_length:
oversized += 1
# as long as free space is available, distribute it
while free > 0:
# available free space for each oversized column
free_part = int(math.ceil(float(free) / float(oversized)))
for col, max_len in enumerate(max_lengths):
current_length = maxi[col]
# column needs more space
if current_length < max_len:
# how much space is needed
needed = max_len - current_length
# enough free space for column
if needed <= free_part:
maxi[col] = max_len
free -= needed
oversized -= 1
# still oversized after re-sizing
else:
maxi[col] = maxi[col] + free_part
free -= free_part
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Emit the row line by line across all of its wrapped cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
lost_color = 'WHITE'  # attribute name, so getattr(bcolors, lost_color) resolves below
original_cell = cell_line
for attr in bcolors_public_props():
cell_line = cell_line.replace(
getattr(bcolors, attr), '').replace(bcolors.ENDC,''
)
if cell_line.replace(bcolors.ENDC,'') != original_cell.replace(
bcolors.ENDC,'') and attr != 'ENDC':
if lost_color == 'WHITE':
lost_color = attr
fill = width - len(cell_line)
try:
cell_line = get_color_string(
getattr(bcolors, lost_color),cell_line
)
except AttributeError:
pass
if isheader:
align = "c"
if align == "r":
out += "%s " % (fill * space + cell_line)
elif align == "c":
out += "%s " % (fill//2 * space + cell_line \
+ (fill//2 + fill%2) * space)
else:
out += "%s " % (cell_line + fill * space)
if length < len(line):
out += "%s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
original_cell = cell
lost_color = 'WHITE'  # attribute name, so getattr(bcolors, lost_color) resolves below
for attr in bcolors_public_props():
cell = cell.replace(
getattr(bcolors, attr), '').replace(bcolors.ENDC,'')
if cell.replace(bcolors.ENDC,'') != original_cell.replace(
bcolors.ENDC,'') and attr != 'ENDC':
if lost_color == 'WHITE':
lost_color = attr
for c in cell.split('\n'):
if type(c) is not str:
try:
c = str(c, 'utf')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (c, strerror))
c = str(c, 'utf', 'replace')
try:
array.extend(
[get_color_string(
getattr(bcolors, lost_color),x
) for x in textwrap.wrap(c, width)
]
)
except AttributeError:
array.extend(textwrap.wrap(c, width))
line_wrapped.append(array)
max_cell_lines = max(len(c) for c in line_wrapped)
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * (missing // 2)
cell.extend([""] * (missing // 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
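# Vertical alignment above, illustrated: if the tallest cell in a row wraps
# to 3 lines, a 1-line cell becomes ["", "x", ""] for valign "m",
# ["", "", "x"] for "b", and ["x", "", ""] for "t" (the default)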
if __name__ == '__main__':
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([ [get_color_string(bcolors.GREEN, "Name Of Person"), "Age", "Nickname"],
["Mr\nXavier\nHuon", 32, "Xav'"],
[get_color_string(bcolors.BLUE,"Mr\nBaptiste\nClement"), 1, get_color_string(bcolors.RED,"Baby")] ])
print(table.draw() + "\n")
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([['text', "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
|
munnerz/CouchPotatoServer | refs/heads/master | libs/pyasn1/compat/binary.py | 172 | from sys import version_info
if version_info[0:2] < (2, 6):
def bin(x):
if x <= 1:
return '0b'+str(x)
else:
return bin(x>>1) + str(x&1)
else:
bin = bin
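# Sanity check for the fallback above (non-negative ints, as the recursion
# assumes): bin(0) == '0b0', bin(5) == '0b101', bin(10) == '0b1010',
# matching the built-in bin() available from Python 2.6 onwards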
|
MadeiraCloud/salt | refs/heads/master | sources/salt/runners/thin.py | 1 | # -*- coding: utf-8 -*-
'''
The thin runner is used to manage the salt thin systems.
Salt Thin is a transport-less version of Salt that can be used to run routines
in a standalone way. This runner has tools which generate the standalone salt
system for easy consumption.
'''
# Import Salt libs
import salt.utils.thin
def generate(extra_mods='', overwrite=False):
'''
Generate the salt-thin tarball and print the location of the tarball
Optional additional mods to include (e.g. mako) can be supplied as a comma
delimited string. Permits forcing an overwrite of the output file as well.
CLI Example:
.. code-block:: bash
salt-run thin.generate
salt-run thin.generate mako
salt-run thin.generate mako,wempy 1
salt-run thin.generate overwrite=1
'''
print(salt.utils.thin.gen_thin(__opts__['cachedir'], extra_mods, overwrite))
|
alexsavio/scikit-learn | refs/heads/master | examples/svm/plot_svm_regression.py | 120 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
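# The fitted estimators also generalise to unseen inputs, e.g. (illustrative):
# svr_rbf.predict(np.array([[2.5]])) returns a one-element prediction array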
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
# matplotlib overlays successive plot calls on the same axes by default;
# the old plt.hold API was removed
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
|
jwalgran/otm-core | refs/heads/develop | opentreemap/treemap/templatetags/comment_sequence.py | 3 | from django.template import Library
register = Library()
@register.filter
def in_thread_order(comments):
'''Convert a list of comments in chronological order into a list of
comments in tree order, where the children of a comment appear
directly after the comment in the list.'''
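# Illustration with hypothetical comment ids (parent in brackets):
# chronological input 1, 2, 3[1], 4[2], 5[1] yields 1, 3, 5, 2, 4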
roots = []
children_of = {}
for c in comments:
if c.parent:
if c.parent.id not in children_of:
children_of[c.parent.id] = []
children_of[c.parent.id].append(c)
else:
roots.append(c)
def order_children_for(parent):
if parent.id in children_of:
children = []
for child in children_of[parent.id]:
children.append(child)
children.extend(order_children_for(child))
return children
else:
return []
sequenced = []
for root in roots:
sequenced.append(root)
sequenced.extend(order_children_for(root))
return sequenced
|